Diffstat (limited to 'drivers')
drivers/Makefile | 1
drivers/acpi/acpi_memhotplug.c | 5
drivers/acpi/arm64/iort.c | 13
drivers/acpi/custom_method.c | 4
drivers/acpi/internal.h | 1
drivers/acpi/pci_mcfg.c | 7
drivers/acpi/power.c | 2
drivers/acpi/scan.c | 2
drivers/acpi/sleep.h | 1
drivers/ata/ahci_brcm.c | 46
drivers/atm/fore200e.c | 1
drivers/atm/idt77252.c | 6
drivers/atm/iphase.c | 2
drivers/atm/suni.c | 1
drivers/auxdisplay/panel.c | 7
drivers/base/firmware_loader/main.c | 2
drivers/base/memory.c | 101
drivers/bcma/driver_mips.c | 7
drivers/block/brd.c | 1
drivers/block/loop.c | 1
drivers/bluetooth/Kconfig | 10
drivers/bluetooth/Makefile | 2
drivers/bluetooth/btintel.c | 232
drivers/bluetooth/btintel.h | 19
drivers/bluetooth/btusb.c | 408
drivers/bluetooth/hci_bcm.c | 19
drivers/bluetooth/hci_intel.c | 7
drivers/bluetooth/hci_qca.c | 17
drivers/bluetooth/virtio_bt.c | 401
drivers/char/Kconfig | 10
drivers/char/mem.c | 231
drivers/clk/sifive/Kconfig | 2
drivers/clk/sifive/fu740-prci.c | 11
drivers/clk/sifive/fu740-prci.h | 2
drivers/clk/sifive/sifive-prci.c | 54
drivers/clk/sifive/sifive-prci.h | 13
drivers/clocksource/arm_arch_timer.c | 36
drivers/crypto/ccp/sev-dev.c | 193
drivers/crypto/ccp/sev-dev.h | 4
drivers/dma/Kconfig | 12
drivers/dma/at_xdmac.c | 11
drivers/dma/dw-edma/dw-edma-core.c | 178
drivers/dma/dw-edma/dw-edma-core.h | 37
drivers/dma/dw-edma/dw-edma-pcie.c | 277
drivers/dma/dw-edma/dw-edma-v0-core.c | 300
drivers/dma/dw-edma/dw-edma-v0-core.h | 2
drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 77
drivers/dma/dw-edma/dw-edma-v0-debugfs.h | 4
drivers/dma/dw-edma/dw-edma-v0-regs.h | 291
drivers/dma/idxd/Makefile | 2
drivers/dma/idxd/cdev.c | 132
drivers/dma/idxd/device.c | 283
drivers/dma/idxd/dma.c | 77
drivers/dma/idxd/idxd.h | 168
drivers/dma/idxd/init.c | 485
drivers/dma/idxd/irq.c | 29
drivers/dma/idxd/perfmon.c | 662
drivers/dma/idxd/perfmon.h | 119
drivers/dma/idxd/registers.h | 120
drivers/dma/idxd/submit.c | 42
drivers/dma/idxd/sysfs.c | 776
drivers/dma/k3dma.c | 4
drivers/dma/qcom/gpi.c | 1
drivers/dma/qcom/hidma.c | 6
drivers/dma/xilinx/xilinx_dma.c | 8
drivers/firmware/dmi_scan.c | 1
drivers/firmware/psci/psci.c | 2
drivers/firmware/smccc/Makefile | 2
drivers/firmware/smccc/kvm_guest.c | 50
drivers/firmware/smccc/smccc.c | 1
drivers/firmware/xilinx/zynqmp.c | 114
drivers/gpio/Kconfig | 24
drivers/gpio/Makefile | 1
drivers/gpio/gpio-104-dio-48e.c | 50
drivers/gpio/gpio-aggregator.c | 39
drivers/gpio/gpio-ich.c | 2
drivers/gpio/gpio-it87.c | 8
drivers/gpio/gpio-mockup.c | 9
drivers/gpio/gpio-mpc8xxx.c | 47
drivers/gpio/gpio-mxs.c | 5
drivers/gpio/gpio-omap.c | 5
drivers/gpio/gpio-realtek-otto.c | 325
drivers/gpio/gpio-regmap.c | 5
drivers/gpio/gpio-sch.c | 198
drivers/gpio/gpiolib-acpi.c | 21
drivers/gpio/gpiolib-acpi.h | 4
drivers/gpio/gpiolib-of.c | 6
drivers/gpio/gpiolib.c | 62
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 63
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 13
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 41
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 12
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 24
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 82
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 34
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 16
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 32
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 24
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 3
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 3
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 3
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 24
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 3
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 3
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 11
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 128
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 125
drivers/gpu/drm/amd/amdgpu/nv.c | 10
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 11
drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 35
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 28
drivers/gpu/drm/amd/amdgpu/soc15.c | 7
drivers/gpu/drm/amd/amdgpu/vi.c | 193
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 129
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 4
drivers/gpu/drm/amd/amdkfd/kfd_process.c | 27
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 35
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 25
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 9
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 58
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 4
drivers/gpu/drm/amd/display/dc/core/dc.c | 37
drivers/gpu/drm/amd/display/dc/core/dc_link.c | 87
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 63
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 4
drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 33
drivers/gpu/drm/amd/display/dc/dc.h | 13
drivers/gpu/drm/amd/display/dc/dc_link.h | 1
drivers/gpu/drm/amd/display/dc/dc_stream.h | 1
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 2
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 10
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 15
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 7
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 2
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 2
drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 8
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 31
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 2
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h | 16
drivers/gpu/drm/amd/include/atomfirmware.h | 41
drivers/gpu/drm/amd/pm/amdgpu_pm.c | 4
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h | 40
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 2
drivers/gpu/drm/amd/pm/inc/smu_v12_0.h | 2
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 378
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 1
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 123
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 55
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2
drivers/gpu/drm/i915/Kconfig | 1
drivers/gpu/drm/i915/display/intel_display.c | 3
drivers/gpu/drm/i915/display/intel_dp_link_training.c | 2
drivers/gpu/drm/i915/display/intel_fbc.c | 2
drivers/gpu/drm/i915/display/intel_overlay.c | 4
drivers/gpu/drm/i915/display/intel_psr.c | 3
drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 9
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 1
drivers/gpu/drm/i915/gvt/cmd_parser.c | 5
drivers/gpu/drm/i915/gvt/gtt.c | 6
drivers/gpu/drm/i915/gvt/gvt.c | 8
drivers/gpu/drm/i915/i915_drv.h | 3
drivers/gpu/drm/i915/i915_mm.c | 117
drivers/gpu/drm/i915/i915_request.c | 3
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 5
drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
drivers/gpu/drm/qxl/qxl_drv.c | 1
drivers/gpu/drm/radeon/cik.c | 4
drivers/gpu/drm/radeon/si.c | 2
drivers/gpu/drm/tegra/dc.c | 113
drivers/gpu/drm/tegra/dc.h | 6
drivers/gpu/drm/tegra/drm.c | 27
drivers/gpu/drm/tegra/drm.h | 5
drivers/gpu/drm/tegra/fb.c | 10
drivers/gpu/drm/tegra/gem.h | 6
drivers/gpu/drm/tegra/gr2d.c | 4
drivers/gpu/drm/tegra/gr3d.c | 4
drivers/gpu/drm/tegra/hub.c | 41
drivers/gpu/drm/tegra/plane.c | 32
drivers/gpu/drm/tegra/vic.c | 4
drivers/gpu/drm/ttm/ttm_device.c | 2
drivers/gpu/drm/ttm/ttm_tt.c | 2
drivers/gpu/drm/vmwgfx/ttm_memory.c | 2
drivers/gpu/host1x/bus.c | 31
drivers/gpu/host1x/cdma.c | 11
drivers/gpu/host1x/debug.c | 14
drivers/gpu/host1x/dev.c | 6
drivers/gpu/host1x/dev.h | 13
drivers/gpu/host1x/hw/cdma_hw.c | 2
drivers/gpu/host1x/hw/channel_hw.c | 10
drivers/gpu/host1x/hw/debug_hw.c | 2
drivers/gpu/host1x/hw/hw_host1x07_vm.h | 2
drivers/gpu/host1x/intr.c | 28
drivers/gpu/host1x/intr.h | 4
drivers/gpu/host1x/job.c | 5
drivers/gpu/host1x/syncpt.c | 202
drivers/gpu/host1x/syncpt.h | 4
drivers/hid/Kconfig | 20
drivers/hid/Makefile | 6
drivers/hid/hid-alps.c | 2
drivers/hid/hid-core.c | 2
drivers/hid/hid-debug.c | 1
drivers/hid/hid-elan.c | 17
drivers/hid/hid-ft260.c | 1054
drivers/hid/hid-ids.h | 14
drivers/hid/hid-input.c | 22
drivers/hid/hid-kye.c | 2
drivers/hid/hid-lenovo.c | 147
drivers/hid/hid-lg.c | 24
drivers/hid/hid-logitech-dj.c | 131
drivers/hid/hid-logitech-hidpp.c | 7
drivers/hid/hid-magicmouse.c | 158
drivers/hid/hid-picolcd_core.c | 5
drivers/hid/hid-plantronics.c | 60
drivers/hid/hid-quirks.c | 5
drivers/hid/hid-sensor-custom.c | 5
drivers/hid/hid-sensor-hub.c | 4
drivers/hid/hid-thrustmaster.c | 371
drivers/hid/hid-uclogic-params.c | 8
drivers/hid/hid-uclogic-rdesc.c | 2
drivers/hid/i2c-hid/i2c-hid-acpi.c | 52
drivers/hid/surface-hid/Kconfig | 42
drivers/hid/surface-hid/Makefile | 7
drivers/hid/surface-hid/surface_hid.c | 253
drivers/hid/surface-hid/surface_hid_core.c | 272
drivers/hid/surface-hid/surface_hid_core.h | 77
drivers/hid/surface-hid/surface_kbd.c | 300
drivers/hid/usbhid/hid-pidff.c | 4
drivers/hid/usbhid/hiddev.c | 6
drivers/hid/usbhid/usbkbd.c | 18
drivers/hid/wacom_sys.c | 2
drivers/hid/wacom_wac.c | 50
drivers/hid/wacom_wac.h | 1
drivers/hwspinlock/Kconfig | 11
drivers/hwspinlock/Makefile | 1
drivers/hwspinlock/sirf_hwspinlock.c | 105
drivers/hwtracing/coresight/Kconfig | 24
drivers/hwtracing/coresight/Makefile | 1
drivers/hwtracing/coresight/coresight-core.c | 29
drivers/hwtracing/coresight/coresight-etm-perf.c | 119
drivers/hwtracing/coresight/coresight-etm4x-core.c | 161
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 19
drivers/hwtracing/coresight/coresight-etm4x.h | 83
drivers/hwtracing/coresight/coresight-platform.c | 6
drivers/hwtracing/coresight/coresight-priv.h | 3
drivers/hwtracing/coresight/coresight-trbe.c | 1157
drivers/hwtracing/coresight/coresight-trbe.h | 152
drivers/i2c/busses/Kconfig | 22
drivers/i2c/busses/Makefile | 2
drivers/i2c/busses/i2c-amd8111.c | 268
drivers/i2c/busses/i2c-brcmstb.c | 1
drivers/i2c/busses/i2c-cadence.c | 9
drivers/i2c/busses/i2c-cht-wc.c | 6
drivers/i2c/busses/i2c-cp2615.c | 330
drivers/i2c/busses/i2c-designware-common.c | 3
drivers/i2c/busses/i2c-designware-core.h | 8
drivers/i2c/busses/i2c-designware-master.c | 155
drivers/i2c/busses/i2c-designware-pcidrv.c | 61
drivers/i2c/busses/i2c-emev2.c | 5
drivers/i2c/busses/i2c-hisi.c | 504
drivers/i2c/busses/i2c-i801.c | 4
drivers/i2c/busses/i2c-icy.c | 32
drivers/i2c/busses/i2c-img-scb.c | 4
drivers/i2c/busses/i2c-imx-lpi2c.c | 2
drivers/i2c/busses/i2c-imx.c | 6
drivers/i2c/busses/i2c-iop3xx.c | 28
drivers/i2c/busses/i2c-jz4780.c | 5
drivers/i2c/busses/i2c-mlxbf.c | 16
drivers/i2c/busses/i2c-mpc.c | 579
drivers/i2c/busses/i2c-mt65xx.c | 17
drivers/i2c/busses/i2c-nomadik.c | 4
drivers/i2c/busses/i2c-nvidia-gpu.c | 6
drivers/i2c/busses/i2c-omap.c | 8
drivers/i2c/busses/i2c-powermac.c | 5
drivers/i2c/busses/i2c-qcom-cci.c | 4
drivers/i2c/busses/i2c-rcar.c | 89
drivers/i2c/busses/i2c-s3c2410.c | 9
drivers/i2c/busses/i2c-scmi.c | 2
drivers/i2c/busses/i2c-sh7760.c | 5
drivers/i2c/busses/i2c-sprd.c | 5
drivers/i2c/busses/i2c-stm32f7.c | 82
drivers/i2c/busses/i2c-tegra-bpmp.c | 79
drivers/i2c/busses/i2c-xgene-slimpro.c | 1
drivers/i2c/busses/i2c-xiic.c | 4
drivers/i2c/i2c-boardinfo.c | 11
drivers/i2c/i2c-core-base.c | 114
drivers/i2c/i2c-dev.c | 9
drivers/infiniband/core/cache.c | 87
drivers/infiniband/core/cm.c | 58
drivers/infiniband/core/cm_msgs.h | 4
drivers/infiniband/core/cma.c | 116
drivers/infiniband/core/cma_configfs.c | 8
drivers/infiniband/core/cma_priv.h | 10
drivers/infiniband/core/core_priv.h | 28
drivers/infiniband/core/counters.c | 62
drivers/infiniband/core/device.c | 37
drivers/infiniband/core/iwpm_msg.c | 3
drivers/infiniband/core/mad.c | 79
drivers/infiniband/core/mad_rmpp.c | 10
drivers/infiniband/core/multicast.c | 8
drivers/infiniband/core/nldev.c | 176
drivers/infiniband/core/opa_smi.h | 4
drivers/infiniband/core/rdma_core.c | 4
drivers/infiniband/core/restrack.c | 3
drivers/infiniband/core/roce_gid_mgmt.c | 52
drivers/infiniband/core/rw.c | 25
drivers/infiniband/core/sa.h | 2
drivers/infiniband/core/sa_query.c | 22
drivers/infiniband/core/security.c | 8
drivers/infiniband/core/smi.c | 12
drivers/infiniband/core/smi.h | 4
drivers/infiniband/core/sysfs.c | 29
drivers/infiniband/core/ucma.c | 8
drivers/infiniband/core/umem.c | 20
drivers/infiniband/core/umem_dmabuf.c | 4
drivers/infiniband/core/user_mad.c | 34
drivers/infiniband/core/uverbs_cmd.c | 25
drivers/infiniband/core/uverbs_ioctl.c | 32
drivers/infiniband/core/verbs.c | 43
drivers/infiniband/hw/bnxt_re/Kconfig | 4
drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1
drivers/infiniband/hw/bnxt_re/hw_counters.c | 4
drivers/infiniband/hw/bnxt_re/hw_counters.h | 4
drivers/infiniband/hw/bnxt_re/ib_verbs.c | 10
drivers/infiniband/hw/bnxt_re/ib_verbs.h | 10
drivers/infiniband/hw/bnxt_re/main.c | 63
drivers/infiniband/hw/bnxt_re/qplib_fp.c | 1
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 4
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 2
drivers/infiniband/hw/bnxt_re/qplib_res.c | 1
drivers/infiniband/hw/cxgb4/cm.c | 3
drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 11
drivers/infiniband/hw/cxgb4/provider.c | 12
drivers/infiniband/hw/cxgb4/resource.c | 2
drivers/infiniband/hw/cxgb4/t4.h | 33
drivers/infiniband/hw/efa/efa.h | 14
drivers/infiniband/hw/efa/efa_main.c | 10
drivers/infiniband/hw/efa/efa_verbs.c | 14
drivers/infiniband/hw/hfi1/affinity.c | 8
drivers/infiniband/hw/hfi1/chip.c | 10
drivers/infiniband/hw/hfi1/chip.h | 5
drivers/infiniband/hw/hfi1/driver.c | 2
drivers/infiniband/hw/hfi1/exp_rcv.c | 6
drivers/infiniband/hw/hfi1/firmware.c | 1
drivers/infiniband/hw/hfi1/hfi.h | 20
drivers/infiniband/hw/hfi1/init.c | 7
drivers/infiniband/hw/hfi1/iowait.h | 2
drivers/infiniband/hw/hfi1/ipoib.h | 15
drivers/infiniband/hw/hfi1/ipoib_main.c | 13
drivers/infiniband/hw/hfi1/ipoib_tx.c | 71
drivers/infiniband/hw/hfi1/mad.c | 128
drivers/infiniband/hw/hfi1/mad.h | 2
drivers/infiniband/hw/hfi1/mmu_rb.c | 2
drivers/infiniband/hw/hfi1/msix.c | 12
drivers/infiniband/hw/hfi1/netdev.h | 39
drivers/infiniband/hw/hfi1/netdev_rx.c | 172
drivers/infiniband/hw/hfi1/sdma.c | 2
drivers/infiniband/hw/hfi1/sdma.h | 18
drivers/infiniband/hw/hfi1/sysfs.c | 2
drivers/infiniband/hw/hfi1/trace_tx.h | 179
drivers/infiniband/hw/hfi1/user_sdma.c | 12
drivers/infiniband/hw/hfi1/user_sdma.h | 1
drivers/infiniband/hw/hfi1/verbs.c | 8
drivers/infiniband/hw/hfi1/verbs.h | 4
drivers/infiniband/hw/hfi1/verbs_txreq.h | 5
drivers/infiniband/hw/hfi1/vnic.h | 2
drivers/infiniband/hw/hfi1/vnic_main.c | 2
drivers/infiniband/hw/hns/hns_roce_alloc.c | 3
drivers/infiniband/hw/hns/hns_roce_cmd.c | 114
drivers/infiniband/hw/hns/hns_roce_common.h | 25
drivers/infiniband/hw/hns/hns_roce_cq.c | 92
drivers/infiniband/hw/hns/hns_roce_device.h | 91
drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 55
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2267
drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 578
drivers/infiniband/hw/hns/hns_roce_main.c | 74
drivers/infiniband/hw/hns/hns_roce_pd.c | 59
drivers/infiniband/hw/hns/hns_roce_qp.c | 124
drivers/infiniband/hw/hns/hns_roce_srq.c | 5
drivers/infiniband/hw/i40iw/i40iw.h | 9
drivers/infiniband/hw/i40iw/i40iw_cm.c | 4
drivers/infiniband/hw/i40iw/i40iw_hmc.c | 4
drivers/infiniband/hw/i40iw/i40iw_main.c | 5
drivers/infiniband/hw/i40iw/i40iw_osdep.h | 22
drivers/infiniband/hw/i40iw/i40iw_pble.c | 6
drivers/infiniband/hw/i40iw/i40iw_puda.c | 2
drivers/infiniband/hw/i40iw/i40iw_utils.c | 2
drivers/infiniband/hw/i40iw/i40iw_verbs.c | 14
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | 2
drivers/infiniband/hw/mlx4/alias_GUID.c | 16
drivers/infiniband/hw/mlx4/mad.c | 46
drivers/infiniband/hw/mlx4/main.c | 47
drivers/infiniband/hw/mlx4/mlx4_ib.h | 26
drivers/infiniband/hw/mlx4/qp.c | 3
drivers/infiniband/hw/mlx5/Makefile | 1
drivers/infiniband/hw/mlx5/cmd.c | 101
drivers/infiniband/hw/mlx5/cmd.h | 3
drivers/infiniband/hw/mlx5/cong.c | 8
drivers/infiniband/hw/mlx5/counters.c | 10
drivers/infiniband/hw/mlx5/counters.h | 2
drivers/infiniband/hw/mlx5/devx.c | 64
drivers/infiniband/hw/mlx5/dm.c | 587
drivers/infiniband/hw/mlx5/dm.h | 68
drivers/infiniband/hw/mlx5/fs.c | 11
drivers/infiniband/hw/mlx5/ib_rep.c | 9
drivers/infiniband/hw/mlx5/ib_rep.h | 4
drivers/infiniband/hw/mlx5/ib_virt.c | 16
drivers/infiniband/hw/mlx5/mad.c | 16
drivers/infiniband/hw/mlx5/main.c | 346
drivers/infiniband/hw/mlx5/mlx5_ib.h | 182
drivers/infiniband/hw/mlx5/mr.c | 163
drivers/infiniband/hw/mlx5/odp.c | 185
drivers/infiniband/hw/mlx5/qp.c | 17
drivers/infiniband/hw/mlx5/std_types.c | 173
drivers/infiniband/hw/mthca/mthca_av.c | 6
drivers/infiniband/hw/mthca/mthca_dev.h | 8
drivers/infiniband/hw/mthca/mthca_mad.c | 4
drivers/infiniband/hw/mthca/mthca_provider.c | 10
drivers/infiniband/hw/mthca/mthca_qp.c | 2
drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 2
drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 2
drivers/infiniband/hw/ocrdma/ocrdma_main.c | 4
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 7
drivers/infiniband/hw/qedr/main.c | 8
drivers/infiniband/hw/qedr/qedr_iw_cm.c | 4
drivers/infiniband/hw/qedr/verbs.c | 9
drivers/infiniband/hw/qedr/verbs.h | 11
drivers/infiniband/hw/qib/qib.h | 34
drivers/infiniband/hw/qib/qib_common.h | 7
drivers/infiniband/hw/qib/qib_file_ops.c | 5
drivers/infiniband/hw/qib/qib_fs.c | 68
drivers/infiniband/hw/qib/qib_iba6120.c | 2
drivers/infiniband/hw/qib/qib_iba7220.c | 4
drivers/infiniband/hw/qib/qib_iba7322.c | 26
drivers/infiniband/hw/qib/qib_init.c | 2
drivers/infiniband/hw/qib/qib_mad.c | 4
drivers/infiniband/hw/qib/qib_qp.c | 4
drivers/infiniband/hw/qib/qib_sd7220.c | 1
drivers/infiniband/hw/qib/qib_sysfs.c | 2
drivers/infiniband/hw/qib/qib_verbs.c | 6
drivers/infiniband/hw/qib/qib_verbs.h | 6
drivers/infiniband/hw/usnic/usnic_ib_main.c | 2
drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 6
drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 6
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 10
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 2
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 2
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 12
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 45
drivers/infiniband/sw/rdmavt/mad.c | 5
drivers/infiniband/sw/rdmavt/mad.h | 2
drivers/infiniband/sw/rdmavt/vt.c | 34
drivers/infiniband/sw/rdmavt/vt.h | 11
drivers/infiniband/sw/rxe/rxe_av.c | 2
drivers/infiniband/sw/rxe/rxe_comp.c | 5
drivers/infiniband/sw/rxe/rxe_hw_counters.c | 4
drivers/infiniband/sw/rxe/rxe_hw_counters.h | 4
drivers/infiniband/sw/rxe/rxe_loc.h | 30
drivers/infiniband/sw/rxe/rxe_mr.c | 271
drivers/infiniband/sw/rxe/rxe_pool.c | 14
drivers/infiniband/sw/rxe/rxe_req.c | 10
drivers/infiniband/sw/rxe/rxe_resp.c | 52
drivers/infiniband/sw/rxe/rxe_verbs.c | 32
drivers/infiniband/sw/rxe/rxe_verbs.h | 60
drivers/infiniband/sw/siw/iwarp.h | 13
drivers/infiniband/sw/siw/siw_cm.c | 19
drivers/infiniband/sw/siw/siw_mem.c | 4
drivers/infiniband/sw/siw/siw_mem.h | 5
drivers/infiniband/sw/siw/siw_verbs.c | 8
drivers/infiniband/sw/siw/siw_verbs.h | 10
drivers/infiniband/ulp/ipoib/ipoib.h | 6
drivers/infiniband/ulp/ipoib/ipoib_cm.c | 6
drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2
drivers/infiniband/ulp/ipoib/ipoib_main.c | 26
drivers/infiniband/ulp/iser/iscsi_iser.h | 1
drivers/infiniband/ulp/isert/ib_isert.c | 16
drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c | 48
drivers/infiniband/ulp/rtrs/rtrs-clt.c | 122
drivers/infiniband/ulp/rtrs/rtrs-clt.h | 1
drivers/infiniband/ulp/rtrs/rtrs-pri.h | 3
drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c | 20
drivers/infiniband/ulp/rtrs/rtrs-srv.c | 36
drivers/infiniband/ulp/rtrs/rtrs.c | 35
drivers/infiniband/ulp/rtrs/rtrs.h | 3
drivers/infiniband/ulp/srpt/ib_srpt.c | 4
drivers/input/Makefile | 1
drivers/input/joystick/xpad.c | 2
drivers/input/keyboard/gpio_keys.c | 105
drivers/input/keyboard/imx_keypad.c | 13
drivers/input/keyboard/tca6416-keypad.c | 3
drivers/input/keyboard/tegra-kbc.c | 5
drivers/input/misc/Kconfig | 11
drivers/input/misc/Makefile | 1
drivers/input/misc/ims-pcu.c | 1
drivers/input/misc/iqs626a.c | 1838
drivers/input/misc/max8997_haptic.c | 9
drivers/input/mouse/elan_i2c.h | 7
drivers/input/mouse/elan_i2c_core.c | 58
drivers/input/mouse/elantech.c | 6
drivers/input/serio/apbps2.c | 3
drivers/input/touchscreen.c (renamed from drivers/input/touchscreen/of_touchscreen.c) | 13
drivers/input/touchscreen/Kconfig | 39
drivers/input/touchscreen/Makefile | 4
drivers/input/touchscreen/ar1021_i2c.c | 5
drivers/input/touchscreen/atmel_mxt_ts.c | 83
drivers/input/touchscreen/bu21029_ts.c | 4
drivers/input/touchscreen/cyttsp_core.c | 39
drivers/input/touchscreen/cyttsp_core.h | 1
drivers/input/touchscreen/elants_i2c.c | 44
drivers/input/touchscreen/exc3000.c | 253
drivers/input/touchscreen/hycon-hy46xx.c | 591
drivers/input/touchscreen/ili210x.c | 2
drivers/input/touchscreen/ilitek_ts_i2c.c | 690
drivers/input/touchscreen/iqs5xx.c | 171
drivers/input/touchscreen/lpc32xx_ts.c | 10
drivers/input/touchscreen/melfas_mip4.c | 5
drivers/input/touchscreen/mms114.c | 26
drivers/input/touchscreen/msg2638.c | 337
drivers/input/touchscreen/silead.c | 46
drivers/input/touchscreen/stmfts.c | 3
drivers/input/touchscreen/tsc2007.h | 4
drivers/input/touchscreen/tsc2007_core.c | 60
drivers/input/touchscreen/wacom_i2c.c | 56
drivers/input/touchscreen/wm831x-ts.c | 3
drivers/input/touchscreen/zinitix.c | 4
drivers/iommu/Kconfig | 16
drivers/iommu/Makefile | 3
drivers/iommu/amd/amd_iommu.h | 2
drivers/iommu/amd/amd_iommu_types.h | 1
drivers/iommu/amd/init.c | 59
drivers/iommu/amd/iommu.c | 201
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 247
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 18
drivers/iommu/arm/arm-smmu/arm-smmu.c | 117
drivers/iommu/arm/arm-smmu/arm-smmu.h | 2
drivers/iommu/arm/arm-smmu/qcom_iommu.c | 5
drivers/iommu/dma-iommu.c | 149
drivers/iommu/exynos-iommu.c | 7
drivers/iommu/fsl_pamu.c | 293
drivers/iommu/fsl_pamu.h | 12
drivers/iommu/fsl_pamu_domain.c | 693
drivers/iommu/fsl_pamu_domain.h | 46
drivers/iommu/intel/dmar.c | 72
drivers/iommu/intel/iommu.c | 233
drivers/iommu/intel/irq_remapping.c | 5
drivers/iommu/intel/pasid.c | 75
drivers/iommu/intel/pasid.h | 6
drivers/iommu/intel/svm.c | 82
drivers/iommu/io-pgfault.c | 461
drivers/iommu/iommu-sva-lib.h | 53
drivers/iommu/iommu.c | 161
drivers/iommu/iova.c | 96
drivers/iommu/ipmmu-vmsa.c | 6
drivers/iommu/msm_iommu.c | 5
drivers/iommu/mtk_iommu.c | 41
drivers/iommu/mtk_iommu_v1.c | 98
drivers/iommu/of_iommu.c | 5
drivers/iommu/omap-iommu.c | 5
drivers/iommu/rockchip-iommu.c | 5
drivers/iommu/s390-iommu.c | 4
drivers/iommu/sprd-iommu.c | 575
drivers/iommu/sun50i-iommu.c | 5
drivers/iommu/tegra-gart.c | 5
drivers/iommu/tegra-smmu.c | 5
drivers/iommu/virtio-iommu.c | 6
drivers/irqchip/irq-gic-v3-its.c | 18
drivers/isdn/capi/kcapi_proc.c | 1
drivers/isdn/hardware/mISDN/hfcmulti.c | 9
drivers/isdn/hardware/mISDN/iohelper.h | 14
drivers/isdn/mISDN/dsp_core.c | 13
drivers/isdn/mISDN/l1oip_core.c | 9
drivers/leds/Kconfig | 7
drivers/leds/Makefile | 2
drivers/leds/blink/Kconfig | 33
drivers/leds/blink/Makefile | 2
drivers/leds/blink/leds-lgm-sso.c | 2
drivers/leds/flash/Kconfig | 11
drivers/leds/flash/Makefile | 1
drivers/leds/flash/leds-rt4505.c | 430
drivers/leds/leds-lm3642.c | 4
drivers/leds/leds-pca9532.c | 2
drivers/leds/trigger/ledtrig-pattern.c | 2
drivers/macintosh/via-pmu.c | 4
drivers/macintosh/windfarm_core.c | 2
drivers/macintosh/windfarm_pm121.c | 2
drivers/macintosh/windfarm_smu_controls.c | 2
drivers/md/bcache/super.c | 1
drivers/md/dm-cache-target.c | 2
drivers/md/dm-clone-metadata.c | 6
drivers/md/dm-ebs-target.c | 6
drivers/md/dm-integrity.c | 85
drivers/md/dm-ioctl.c | 294
drivers/md/dm-raid.c | 44
drivers/md/dm-rq.c | 2
drivers/md/dm-snap-persistent.c | 6
drivers/md/dm-snap.c | 5
drivers/md/dm-table.c | 30
drivers/md/dm-thin.c | 2
drivers/md/dm-verity-target.c | 40
drivers/md/dm-writecache.c | 2
drivers/md/dm.c | 63
drivers/md/persistent-data/dm-btree-internal.h | 6
drivers/md/persistent-data/dm-btree-spine.c | 8
drivers/md/persistent-data/dm-space-map-common.c | 2
drivers/md/persistent-data/dm-space-map-common.h | 8
drivers/md/persistent-data/dm-space-map-disk.c | 9
drivers/media/usb/pwc/pwc-uncompress.c | 3
drivers/media/usb/uvc/uvc_video.c | 94
drivers/media/usb/uvc/uvcvideo.h | 5
drivers/misc/uacce/uacce.c | 39
drivers/mtd/ubi/build.c | 1
drivers/mtd/ubi/ubi.h | 2
drivers/net/Kconfig | 3
drivers/net/Makefile | 3
drivers/net/Space.c | 3
drivers/net/bareudp.c | 1
drivers/net/bonding/bond_alb.c | 2
drivers/net/bonding/bond_main.c | 9
drivers/net/bonding/bond_options.c | 9
drivers/net/can/Kconfig | 2
drivers/net/can/c_can/c_can.c | 153
drivers/net/can/c_can/c_can.h | 43
drivers/net/can/c_can/c_can_pci.c | 31
drivers/net/can/c_can/c_can_platform.c | 6
drivers/net/can/dev/bittiming.c | 28
drivers/net/can/dev/netlink.c | 27
drivers/net/can/dev/skb.c | 37
drivers/net/can/grcan.c | 2
drivers/net/can/m_can/m_can.c | 167
drivers/net/can/m_can/m_can.h | 2
drivers/net/can/m_can/tcan4x5x.h | 1
drivers/net/can/rcar/rcar_can.c | 2
drivers/net/can/rcar/rcar_canfd.c | 2
drivers/net/can/sja1000/sja1000.c | 2
drivers/net/can/spi/hi311x.c | 2
drivers/net/can/spi/mcp251x.c | 2
drivers/net/can/spi/mcp251xfd/Kconfig | 1
drivers/net/can/spi/mcp251xfd/Makefile | 3
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 125
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c | 285
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h | 45
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 64
drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c | 71
drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 28
drivers/net/can/usb/Kconfig | 10
drivers/net/can/usb/Makefile | 1
drivers/net/can/usb/ems_usb.c | 2
drivers/net/can/usb/esd_usb2.c | 4
drivers/net/can/usb/etas_es58x/Makefile | 3
drivers/net/can/usb/etas_es58x/es581_4.c | 507
drivers/net/can/usb/etas_es58x/es581_4.h | 207
drivers/net/can/usb/etas_es58x/es58x_core.c | 2301
drivers/net/can/usb/etas_es58x/es58x_core.h | 700
drivers/net/can/usb/etas_es58x/es58x_fd.c | 562
drivers/net/can/usb/etas_es58x/es58x_fd.h | 243
drivers/net/can/usb/gs_usb.c | 2
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 2
drivers/net/can/usb/mcba_usb.c | 2
drivers/net/can/usb/peak_usb/pcan_usb.c | 106
drivers/net/can/usb/peak_usb/pcan_usb_core.c | 64
drivers/net/can/usb/peak_usb/pcan_usb_core.h | 9
drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 50
drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 52
drivers/net/can/usb/peak_usb/pcan_usb_pro.h | 82
drivers/net/can/usb/ucan.c | 8
drivers/net/can/usb/usb_8dev.c | 2
drivers/net/can/xilinx_can.c | 10
drivers/net/dsa/Kconfig | 17
drivers/net/dsa/b53/Kconfig | 1
drivers/net/dsa/b53/b53_common.c | 23
drivers/net/dsa/b53/b53_mmap.c | 55
drivers/net/dsa/b53/b53_priv.h | 4
drivers/net/dsa/b53/b53_spi.c | 14
drivers/net/dsa/bcm_sf2.c | 121
drivers/net/dsa/bcm_sf2.h | 2
drivers/net/dsa/bcm_sf2_regs.h | 8
drivers/net/dsa/hirschmann/hellcreek.c | 378
drivers/net/dsa/hirschmann/hellcreek.h | 7
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 28
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h | 4
drivers/net/dsa/lantiq_gswip.c | 162
drivers/net/dsa/microchip/Kconfig | 10
drivers/net/dsa/microchip/Makefile | 1
drivers/net/dsa/microchip/ksz8.h | 69
drivers/net/dsa/microchip/ksz8795.c | 884
drivers/net/dsa/microchip/ksz8795_reg.h | 125
drivers/net/dsa/microchip/ksz8795_spi.c | 46
drivers/net/dsa/microchip/ksz8863_smi.c | 213
drivers/net/dsa/microchip/ksz_common.h | 5
drivers/net/dsa/mt7530.c | 196
drivers/net/dsa/mt7530.h | 15
drivers/net/dsa/mv88e6xxx/chip.c | 599
drivers/net/dsa/mv88e6xxx/chip.h | 44
drivers/net/dsa/mv88e6xxx/devlink.c | 58
drivers/net/dsa/mv88e6xxx/global1.c | 19
drivers/net/dsa/mv88e6xxx/global1.h | 2
drivers/net/dsa/mv88e6xxx/global2.c | 17
drivers/net/dsa/mv88e6xxx/global2.h | 12
drivers/net/dsa/mv88e6xxx/global2_scratch.c | 6
drivers/net/dsa/mv88e6xxx/hwtstamp.c | 26
drivers/net/dsa/mv88e6xxx/hwtstamp.h | 10
drivers/net/dsa/mv88e6xxx/port.c | 418
drivers/net/dsa/mv88e6xxx/port.h | 50
drivers/net/dsa/mv88e6xxx/serdes.c | 344
drivers/net/dsa/mv88e6xxx/serdes.h | 98
drivers/net/dsa/ocelot/felix.c | 23
drivers/net/dsa/ocelot/felix_vsc9959.c | 12
drivers/net/dsa/sja1105/sja1105_flower.c | 9
drivers/net/dsa/sja1105/sja1105_main.c | 18
drivers/net/dsa/sja1105/sja1105_ptp.c | 16
drivers/net/dsa/sja1105/sja1105_ptp.h | 4
drivers/net/ethernet/3com/3c509.c | 1
drivers/net/ethernet/Kconfig | 5
drivers/net/ethernet/Makefile | 2
drivers/net/ethernet/actions/Kconfig | 26
drivers/net/ethernet/actions/Makefile | 6
drivers/net/ethernet/actions/owl-emac.c | 1625
drivers/net/ethernet/actions/owl-emac.h | 280
drivers/net/ethernet/adaptec/starfire.c | 8
drivers/net/ethernet/aeroflex/greth.c | 6
drivers/net/ethernet/allwinner/sun4i-emac.c | 10
drivers/net/ethernet/altera/altera_tse_main.c | 7
drivers/net/ethernet/amazon/ena/ena_com.c | 4
drivers/net/ethernet/amazon/ena/ena_com.h | 2
drivers/net/ethernet/amazon/ena/ena_ethtool.c | 25
drivers/net/ethernet/amazon/ena/ena_netdev.c | 23
drivers/net/ethernet/amd/amd8111e.c | 362
drivers/net/ethernet/amd/atarilance.c | 8
drivers/net/ethernet/amd/hplance.c | 3
drivers/net/ethernet/amd/pcnet32.c | 10
drivers/net/ethernet/arc/emac_main.c | 8
drivers/net/ethernet/atheros/Kconfig | 1
drivers/net/ethernet/atheros/ag71xx.c | 31
drivers/net/ethernet/atheros/atl1c/atl1c.h | 2
drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 74
drivers/net/ethernet/atheros/atlx/atl2.c | 24
drivers/net/ethernet/broadcom/bcm4908_enet.c | 143
drivers/net/ethernet/broadcom/bcmsysport.c | 7
drivers/net/ethernet/broadcom/bgmac-bcma.c | 10
drivers/net/ethernet/broadcom/bgmac-platform.c | 11
drivers/net/ethernet/broadcom/bnx2.c | 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 268
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 33
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 154
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 74
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 1
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 122
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 12
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 20
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 1
drivers/net/ethernet/broadcom/tg3.c | 4
drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 266
drivers/net/ethernet/cadence/macb.h | 14
drivers/net/ethernet/cadence/macb_main.c | 59
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h | 2
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 8
drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2
drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 5
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 3
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 24
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 11
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 8
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 10
drivers/net/ethernet/cisco/enic/enic_main.c | 13
drivers/net/ethernet/davicom/dm9000.c | 11
drivers/net/ethernet/dec/tulip/de2104x.c | 13
drivers/net/ethernet/dec/tulip/winbond-840.c | 13
drivers/net/ethernet/dlink/sundance.c | 15
drivers/net/ethernet/ethoc.c | 6
drivers/net/ethernet/ezchip/nps_enet.c | 7
drivers/net/ethernet/faraday/ftmac100.c | 13
drivers/net/ethernet/fealnx.c | 13
drivers/net/ethernet/freescale/Kconfig | 1
drivers/net/ethernet/freescale/Makefile | 4
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 12
drivers/net/ethernet/freescale/dpaa2/Kconfig | 8
drivers/net/ethernet/freescale/dpaa2/Makefile | 2
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 68
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 10
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 40
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c (renamed from drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c) | 2
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 492
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3394
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 246
drivers/net/ethernet/freescale/dpaa2/dpkg.h | 5
drivers/net/ethernet/freescale/dpaa2/dpmac.h | 24
drivers/net/ethernet/freescale/dpaa2/dpni.c | 6
drivers/net/ethernet/freescale/dpaa2/dpni.h | 162
drivers/net/ethernet/freescale/dpaa2/dprtc.h | 3
drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h (renamed from drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h) | 219
drivers/net/ethernet/freescale/dpaa2/dpsw.c (renamed from drivers/staging/fsl-dpaa2/ethsw/dpsw.c) | 781
drivers/net/ethernet/freescale/dpaa2/dpsw.h | 755
drivers/net/ethernet/freescale/enetc/Kconfig | 9
drivers/net/ethernet/freescale/enetc/Makefile | 3
drivers/net/ethernet/freescale/enetc/enetc.c | 1340
drivers/net/ethernet/freescale/enetc/enetc.h | 129
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 82
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 40
drivers/net/ethernet/freescale/enetc/enetc_hw.h | 16
drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 155
drivers/net/ethernet/freescale/enetc/enetc_ierb.h | 20
drivers/net/ethernet/freescale/enetc/enetc_pf.c | 229
drivers/net/ethernet/freescale/enetc/enetc_qos.c | 21
drivers/net/ethernet/freescale/enetc/enetc_vf.c | 8
drivers/net/ethernet/freescale/fec_main.c | 17
drivers/net/ethernet/freescale/fec_mpc52xx.c | 7
drivers/net/ethernet/freescale/fman/mac.c | 9
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 5
drivers/net/ethernet/freescale/gianfar.c | 178
drivers/net/ethernet/freescale/gianfar.h | 17
drivers/net/ethernet/freescale/ucc_geth.c | 5
drivers/net/ethernet/google/gve/gve_ethtool.c | 8
drivers/net/ethernet/hisilicon/hisi_femac.c | 7
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 7
drivers/net/ethernet/hisilicon/hns/hnae.h | 6
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 22
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 27
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 16
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 2
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 41
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 95
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 26
drivers/net/ethernet/hisilicon/hns/hns_enet.c | 10
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 106
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 3
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 17
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 5
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 210
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 9
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 21
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 14
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 70
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 25
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 10
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2137
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 64
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 38
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 39
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 20
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 1
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 7
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 101
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 6
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 6
drivers/net/ethernet/hisilicon/hns_mdio.c | 4
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 29
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 8
drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 2
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 6
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 1
drivers/net/ethernet/huawei/hinic/hinic_rx.c | 8
drivers/net/ethernet/huawei/hinic/hinic_tx.c | 1
drivers/net/ethernet/ibm/ehea/ehea_main.c | 1
drivers/net/ethernet/ibm/ibmvnic.c | 124
drivers/net/ethernet/ibm/ibmvnic.h | 94
drivers/net/ethernet/intel/Kconfig | 1
drivers/net/ethernet/intel/e1000/e1000_hw.c | 1
drivers/net/ethernet/intel/e1000e/ich8lan.c | 4
drivers/net/ethernet/intel/e1000e/netdev.c | 18
drivers/net/ethernet/intel/e1000e/phy.c | 2
drivers/net/ethernet/intel/e1000e/ptp.c | 2
drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4
drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 2
drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4
drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2
drivers/net/ethernet/intel/i40e/i40e.h | 2
drivers/net/ethernet/intel/i40e/i40e_common.c | 6
drivers/net/ethernet/intel/i40e/i40e_dcb.c | 4
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2
drivers/net/ethernet/intel/i40e/i40e_ddp.c | 2
drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 18
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 2
drivers/net/ethernet/intel/i40e/i40e_main.c | 33
drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
drivers/net/ethernet/intel/i40e/i40e_txrx.c | 17
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 108
drivers/net/ethernet/intel/i40e/i40e_xsk.c | 13
drivers/net/ethernet/intel/iavf/Makefile | 3
drivers/net/ethernet/intel/iavf/iavf.h | 22
drivers/net/ethernet/intel/iavf/iavf_adv_rss.c | 218
drivers/net/ethernet/intel/iavf/iavf_adv_rss.h | 95
drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 883
drivers/net/ethernet/intel/iavf/iavf_fdir.c | 779
drivers/net/ethernet/intel/iavf/iavf_fdir.h | 118
drivers/net/ethernet/intel/iavf/iavf_main.c | 62
drivers/net/ethernet/intel/iavf/iavf_txrx.c | 17
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 360
drivers/net/ethernet/intel/ice/Makefile | 3
drivers/net/ethernet/intel/ice/ice.h | 111
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 24
drivers/net/ethernet/intel/ice/ice_arfs.c | 6
drivers/net/ethernet/intel/ice/ice_base.c | 48
drivers/net/ethernet/intel/ice/ice_common.c | 197
drivers/net/ethernet/intel/ice/ice_common.h | 10
drivers/net/ethernet/intel/ice/ice_controlq.c | 8
drivers/net/ethernet/intel/ice/ice_controlq.h | 5
drivers/net/ethernet/intel/ice/ice_dcb.c | 10
drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 2
drivers/net/ethernet/intel/ice/ice_ethtool.c | 373
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 6
drivers/net/ethernet/intel/ice/ice_fdir.c | 488
drivers/net/ethernet/intel/ice/ice_fdir.h | 58
drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 571
drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 3
drivers/net/ethernet/intel/ice/ice_flex_type.h | 91
drivers/net/ethernet/intel/ice/ice_flow.c | 835
drivers/net/ethernet/intel/ice/ice_flow.h | 166
drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 19
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 22
drivers/net/ethernet/intel/ice/ice_lib.c | 443
drivers/net/ethernet/intel/ice/ice_lib.h | 7
drivers/net/ethernet/intel/ice/ice_main.c | 784
drivers/net/ethernet/intel/ice/ice_nvm.c | 1
drivers/net/ethernet/intel/ice/ice_protocol_type.h | 10
drivers/net/ethernet/intel/ice/ice_sched.c | 133
drivers/net/ethernet/intel/ice/ice_sriov.c | 400
drivers/net/ethernet/intel/ice/ice_sriov.h | 20
drivers/net/ethernet/intel/ice/ice_switch.c | 2
drivers/net/ethernet/intel/ice/ice_txrx.c | 338
drivers/net/ethernet/intel/ice/ice_txrx.h | 45
drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 1
drivers/net/ethernet/intel/ice/ice_type.h | 117
drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 171
drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h | 13
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 2204
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 55
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 774
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 21
drivers/net/ethernet/intel/ice/ice_xsk.c | 21
drivers/net/ethernet/intel/igb/e1000_defines.h | 8
drivers/net/ethernet/intel/igb/e1000_mac.c | 27
drivers/net/ethernet/intel/igb/e1000_mbx.c | 2
drivers/net/ethernet/intel/igb/e1000_phy.c | 1
drivers/net/ethernet/intel/igb/igb_ethtool.c | 41
drivers/net/ethernet/intel/igb/igb_main.c | 25
drivers/net/ethernet/intel/igb/igb_ptp.c | 1
drivers/net/ethernet/intel/igc/Makefile | 2
drivers/net/ethernet/intel/igc/igc.h | 31
drivers/net/ethernet/intel/igc/igc_defines.h | 68
drivers/net/ethernet/intel/igc/igc_ethtool.c | 2
drivers/net/ethernet/intel/igc/igc_i225.c | 6
drivers/net/ethernet/intel/igc/igc_main.c | 539
drivers/net/ethernet/intel/igc/igc_ptp.c | 320
drivers/net/ethernet/intel/igc/igc_regs.h | 10
drivers/net/ethernet/intel/igc/igc_xdp.c | 60
drivers/net/ethernet/intel/igc/igc_xdp.h | 13
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 4
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 16
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 40
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 15
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 8
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 1
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 2
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 11
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1
drivers/net/ethernet/intel/ixgbevf/vf.c | 18
drivers/net/ethernet/intel/ixgbevf/vf.h | 3
drivers/net/ethernet/korina.c | 617
drivers/net/ethernet/lantiq_xrx200.c | 11
drivers/net/ethernet/marvell/mv643xx_eth.c | 11
drivers/net/ethernet/marvell/mvneta.c | 31
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 13
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 107
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 3
drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 60
drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 89
drivers/net/ethernet/marvell/octeontx2/af/npc.h | 7
drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 17
drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 192
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 10
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 196
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 79
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 21
drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 20
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 47
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 37
drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 787
drivers/net/ethernet/marvell/prestera/prestera_main.c | 14
drivers/net/ethernet/marvell/prestera/prestera_pci.c | 1
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 2
drivers/net/ethernet/marvell/pxa168_eth.c | 9
drivers/net/ethernet/marvell/skge.c | 9
drivers/net/ethernet/marvell/sky2.c | 19
drivers/net/ethernet/mediatek/Kconfig | 2
drivers/net/ethernet/mediatek/Makefile | 2
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 315
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 73
drivers/net/ethernet/mediatek/mtk_ppe.c | 509
drivers/net/ethernet/mediatek/mtk_ppe.h | 288
drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 217
drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 495
drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 144
drivers/net/ethernet/mellanox/mlx4/cmd.c | 69
drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 12
drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 32
drivers/net/ethernet/mellanox/mlx5/core/dev.c | 3
drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 72
drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 1
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 5
drivers/net/ethernet/mellanox/mlx5/core/en.h | 57
drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 43
drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h | 6
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 60
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 605
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h | 26
drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 2
drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 548
drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 53
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 399
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 32
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 4
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 91
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 183
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 27
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 54
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 10
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 1
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 203
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 15
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 56
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 7
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 11
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 131
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 20
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 9
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 14
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 99
drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 27
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 34
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 434
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 147
drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1149
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 309
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 10
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 292
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 15
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 225
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 5
drivers/net/ethernet/mellanox/mlx5/core/eq.c | 27
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 2
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 4
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c | 8
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h | 2
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 2
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c | 4
drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 9
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h | 6
drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 510
drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h | 22
drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c | 585
drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h | 42
drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c | 140
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 979
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 277
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 724
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 14
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 9
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 102
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 16
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 13
drivers/net/ethernet/mellanox/mlx5/core/health.c | 6
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 40
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 1
drivers/net/ethernet/mellanox/mlx5/core/lag.c | 4
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 2
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 25
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h | 11
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 2
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 15
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 52
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h | 6
drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c | 19
drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 9
drivers/net/ethernet/mellanox/mlx5/core/main.c | 149
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 20
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 2
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 74
drivers/net/ethernet/mellanox/mlx5/core/port.c | 110
drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 1
drivers/net/ethernet/mellanox/mlx5/core/rl.c | 139
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 12
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h | 2
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 14
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 43
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 269
drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h | 9
drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 48
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 242
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 70
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 256
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 11
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 65
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 145
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 31
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 368
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 289
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 4
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 205
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 16
drivers/net/ethernet/mellanox/mlx5/core/vport.c | 14
drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
drivers/net/ethernet/mellanox/mlxsw/core.c | 6
drivers/net/ethernet/mellanox/mlxsw/core.h | 21
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 131
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 11
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 29
drivers/net/ethernet/mellanox/mlxsw/pci.c | 55
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 71
drivers/net/ethernet/mellanox/mlxsw/reg.h | 130
drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 215
drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 76
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 25
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 83
drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 21
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 129
drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c | 2
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 23
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 10
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 3
drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c | 245
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h | 1
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 15
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 453
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 682
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 12
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c213
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c15
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c7
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig29
-rw-r--r--drivers/net/ethernet/microsoft/Makefile5
-rw-r--r--drivers/net/ethernet/microsoft/mana/Makefile6
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h673
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c1415
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c843
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h190
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h533
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c1895
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c250
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c291
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.h21
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/mscc/Kconfig3
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c188
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c225
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c234
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h14
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c156
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c13
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h9
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c107
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h33
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c109
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h242
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c536
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h104
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c43
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c615
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c392
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c871
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c26
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c10
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c11
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c56
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c82
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c62
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h114
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c13
-rw-r--r--drivers/net/ethernet/sfc/ef10.c20
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c2
-rw-r--r--drivers/net/ethernet/sfc/enum.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/farch.c16
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c11
-rw-r--r--drivers/net/ethernet/sfc/tx.c15
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c16
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c410
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c136
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c124
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2777
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c124
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c135
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h15
-rw-r--r--drivers/net/ethernet/sun/cassini.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c19
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c21
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c21
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c7
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c42
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c8
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig3
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c50
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c8
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/Kconfig1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c215
-rw-r--r--drivers/net/fddi/Kconfig16
-rw-r--r--drivers/net/fddi/defxx.c96
-rw-r--r--drivers/net/fddi/defxx.h5
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h2
-rw-r--r--drivers/net/fddi/skfp/h/smt.h12
-rw-r--r--drivers/net/fddi/skfp/smt.c4
-rw-r--r--drivers/net/geneve.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h6
-rw-r--r--drivers/net/hyperv/netvsc.c55
-rw-r--r--drivers/net/hyperv/netvsc_drv.c65
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ipa/Kconfig5
-rw-r--r--drivers/net/ipa/Makefile6
-rw-r--r--drivers/net/ipa/gsi.c106
-rw-r--r--drivers/net/ipa/gsi.h4
-rw-r--r--drivers/net/ipa/gsi_private.h4
-rw-r--r--drivers/net/ipa/gsi_reg.h69
-rw-r--r--drivers/net/ipa/gsi_trans.c13
-rw-r--r--drivers/net/ipa/gsi_trans.h5
-rw-r--r--drivers/net/ipa/ipa.h7
-rw-r--r--drivers/net/ipa/ipa_cmd.c28
-rw-r--r--drivers/net/ipa/ipa_cmd.h19
-rw-r--r--drivers/net/ipa/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-sdm845.c)229
-rw-r--r--drivers/net/ipa/ipa_data-v4.11.c382
-rw-r--r--drivers/net/ipa/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-sc7180.c)158
-rw-r--r--drivers/net/ipa/ipa_data-v4.5.c437
-rw-r--r--drivers/net/ipa/ipa_data-v4.9.c430
-rw-r--r--drivers/net/ipa/ipa_data.h131
-rw-r--r--drivers/net/ipa/ipa_endpoint.c82
-rw-r--r--drivers/net/ipa/ipa_endpoint.h32
-rw-r--r--drivers/net/ipa/ipa_interrupt.c54
-rw-r--r--drivers/net/ipa/ipa_interrupt.h1
-rw-r--r--drivers/net/ipa/ipa_main.c330
-rw-r--r--drivers/net/ipa/ipa_mem.c15
-rw-r--r--drivers/net/ipa/ipa_mem.h21
-rw-r--r--drivers/net/ipa/ipa_modem.c34
-rw-r--r--drivers/net/ipa/ipa_qmi.c14
-rw-r--r--drivers/net/ipa/ipa_qmi.h14
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c78
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h6
-rw-r--r--drivers/net/ipa/ipa_reg.h495
-rw-r--r--drivers/net/ipa/ipa_resource.c176
-rw-r--r--drivers/net/ipa/ipa_resource.h23
-rw-r--r--drivers/net/ipa/ipa_smp2p.h2
-rw-r--r--drivers/net/ipa/ipa_table.c117
-rw-r--r--drivers/net/ipa/ipa_table.h27
-rw-r--r--drivers/net/ipa/ipa_uc.c5
-rw-r--r--drivers/net/ipa/ipa_version.h29
-rw-r--r--drivers/net/macvlan.c64
-rw-r--r--drivers/net/mdio.c2
-rw-r--r--drivers/net/mdio/Kconfig11
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c16
-rw-r--r--drivers/net/mdio/mdio-bitbang.c12
-rw-r--r--drivers/net/mdio/mdio-cavium.c2
-rw-r--r--drivers/net/mdio/mdio-gpio.c18
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c4
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c4
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c8
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c14
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c184
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c8
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c6
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c2
-rw-r--r--drivers/net/mdio/mdio-mux.c6
-rw-r--r--drivers/net/mdio/mdio-octeon.c8
-rw-r--r--drivers/net/mdio/mdio-thunder.c10
-rw-r--r--drivers/net/mdio/mdio-xgene.c6
-rw-r--r--drivers/net/mdio/of_mdio.c10
-rw-r--r--drivers/net/mhi/mhi.h1
-rw-r--r--drivers/net/mhi/net.c7
-rw-r--r--drivers/net/mhi/proto_mbim.c62
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/dev.c17
-rw-r--r--drivers/net/netdevsim/ethtool.c36
-rw-r--r--drivers/net/netdevsim/fib.c147
-rw-r--r--drivers/net/netdevsim/health.c11
-rw-r--r--drivers/net/netdevsim/netdevsim.h18
-rw-r--r--drivers/net/netdevsim/psample.c265
-rw-r--r--drivers/net/pcs/pcs-xpcs.c257
-rw-r--r--drivers/net/phy/Kconfig12
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/at803x.c100
-rw-r--r--drivers/net/phy/broadcom.c76
-rw-r--r--drivers/net/phy/intel-xway.c21
-rw-r--r--drivers/net/phy/marvell-88x2222.c621
-rw-r--r--drivers/net/phy/marvell.c559
-rw-r--r--drivers/net/phy/marvell10g.c386
-rw-r--r--drivers/net/phy/mdio-boardinfo.c2
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c217
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c621
-rw-r--r--drivers/net/phy/phy-c45.c51
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c52
-rw-r--r--drivers/net/phy/phylink.c5
-rw-r--r--drivers/net/phy/sfp-bus.c20
-rw-r--r--drivers/net/phy/sfp.c25
-rw-r--r--drivers/net/phy/sfp.h3
-rw-r--r--drivers/net/phy/smsc.c7
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/ppp_deflate.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c22
-rw-r--r--drivers/net/ppp/pppoe.c27
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/asix_devices.c12
-rw-r--r--drivers/net/usb/ax88179_178a.c6
-rw-r--r--drivers/net/usb/cdc_ether.c27
-rw-r--r--drivers/net/usb/cdc_ncm.c56
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/mcs7830.c4
-rw-r--r--drivers/net/usb/r8152.c3005
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/sr9700.c4
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/usb/usbnet.c38
-rw-r--r--drivers/net/veth.c199
-rw-r--r--drivers/net/virtio_net.c200
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c53
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wan/farsync.c3
-rw-r--r--drivers/net/wan/hdlc_x25.c30
-rw-r--r--drivers/net/wan/lapbether.c85
-rw-r--r--drivers/net/wan/z85230.h39
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c58
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c476
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c96
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h33
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c796
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h53
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c103
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c125
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c194
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h21
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c118
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h212
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c64
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h7
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h2
-rw-r--r--drivers/net/wireless/cisco/airo.c117
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c232
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h3
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c1
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_nortel.c8
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_pci.c8
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_plx.c8
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_tmd.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c24
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h12
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c65
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c71
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c424
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c209
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c299
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c183
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h59
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c272
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c112
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c184
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c220
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c132
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c437
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c152
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c168
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c525
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c236
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c210
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/trace.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c159
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c94
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/Kconfig1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c39
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c298
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c56
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c27
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c67
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c500
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c134
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c27
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h16
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c104
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h57
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c98
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c95
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c892
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h339
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c686
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_coex.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_ps.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h2
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c3
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.h12
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h7
-rw-r--r--drivers/net/wireless/wl3501.h49
-rw-r--r--drivers/net/wireless/wl3501_cs.c54
-rw-r--r--drivers/net/wwan/Kconfig37
-rw-r--r--drivers/net/wwan/Makefile9
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c284
-rw-r--r--drivers/net/wwan/wwan_core.c554
-rw-r--r--drivers/net/xen-netfront.c18
-rw-r--r--drivers/nfc/fdp/fdp.c49
-rw-r--r--drivers/nfc/pn533/i2c.c8
-rw-r--r--drivers/nfc/pn533/pn533.c20
-rw-r--r--drivers/nfc/s3fwrn5/core.c12
-rw-r--r--drivers/nfc/st-nci/spi.c7
-rw-r--r--drivers/nvdimm/btt.c1
-rw-r--r--drivers/nvdimm/pmem.c1
-rw-r--r--drivers/of/of_net.c85
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/parport/parport_ip32.c12
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/controller/Kconfig17
-rw-r--r--drivers/pci/controller/Makefile8
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c24
-rw-r--r--drivers/pci/controller/dwc/Kconfig12
-rw-r--r--drivers/pci/controller/dwc/Makefile10
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c14
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c11
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h1
-rw-r--r--drivers/pci/controller/dwc/pcie-fu740.c309
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c108
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig3
-rw-r--r--drivers/pci/controller/pci-host-common.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c4
-rw-r--r--drivers/pci/controller/pci-tegra.c349
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c2
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c13
-rw-r--r--drivers/pci/controller/pci-xgene.c3
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c4
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c20
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c1027
-rw-r--r--drivers/pci/controller/pcie-mediatek.c7
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c12
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c355
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c7
-rw-r--r--drivers/pci/controller/pcie-xilinx.c246
-rw-r--r--drivers/pci/controller/vmd.c63
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c16
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c22
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c2
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c2
-rw-r--r--drivers/pci/hotplug/acpiphp.h3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c1
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c5
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c2
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c5
-rw-r--r--drivers/pci/iov.c102
-rw-r--r--drivers/pci/msi.c45
-rw-r--r--drivers/pci/of.c22
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-label.c218
-rw-r--r--drivers/pci/pci-sysfs.c263
-rw-r--r--drivers/pci/pci.c50
-rw-r--r--drivers/pci/pci.h27
-rw-r--r--drivers/pci/pcie/aer.c6
-rw-r--r--drivers/pci/pcie/pme.c2
-rw-r--r--drivers/pci/pcie/rcec.c2
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/quirks.c29
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/vpd.c232
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/pcmcia/cistpl.c12
-rw-r--r--drivers/pcmcia/ds.c7
-rw-r--r--drivers/pcmcia/pcmcia_cis.c10
-rw-r--r--drivers/pcmcia/pcmcia_resource.c11
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c22
-rw-r--r--drivers/perf/arm_pmu.c30
-rw-r--r--drivers/phy/phy-core-mipi-dphy.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c8
-rw-r--r--drivers/pinctrl/Kconfig21
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/bcm/Kconfig62
-rw-r--r--drivers/pinctrl/bcm/Makefile7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6318.c498
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63268.c643
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6328.c404
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c369
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6362.c617
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6368.c523
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.c109
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.h43
-rw-r--r--drivers/pinctrl/core.c39
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sll.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8dxl.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mm.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mn.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mp.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qm.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qxp.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c5
-rw-r--r--drivers/pinctrl/mediatek/Kconfig6
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8195.c850
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c19
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h1669
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-cp110.c4
-rw-r--r--drivers/pinctrl/pinconf-generic.c6
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c8
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c16
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c1649
-rw-r--r--drivers/pinctrl/pinctrl-k210.c1
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c956
-rw-r--r--drivers/pinctrl/pinctrl-single.c71
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c4
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c906
-rw-r--r--drivers/pinctrl/pinmux.c106
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c24
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c123
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c7
-rw-r--r--drivers/pinctrl/renesas/core.c20
-rw-r--r--drivers/pinctrl/renesas/core.h8
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c48
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7740.c46
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7791.c387
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7792.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c31
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c31
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c35
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77990.c35
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c46
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c109
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h24
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c10
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c18
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c100
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c3
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_microb.c6
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/Makefile2
-rw-r--r--drivers/ptp/ptp_clockmatrix.c4
-rw-r--r--drivers/ptp/ptp_kvm_arm.c28
-rw-r--r--drivers/ptp/ptp_kvm_common.c (renamed from drivers/ptp/ptp_kvm.c)85
-rw-r--r--drivers/ptp/ptp_kvm_x86.c97
-rw-r--r--drivers/ptp/ptp_pch.c21
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c48
-rw-r--r--drivers/pwm/pwm-ab8500.c54
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c3
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c42
-rw-r--r--drivers/pwm/pwm-atmel.c30
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c5
-rw-r--r--drivers/pwm/pwm-bcm-kona.c8
-rw-r--r--drivers/pwm/pwm-bcm2835.c40
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-brcmstb.c1
-rw-r--r--drivers/pwm/pwm-clps711x.c1
-rw-r--r--drivers/pwm/pwm-crc.c1
-rw-r--r--drivers/pwm/pwm-cros-ec.c4
-rw-r--r--drivers/pwm/pwm-dwc.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c1
-rw-r--r--drivers/pwm/pwm-hibvt.c1
-rw-r--r--drivers/pwm/pwm-img.c1
-rw-r--r--drivers/pwm/pwm-imx-tpm.c5
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-imx27.c1
-rw-r--r--drivers/pwm/pwm-intel-lgm.c1
-rw-r--r--drivers/pwm/pwm-iqs620a.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c1
-rw-r--r--drivers/pwm/pwm-keembay.c1
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c5
-rw-r--r--drivers/pwm/pwm-lpc32xx.c5
-rw-r--r--drivers/pwm/pwm-lpss.c7
-rw-r--r--drivers/pwm/pwm-mediatek.c7
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/pwm-mtk-disp.c1
-rw-r--r--drivers/pwm/pwm-mxs.c1
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c1
-rw-r--r--drivers/pwm/pwm-pca9685.c303
-rw-r--r--drivers/pwm/pwm-pxa.c1
-rw-r--r--drivers/pwm/pwm-rcar.c1
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c1
-rw-r--r--drivers/pwm/pwm-rockchip.c1
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-sifive.c1
-rw-r--r--drivers/pwm/pwm-sl28cpld.c1
-rw-r--r--drivers/pwm/pwm-spear.c1
-rw-r--r--drivers/pwm/pwm-sprd.c4
-rw-r--r--drivers/pwm/pwm-sti.c7
-rw-r--r--drivers/pwm/pwm-stm32-lp.c1
-rw-r--r--drivers/pwm/pwm-stm32.c1
-rw-r--r--drivers/pwm/pwm-stmpe.c1
-rw-r--r--drivers/pwm/pwm-sun4i.c1
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/pwm-tiecap.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c1
-rw-r--r--drivers/pwm/pwm-twl-led.c1
-rw-r--r--drivers/pwm/pwm-twl.c1
-rw-r--r--drivers/pwm/pwm-visconti.c190
-rw-r--r--drivers/pwm/pwm-vt8500.c1
-rw-r--r--drivers/remoteproc/Kconfig7
-rw-r--r--drivers/remoteproc/imx_rproc.c322
-rw-r--r--drivers/remoteproc/ingenic_rproc.c2
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c2
-rw-r--r--drivers/remoteproc/mtk_scp.c6
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/pru_rproc.c47
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c26
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c19
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c599
-rw-r--r--drivers/remoteproc/qcom_wcnss.c10
-rw-r--r--drivers/remoteproc/remoteproc_cdev.c21
-rw-r--r--drivers/remoteproc/remoteproc_core.c337
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c8
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c2
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c21
-rw-r--r--drivers/remoteproc/remoteproc_internal.h12
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c21
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c205
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c2
-rw-r--r--drivers/reset/Kconfig1
-rw-r--r--drivers/reset/core.c215
-rw-r--r--drivers/rpmsg/qcom_glink_native.c17
-rw-r--r--drivers/rpmsg/qcom_smd.c16
-rw-r--r--drivers/rpmsg/rpmsg_char.c11
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c62
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/interface.c34
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c135
-rw-r--r--drivers/rtc/rtc-ds1307.c56
-rw-r--r--drivers/rtc/rtc-ds1511.c6
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c1
-rw-r--r--drivers/rtc/rtc-imx-sc.c11
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-m48t59.c2
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-omap.c5
-rw-r--r--drivers/rtc/rtc-pcf85063.c7
-rw-r--r--drivers/rtc/rtc-pcf8523.c196
-rw-r--r--drivers/rtc/rtc-pm8xxx.c11
-rw-r--r--drivers/rtc/rtc-rv3028.c4
-rw-r--r--drivers/rtc/rtc-rx6110.c7
-rw-r--r--drivers/rtc/rtc-s5m.c6
-rw-r--r--drivers/rtc/rtc-spear.c6
-rw-r--r--drivers/rtc/rtc-tps65910.c1
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/s390/cio/device.c3
-rw-r--r--drivers/s390/net/qeth_core_main.c18
-rw-r--r--drivers/s390/net/qeth_l3_main.c31
-rw-r--r--drivers/scsi/53c700.c1
-rw-r--r--drivers/scsi/53c700.h1
-rw-r--r--drivers/scsi/aacraid/TODO3
-rw-r--r--drivers/scsi/ch.c6
-rw-r--r--drivers/scsi/cxlflash/main.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c1
-rw-r--r--drivers/scsi/ips.c20
-rw-r--r--drivers/scsi/ips.h20
-rw-r--r--drivers/scsi/lasi700.c1
-rw-r--r--drivers/scsi/megaraid/mbox_defs.h2
-rw-r--r--drivers/scsi/megaraid/mega_common.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h2
-rw-r--r--drivers/scsi/qla1280.c12
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sni_53c710.c1
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c55
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig19
-rw-r--r--drivers/staging/fsl-dpaa2/Makefile6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/Makefile10
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/README106
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/TODO13
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.h594
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c1839
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h80
-rw-r--r--drivers/staging/media/tegra-video/vi.c6
-rw-r--r--drivers/staging/octeon/ethernet.c10
-rw-r--r--drivers/staging/wfx/main.c7
-rw-r--r--drivers/thermal/amlogic_thermal.c4
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c1
-rw-r--r--drivers/thermal/cpufreq_cooling.c49
-rw-r--r--drivers/thermal/cpuidle_cooling.c37
-rw-r--r--drivers/thermal/devfreq_cooling.c25
-rw-r--r--drivers/thermal/gov_fair_share.c11
-rw-r--r--drivers/thermal/gov_power_allocator.c32
-rw-r--r--drivers/thermal/hisi_thermal.c10
-rw-r--r--drivers/thermal/intel/Kconfig11
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c129
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c91
-rw-r--r--drivers/thermal/qcom/tsens-8960.c235
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c98
-rw-r--r--drivers/thermal/qcom/tsens-v1.c4
-rw-r--r--drivers/thermal/qcom/tsens.c165
-rw-r--r--drivers/thermal/qcom/tsens.h6
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c3
-rw-r--r--drivers/thermal/sun8i_thermal.c4
-rw-r--r--drivers/thermal/tegra/soctherm.c15
-rw-r--r--drivers/thermal/thermal_core.c57
-rw-r--r--drivers/thermal/thermal_core.h1
-rw-r--r--drivers/thermal/thermal_helpers.c27
-rw-r--r--drivers/thermal/thermal_mmio.c5
-rw-r--r--drivers/thermal/thermal_of.c7
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c37
-rw-r--r--drivers/vdpa/Kconfig15
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c24
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h26
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c86
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c85
-rw-r--r--drivers/vdpa/vdpa.c12
-rw-r--r--drivers/vdpa/vdpa_sim/Makefile1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c127
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c338
-rw-r--r--drivers/vdpa/virtio_pci/Makefile2
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c484
-rw-r--r--drivers/vfio/vfio_iommu_type1.c31
-rw-r--r--drivers/vhost/vdpa.c26
-rw-r--r--drivers/vhost/vringh.c69
-rw-r--r--drivers/video/fbdev/efifb.c6
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c9
-rw-r--r--drivers/video/fbdev/vga16fb.c10
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c43
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c27
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c67
-rw-r--r--drivers/xen/swiotlb-xen.c182
2070 files changed, 124782 insertions, 36550 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 8f3fee8281ad..5a6d613e868d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
obj-y += soc/
obj-$(CONFIG_VIRTIO) += virtio/
+obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio/
obj-$(CONFIG_VDPA) += vdpa/
obj-$(CONFIG_XEN) += xen/
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b02fd51e5589..8cc195c4c861 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -171,6 +171,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
acpi_handle handle = mem_device->device->handle;
int result, num_enabled = 0;
struct acpi_memory_info *info;
+ mhp_t mhp_flags = MHP_NONE;
int node;
node = acpi_get_node(handle);
@@ -194,8 +195,10 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
+ if (mhp_supports_memmap_on_memory(info->length))
+ mhp_flags |= MHP_MEMMAP_ON_MEMORY;
result = __add_memory(node, info->start_addr, info->length,
- MHP_NONE);
+ mhp_flags);
/*
* If the memory block has been used by the kernel, add_memory()
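
The hunk above opts hot-added ACPI memory into memmap-on-memory only when the range qualifies. A minimal sketch of that probe-then-flag pattern, assuming the 5.13-era hotplug API (mhp_supports_memmap_on_memory(), __add_memory(), and the MHP_* flags from <linux/memory_hotplug.h>); the helper name is illustrative, not from the patch:

#include <linux/memory_hotplug.h>

/*
 * Illustrative helper: start from MHP_NONE and OR in optional
 * capabilities only after probing for support, so unsupported
 * configurations fall back to a plain __add_memory() call.
 */
static int add_memory_with_optional_memmap(int nid, u64 start, u64 size)
{
	mhp_t mhp_flags = MHP_NONE;

	if (mhp_supports_memmap_on_memory(size))
		mhp_flags |= MHP_MEMMAP_ON_MEMORY; /* vmemmap placed on the new range */

	return __add_memory(nid, start, size, mhp_flags);
}
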
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 2494138a6905..3912a1f6058e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -968,15 +968,16 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
static void iort_named_component_init(struct device *dev,
struct acpi_iort_node *node)
{
+ struct property_entry props[2] = {};
struct acpi_iort_named_component *nc;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec)
- return;
nc = (struct acpi_iort_named_component *)node->node_data;
- fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
- nc->node_flags);
+ props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
+ FIELD_GET(ACPI_IORT_NC_PASID_BITS,
+ nc->node_flags));
+
+ if (device_add_properties(dev, props))
+ dev_warn(dev, "Could not add device properties\n");
}
static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
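
The iort.c hunk swaps direct fwspec poking for a generic device property. A minimal sketch of that mechanism, assuming the <linux/property.h> API the hunk uses (PROPERTY_ENTRY_U32(), device_add_properties()); the wrapper name is hypothetical:

#include <linux/property.h>

/*
 * Illustrative wrapper: the property_entry array is sentinel-terminated,
 * which is why the hunk sizes props[2] for a single "pasid-num-bits"
 * entry -- the zeroed second slot ends the list.
 */
static int attach_pasid_bits(struct device *dev, u32 pasid_bits)
{
	struct property_entry props[2] = {
		PROPERTY_ENTRY_U32("pasid-num-bits", pasid_bits),
		{ /* sentinel */ }
	};

	return device_add_properties(dev, props);
}
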
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 443fdf62dd22..d39a9b474727 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user *user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
+ /* make sure the buf is not allocated */
+ kfree(buf);
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user *user_buf,
(*ppos + count < count) ||
(count > uncopied_bytes)) {
kfree(buf);
+ buf = NULL;
return -EINVAL;
}
@@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user *user_buf,
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
- kfree(buf);
return count;
}
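
The custom_method fix addresses a buffer that outlives a single write() call through a static pointer: free any stale allocation before reallocating, and NULL the pointer on the error path so a later call cannot double-free it. A minimal sketch of that idiom (names are illustrative, not from the driver):

#include <linux/slab.h>

static char *buf; /* persists across write() calls, as in custom_method.c */

static int reset_buffer(size_t new_size)
{
	kfree(buf); /* kfree(NULL) is a no-op, so this is safe on first use */
	buf = kzalloc(new_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	return 0;
}

static void drop_buffer_on_error(void)
{
	kfree(buf);
	buf = NULL; /* prevent a double-free on the next invocation */
}
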
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index f973bbe90e5e..b852cff80287 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -142,7 +142,6 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
-void acpi_turn_off_unused_power_resources(void);
/* --------------------------------------------------------------------------
Device Power Management
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 95f23acd5b80..53cab975f612 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -116,6 +116,13 @@ static struct mcfg_fixup mcfg_quirks[] = {
THUNDER_ECAM_QUIRK(2, 12),
THUNDER_ECAM_QUIRK(2, 13),
+ { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops},
+
#define XGENE_V1_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v1_pcie_ecam_ops }
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 56102eaaa2da..32974b575e46 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -995,7 +995,6 @@ void acpi_resume_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
-#endif
void acpi_turn_off_unused_power_resources(void)
{
@@ -1016,3 +1015,4 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+#endif
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index bc973fbd70b2..a22778e880c2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2359,8 +2359,6 @@ int __init acpi_scan_init(void)
}
}
- acpi_turn_off_unused_power_resources();
-
acpi_scan_initialized = true;
out:
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 7fe41ee489d6..1856f76ac83f 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -8,6 +8,7 @@ extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
extern void acpi_resume_power_resources(void);
+extern void acpi_turn_off_unused_power_resources(void);
static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
{
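
Taken together, the internal.h, power.c, scan.c, and sleep.h hunks make acpi_turn_off_unused_power_resources() a sleep-only facility: the scan-time call goes away, the definition moves inside power.c's sleep-guarded block (the relocated #endif), and the declaration lands next to acpi_resume_power_resources(). A hypothetical resume-side caller, not shown in this diff, might look like:

/*
 * Hypothetical sketch of the intended ordering on resume: first restore
 * resources that were in use before suspend, then switch off whatever
 * nothing references anymore. The call site is assumed, not quoted.
 */
static void example_acpi_pm_finish(void)
{
	acpi_resume_power_resources();
	acpi_turn_off_unused_power_resources();
}
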
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5b32df5d33ad..6e9c5ade4c2e 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -86,7 +86,8 @@ struct brcm_ahci_priv {
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
- struct reset_control *rcdev;
+ struct reset_control *rcdev_rescal;
+ struct reset_control *rcdev_ahci;
};
static inline u32 brcm_sata_readreg(void __iomem *addr)
@@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
else
ret = 0;
- if (priv->version != BRCM_SATA_BCM7216)
- reset_control_assert(priv->rcdev);
+ reset_control_assert(priv->rcdev_ahci);
+ reset_control_rearm(priv->rcdev_rescal);
return ret;
}
@@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret = 0;
- if (priv->version == BRCM_SATA_BCM7216)
- ret = reset_control_reset(priv->rcdev);
- else
- ret = reset_control_deassert(priv->rcdev);
+ ret = reset_control_deassert(priv->rcdev_ahci);
+ if (ret)
+ return ret;
+ ret = reset_control_reset(priv->rcdev_rescal);
if (ret)
return ret;
@@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
- const char *reset_name = NULL;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
@@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- /* Reset is optional depending on platform and named differently */
- if (priv->version == BRCM_SATA_BCM7216)
- reset_name = "rescal";
- else
- reset_name = "ahci";
-
- priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
- if (IS_ERR(priv->rcdev))
- return PTR_ERR(priv->rcdev);
+ if (priv->version == BRCM_SATA_BCM7216) {
+ priv->rcdev_rescal = devm_reset_control_get_optional_shared(
+ &pdev->dev, "rescal");
+ if (IS_ERR(priv->rcdev_rescal))
+ return PTR_ERR(priv->rcdev_rescal);
+ }
+ priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
+ if (IS_ERR(priv->rcdev_ahci))
+ return PTR_ERR(priv->rcdev_ahci);
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
@@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
break;
}
- if (priv->version == BRCM_SATA_BCM7216)
- ret = reset_control_reset(priv->rcdev);
- else
- ret = reset_control_deassert(priv->rcdev);
+ ret = reset_control_reset(priv->rcdev_rescal);
+ if (ret)
+ return ret;
+ ret = reset_control_deassert(priv->rcdev_ahci);
if (ret)
return ret;
@@ -539,8 +539,8 @@ out_disable_regulators:
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
- if (priv->version != BRCM_SATA_BCM7216)
- reset_control_assert(priv->rcdev);
+ reset_control_assert(priv->rcdev_ahci);
+ reset_control_rearm(priv->rcdev_rescal);
return ret;
}
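
The ahci_brcm rework models the BCM7216 "rescal" line as a shared, self-deasserting reset alongside a conventional "ahci" reset. A minimal sketch of the reset_control_reset()/reset_control_rearm() pairing it relies on, assuming the shared-reset API from <linux/reset.h>; the function names are illustrative:

#include <linux/reset.h>

/*
 * On a shared reset, reset_control_reset() pulses the line only for the
 * first consumer to ask; reset_control_rearm() gives that trigger back,
 * so the next reset_control_reset() after suspend can pulse again.
 */
static int example_resume(struct reset_control *rescal)
{
	return reset_control_reset(rescal);
}

static int example_suspend(struct reset_control *rescal)
{
	return reset_control_rearm(rescal);
}
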
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 495fd0a1f040..b508df2ecada 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
-#include <linux/atm_suni.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/firmware.h>
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 0c13cac903de..9e4bd751db79 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1784,12 +1784,6 @@ set_tct(struct idt77252_dev *card, struct vc_map *vc)
/*****************************************************************************/
static __inline__ int
-idt77252_fbq_level(struct idt77252_dev *card, int queue)
-{
- return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) & 0x0f;
-}
-
-static __inline__ int
idt77252_fbq_full(struct idt77252_dev *card, int queue)
{
return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) == 0x0f;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index eef637fd90b3..933e3ff2ee8d 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -680,7 +680,7 @@ static void ia_tx_poll (IADEV *iadev) {
skb1 = skb_dequeue(&iavcc->txing_skb);
}
if (!skb1) {
- IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
+ IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
break;
}
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index c920a8c52925..21e5acc766b8 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -21,7 +21,6 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/capability.h>
-#include <linux/atm_suni.h>
#include <linux/slab.h>
#include <asm/param.h>
#include <linux/uaccess.h>
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index ff5755ee5694..eba04c0de7eb 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1737,10 +1737,3 @@ module_init(panel_init_module);
module_exit(panel_cleanup_module);
MODULE_AUTHOR("Willy Tarreau");
MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 8
- * End:
- */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 78355095e00d..4fdb8219cd08 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -15,6 +15,7 @@
#include <linux/kernel_read_file.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/initrd.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
@@ -504,6 +505,7 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
if (!path)
return -ENOMEM;
+ wait_for_initramfs();
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
size_t file_size = 0;
size_t *file_size_ptr = NULL;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f35298425575..b31b3af5c490 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -169,30 +169,98 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}
+static int memory_block_online(struct memory_block *mem)
+{
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+ struct zone *zone;
+ int ret;
+
+ zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);
+
+ /*
+ * Although vmemmap pages have a different lifecycle than the pages
+ * they describe (they remain until the memory is unplugged), doing
+ * their initialization and accounting at memory onlining/offlining
+ * stage helps to keep accounting easier to follow - e.g. vmemmap
+ * pages belong to the same zone as the memory they back.
+ */
+ if (nr_vmemmap_pages) {
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ if (ret)
+ return ret;
+ }
+
+ ret = online_pages(start_pfn + nr_vmemmap_pages,
+ nr_pages - nr_vmemmap_pages, zone);
+ if (ret) {
+ if (nr_vmemmap_pages)
+ mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ return ret;
+ }
+
+ /*
+ * Account once onlining succeeded. If the zone was unpopulated, it is
+ * now already properly populated.
+ */
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(zone, nr_vmemmap_pages);
+
+ return ret;
+}
+
+static int memory_block_offline(struct memory_block *mem)
+{
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+ struct zone *zone;
+ int ret;
+
+ zone = page_zone(pfn_to_page(start_pfn));
+
+ /*
+ * Unaccount before offlining, such that unpopulated zone and kthreads
+ * can properly be torn down in offline_pages().
+ */
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(zone, -nr_vmemmap_pages);
+
+ ret = offline_pages(start_pfn + nr_vmemmap_pages,
+ nr_pages - nr_vmemmap_pages);
+ if (ret) {
+ /* offline_pages() failed. Account back. */
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(zone, nr_vmemmap_pages);
+ return ret;
+ }
+
+ if (nr_vmemmap_pages)
+ mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+
+ return ret;
+}
+
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
static int
-memory_block_action(unsigned long start_section_nr, unsigned long action,
- int online_type, int nid)
+memory_block_action(struct memory_block *mem, unsigned long action)
{
- unsigned long start_pfn;
- unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
int ret;
- start_pfn = section_nr_to_pfn(start_section_nr);
-
switch (action) {
case MEM_ONLINE:
- ret = online_pages(start_pfn, nr_pages, online_type, nid);
+ ret = memory_block_online(mem);
break;
case MEM_OFFLINE:
- ret = offline_pages(start_pfn, nr_pages);
+ ret = memory_block_offline(mem);
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
- "%ld\n", __func__, start_section_nr, action, action);
+ "%ld\n", __func__, mem->start_section_nr, action, action);
ret = -EINVAL;
}
@@ -210,9 +278,7 @@ static int memory_block_change_state(struct memory_block *mem,
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
- ret = memory_block_action(mem->start_section_nr, to_state,
- mem->online_type, mem->nid);
-
+ ret = memory_block_action(mem, to_state);
mem->state = ret ? from_state_req : to_state;
return ret;
@@ -567,7 +633,8 @@ int register_memory(struct memory_block *memory)
return ret;
}
-static int init_memory_block(unsigned long block_id, unsigned long state)
+static int init_memory_block(unsigned long block_id, unsigned long state,
+ unsigned long nr_vmemmap_pages)
{
struct memory_block *mem;
int ret = 0;
@@ -584,6 +651,7 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
mem->nid = NUMA_NO_NODE;
+ mem->nr_vmemmap_pages = nr_vmemmap_pages;
ret = register_memory(mem);
@@ -603,7 +671,7 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE);
+ MEM_ONLINE, 0);
}
static void unregister_memory(struct memory_block *memory)
@@ -625,7 +693,8 @@ static void unregister_memory(struct memory_block *memory)
*
* Called under device_hotplug_lock.
*/
-int create_memory_block_devices(unsigned long start, unsigned long size)
+int create_memory_block_devices(unsigned long start, unsigned long size,
+ unsigned long vmemmap_pages)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
@@ -638,7 +707,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE);
+ ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
if (ret)
break;
}
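
Note: to make the offset arithmetic in memory_block_online()/memory_block_offline() concrete, here is a worked example with assumed values (128 MiB block, 4 KiB pages, 64-byte struct page; none of these numbers come from the patch itself):

    #include <stdio.h>

    int main(void)
    {
            unsigned long block_bytes = 128UL << 20; /* assumed block size */
            unsigned long page_size = 4096;          /* assumed page size */
            unsigned long struct_page = 64;          /* assumed sizeof(struct page) */

            unsigned long nr_pages = block_bytes / page_size;          /* 32768 */
            unsigned long memmap_bytes = nr_pages * struct_page;       /* 2 MiB */
            unsigned long nr_vmemmap_pages = memmap_bytes / page_size; /* 512 */

            /* Mirrors the online path above: the block's own memmap
             * occupies its first nr_vmemmap_pages, so only the
             * remainder is handed to online_pages().
             */
            printf("vmemmap pages at head of block: %lu\n", nr_vmemmap_pages);
            printf("pages passed to online_pages(): %lu\n",
                   nr_pages - nr_vmemmap_pages);
            return 0;
    }
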
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 87760aa60446..12aca34e8db0 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -52,13 +52,6 @@ static inline u32 mips_read32(struct bcma_drv_mips *mcore,
return bcma_read32(mcore->core, offset);
}
-static inline void mips_write32(struct bcma_drv_mips *mcore,
- u16 offset,
- u32 value)
-{
- bcma_write32(mcore->core, offset, value);
-}
-
static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
{
u32 flag;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6e622c1327ee..7562cf30b14e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -18,6 +18,7 @@
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
+#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a370cde3ddd4..d58d68f3c7cd 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -53,6 +53,7 @@
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4e73a531b377..851842372c9b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -425,4 +425,14 @@ config BT_HCIRSI
Say Y here to compile support for HCI over Redpine into the
kernel or say M to compile as a module.
+config BT_VIRTIO
+ tristate "Virtio Bluetooth driver"
+ depends on VIRTIO
+ help
+ This driver supports Bluetooth devices attached over the Virtio
+ transport.
+
+ Say Y here to compile support for HCI over Virtio into the
+ kernel or say M to compile as a module.
+
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 1a58a3ae142c..16286ea2655d 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
+obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o
+
obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o
obj-$(CONFIG_BT_HCIRSI) += btrsi.o
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 88ce5f0ffc4b..e44b6993cf91 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -24,6 +24,14 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
+#define CMD_WRITE_BOOT_PARAMS 0xfc0e
+struct cmd_write_boot_params {
+ u32 boot_addr;
+ u8 fw_build_num;
+ u8 fw_build_ww;
+ u8 fw_build_yy;
+} __packed;
+
int btintel_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -208,10 +216,39 @@ void btintel_hw_error(struct hci_dev *hdev, u8 code)
}
EXPORT_SYMBOL_GPL(btintel_hw_error);
-void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
{
const char *variant;
+ /* The hardware platform number has a fixed value of 0x37 and,
+ * for now, only this single value is accepted.
+ */
+ if (ver->hw_platform != 0x37) {
+ bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
+ ver->hw_platform);
+ return -EINVAL;
+ }
+
+ /* Check for supported iBT hardware variants of this firmware
+ * loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
+ */
+ switch (ver->hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* CcP */
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver->hw_variant);
+ return -EINVAL;
+ }
+
switch (ver->fw_variant) {
case 0x06:
variant = "Bootloader";
@@ -220,13 +257,16 @@ void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
variant = "Firmware";
break;
default:
- return;
+ bt_dev_err(hdev, "Unsupported firmware variant(%02x)", ver->fw_variant);
+ return -EINVAL;
}
bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
ver->fw_build_num, ver->fw_build_ww,
2000 + ver->fw_build_yy);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(btintel_version_info);
@@ -364,13 +404,56 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
}
EXPORT_SYMBOL_GPL(btintel_read_version);
-void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
+int btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
{
const char *variant;
+ /* The hardware platform number has a fixed value of 0x37 and,
+ * for now, only this single value is accepted.
+ */
+ if (INTEL_HW_PLATFORM(version->cnvi_bt) != 0x37) {
+ bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
+ INTEL_HW_PLATFORM(version->cnvi_bt));
+ return -EINVAL;
+ }
+
+ /* Check for supported iBT hardware variants of this firmware
+ * loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
+ */
+ switch (INTEL_HW_VARIANT(version->cnvi_bt)) {
+ case 0x17: /* TyP */
+ case 0x18: /* Slr */
+ case 0x19: /* Slr-F */
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
+ INTEL_HW_VARIANT(version->cnvi_bt));
+ return -EINVAL;
+ }
+
switch (version->img_type) {
case 0x01:
variant = "Bootloader";
+ /* It is required that every single firmware fragment is acknowledged
+ * with a command complete event. If the boot parameters indicate
+ * that this bootloader does not send them, then abort the setup.
+ */
+ if (version->limited_cce != 0x00) {
+ bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)",
+ version->limited_cce);
+ return -EINVAL;
+ }
+
+ /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */
+ if (version->sbe_type > 0x01) {
+ bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)",
+ version->sbe_type);
+ return -EINVAL;
+ }
+
bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
bt_dev_info(hdev, "Secure boot is %s",
version->secure_boot ? "enabled" : "disabled");
@@ -389,15 +472,14 @@ void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *ve
break;
default:
bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type);
- goto done;
+ return -EINVAL;
}
bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
2000 + (version->timestamp >> 8), version->timestamp & 0xff,
version->build_type, version->build_num);
-done:
- return;
+ return 0;
}
EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
@@ -455,12 +537,23 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver
version->img_type = tlv->val[0];
break;
case INTEL_TLV_TIME_STAMP:
+ /* If image type is Operational firmware (0x03), then
+ * running FW Calendar Week and Year information can
+ * be extracted from Timestamp information
+ */
+ version->min_fw_build_cw = tlv->val[0];
+ version->min_fw_build_yy = tlv->val[1];
version->timestamp = get_unaligned_le16(tlv->val);
break;
case INTEL_TLV_BUILD_TYPE:
version->build_type = tlv->val[0];
break;
case INTEL_TLV_BUILD_NUM:
+ /* If image type is Operational firmware (0x03), then
+ * running FW build number can be extracted from the
+ * Build information
+ */
+ version->min_fw_build_nn = tlv->val[0];
version->build_num = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_SECURE_BOOT:
@@ -841,7 +934,7 @@ static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
static int btintel_download_firmware_payload(struct hci_dev *hdev,
const struct firmware *fw,
- u32 *boot_param, size_t offset)
+ size_t offset)
{
int err;
const u8 *fw_ptr;
@@ -854,20 +947,6 @@ static int btintel_download_firmware_payload(struct hci_dev *hdev,
while (fw_ptr - fw->data < fw->size) {
struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
- /* Each SKU has a different reset parameter to use in the
- * HCI_Intel_Reset command and it is embedded in the firmware
- * data. So, instead of using static value per SKU, check
- * the firmware data and save it for later use.
- */
- if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
- /* The boot parameter is the first 32-bit value
- * and rest of 3 octets are reserved.
- */
- *boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));
-
- bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
- }
-
frag_len += sizeof(*cmd) + cmd->plen;
/* The parameter length of the secure send command requires
@@ -896,28 +975,131 @@ done:
return err;
}
+static bool btintel_firmware_version(struct hci_dev *hdev,
+ u8 num, u8 ww, u8 yy,
+ const struct firmware *fw,
+ u32 *boot_addr)
+{
+ const u8 *fw_ptr;
+
+ fw_ptr = fw->data;
+
+ while (fw_ptr - fw->data < fw->size) {
+ struct hci_command_hdr *cmd = (void *)(fw_ptr);
+
+ /* Each SKU has a different reset parameter to use in the
+ * HCI_Intel_Reset command and it is embedded in the firmware
+ * data. So, instead of using a static value per SKU, check
+ * the firmware data and save it for later use.
+ */
+ if (le16_to_cpu(cmd->opcode) == CMD_WRITE_BOOT_PARAMS) {
+ struct cmd_write_boot_params *params;
+
+ params = (void *)(fw_ptr + sizeof(*cmd));
+
+ bt_dev_info(hdev, "Boot Address: 0x%x",
+ le32_to_cpu(params->boot_addr));
+
+ bt_dev_info(hdev, "Firmware Version: %u-%u.%u",
+ params->fw_build_num, params->fw_build_ww,
+ params->fw_build_yy);
+
+ return (num == params->fw_build_num &&
+ ww == params->fw_build_ww &&
+ yy == params->fw_build_yy);
+ }
+
+ fw_ptr += sizeof(*cmd) + cmd->plen;
+ }
+
+ return false;
+}
+
int btintel_download_firmware(struct hci_dev *hdev,
+ struct intel_version *ver,
const struct firmware *fw,
u32 *boot_param)
{
int err;
+ /* SfP and WsP don't seem to update the firmware version in the
+ * firmware file, so version checking is currently not possible.
+ */
+ switch (ver->hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ /* Skip version checking */
+ break;
+ default:
+ /* Skip reading firmware file version in bootloader mode */
+ if (ver->fw_variant == 0x06)
+ break;
+
+ /* Skip download if firmware has the same version */
+ if (btintel_firmware_version(hdev, ver->fw_build_num,
+ ver->fw_build_ww, ver->fw_build_yy,
+ fw, boot_param)) {
+ bt_dev_info(hdev, "Firmware already loaded");
+ /* Return -EALREADY to indicate that the firmware has
+ * already been loaded.
+ */
+ return -EALREADY;
+ }
+ }
+
+ /* The firmware variant determines if the device is in bootloader
+ * mode or is running operational firmware. The value 0x06 identifies
+ * the bootloader and the value 0x23 identifies the operational
+ * firmware.
+ *
+ * If the firmware version has changed, an operational device needs
+ * to be reset to the bootloader so the new firmware can be loaded.
+ */
+ if (ver->fw_variant == 0x23)
+ return -EINVAL;
+
err = btintel_sfi_rsa_header_secure_send(hdev, fw);
if (err)
return err;
- return btintel_download_firmware_payload(hdev, fw, boot_param,
- RSA_HEADER_LEN);
+ return btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN);
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ struct intel_version_tlv *ver,
const struct firmware *fw, u32 *boot_param,
u8 hw_variant, u8 sbe_type)
{
int err;
u32 css_header_ver;
+ /* Skip reading firmware file version in bootloader mode */
+ if (ver->img_type != 0x01) {
+ /* Skip download if firmware has the same version */
+ if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
+ ver->min_fw_build_cw,
+ ver->min_fw_build_yy,
+ fw, boot_param)) {
+ bt_dev_info(hdev, "Firmware already loaded");
+ /* Return -EALREADY to indicate that firmware has
+ * already been loaded.
+ */
+ return -EALREADY;
+ }
+ }
+
+ /* The firmware variant determines if the device is in bootloader
+ * mode or is running operational firmware. The value 0x01 identifies
+ * the bootloader and the value 0x03 identifies the operational
+ * firmware.
+ *
+ * If the firmware version has changed, an operational device needs
+ * to be reset to the bootloader so the new firmware can be loaded.
+ */
+ if (ver->img_type == 0x03)
+ return -EINVAL;
+
/* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
* only RSA secure boot engine. Hence, the corresponding sfi file will
* have RSA header of 644 bytes followed by Command Buffer.
@@ -947,7 +1129,7 @@ int btintel_download_firmware_newgen(struct hci_dev *hdev,
if (err)
return err;
- err = btintel_download_firmware_payload(hdev, fw, boot_param, RSA_HEADER_LEN);
+ err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN);
if (err)
return err;
} else if (hw_variant >= 0x17) {
@@ -968,7 +1150,6 @@ int btintel_download_firmware_newgen(struct hci_dev *hdev,
return err;
err = btintel_download_firmware_payload(hdev, fw,
- boot_param,
RSA_HEADER_LEN + ECDSA_HEADER_LEN);
if (err)
return err;
@@ -978,7 +1159,6 @@ int btintel_download_firmware_newgen(struct hci_dev *hdev,
return err;
err = btintel_download_firmware_payload(hdev, fw,
- boot_param,
RSA_HEADER_LEN + ECDSA_HEADER_LEN);
if (err)
return err;
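
Note: btintel_firmware_version() above works because an Intel .sfi image is laid out as a stream of HCI commands (little-endian 16-bit opcode, 8-bit parameter length, parameters). A self-contained userspace sketch of the same walk, run against a fabricated blob (the blob and helper name are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_WRITE_BOOT_PARAMS 0xfc0e

    static int find_boot_params(const uint8_t *fw, size_t size)
    {
            size_t off = 0;

            while (off + 3 <= size) {
                    uint16_t opcode = fw[off] | (fw[off + 1] << 8);
                    uint8_t plen = fw[off + 2];

                    if (opcode == CMD_WRITE_BOOT_PARAMS && off + 3 + 7 <= size) {
                            const uint8_t *p = fw + off + 3;
                            uint32_t boot_addr = p[0] | (p[1] << 8) |
                                                 ((uint32_t)p[2] << 16) |
                                                 ((uint32_t)p[3] << 24);

                            printf("boot addr 0x%08x, build %u week %u year %u\n",
                                   boot_addr, p[4], p[5], p[6]);
                            return 0;
                    }
                    off += 3 + plen; /* skip command header + parameters */
            }
            return -1;
    }

    int main(void)
    {
            /* One fabricated 0xfc0e command: boot addr 0x00100000,
             * build 7, week 40, year 21.
             */
            const uint8_t blob[] = {
                    0x0e, 0xfc, 0x07,
                    0x00, 0x00, 0x10, 0x00, 7, 40, 21,
            };

            return find_boot_params(blob, sizeof(blob));
    }
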
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 6511b091caf5..d184064a5e7c 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -148,8 +148,8 @@ int btintel_set_diag(struct hci_dev *hdev, bool enable);
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
-void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
-void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version);
+int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version);
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
const void *param);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
@@ -163,9 +163,10 @@ struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param);
int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
-int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
- u32 *boot_param);
+int btintel_download_firmware(struct hci_dev *dev, struct intel_version *ver,
+ const struct firmware *fw, u32 *boot_param);
int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ struct intel_version_tlv *ver,
const struct firmware *fw,
u32 *boot_param, u8 hw_variant,
u8 sbe_type);
@@ -210,14 +211,16 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
}
-static inline void btintel_version_info(struct hci_dev *hdev,
- struct intel_version *ver)
+static inline int btintel_version_info(struct hci_dev *hdev,
+ struct intel_version *ver)
{
+ return -EOPNOTSUPP;
}
-static inline void btintel_version_info_tlv(struct hci_dev *hdev,
- struct intel_version_tlv *version)
+static inline int btintel_version_info_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version)
{
+ return -EOPNOTSUPP;
}
static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5cbfbd948f67..5d603ef39bad 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -399,7 +399,9 @@ static const struct usb_device_id blacklist_table[] = {
/* MediaTek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
- .driver_info = BTUSB_MEDIATEK },
+ .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional MediaTek MT7615E Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
@@ -455,6 +457,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xc123), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -2400,7 +2404,7 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return -EILSEQ;
}
-static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
+static int btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
struct intel_boot_params *params,
char *fw_name, size_t len,
const char *suffix)
@@ -2424,9 +2428,10 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
suffix);
break;
default:
- return false;
+ return -EINVAL;
}
- return true;
+
+ return 0;
}
static void btusb_setup_intel_newgen_get_fw_name(const struct intel_version_tlv *ver_tlv,
@@ -2444,6 +2449,44 @@ static void btusb_setup_intel_newgen_get_fw_name(const struct intel_version_tlv
suffix);
}
+static int btusb_download_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ ktime_t delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+
+ bt_dev_info(hdev, "Waiting for firmware download to complete");
+
+ err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(msec));
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Firmware loading interrupted");
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Firmware loading timeout");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
+ bt_dev_err(hdev, "Firmware loading failed");
+ return -ENOEXEC;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
+
+ return 0;
+}
+
static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
struct intel_version_tlv *ver,
u32 *boot_param)
@@ -2452,19 +2495,11 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
char fwname[64];
int err;
struct btusb_data *data = hci_get_drvdata(hdev);
+ ktime_t calltime;
if (!ver || !boot_param)
return -EINVAL;
- /* The hardware platform number has a fixed value of 0x37 and
- * for now only accept this single value.
- */
- if (INTEL_HW_PLATFORM(ver->cnvi_bt) != 0x37) {
- bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
- INTEL_HW_PLATFORM(ver->cnvi_bt));
- return -EINVAL;
- }
-
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x03 identifies
* the bootloader and the value 0x23 identifies the operational
@@ -2481,50 +2516,6 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
if (ver->img_type == 0x03) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
- return 0;
- }
-
- /* Check for supported iBT hardware variants of this firmware
- * loading method.
- *
- * This check has been put in place to ensure correct forward
- * compatibility options when newer hardware variants come along.
- */
- switch (INTEL_HW_VARIANT(ver->cnvi_bt)) {
- case 0x17: /* TyP */
- case 0x18: /* Slr */
- case 0x19: /* Slr-F */
- break;
- default:
- bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
- INTEL_HW_VARIANT(ver->cnvi_bt));
- return -EINVAL;
- }
-
- /* If the device is not in bootloader mode, then the only possible
- * choice is to return an error and abort the device initialization.
- */
- if (ver->img_type != 0x01) {
- bt_dev_err(hdev, "Unsupported Intel firmware variant (0x%x)",
- ver->img_type);
- return -ENODEV;
- }
-
- /* It is required that every single firmware fragment is acknowledged
- * with a command complete event. If the boot parameters indicate
- * that this bootloader does not send them, then abort the setup.
- */
- if (ver->limited_cce != 0x00) {
- bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)",
- ver->limited_cce);
- return -EINVAL;
- }
-
- /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */
- if (ver->sbe_type > 0x01) {
- bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)",
- ver->sbe_type);
- return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
@@ -2538,7 +2529,8 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
- bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err);
+ bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
+ fwname, err);
return err;
}
@@ -2551,22 +2543,28 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
goto done;
}
+ calltime = ktime_get();
+
set_bit(BTUSB_DOWNLOADING, &data->flags);
/* Start firmware downloading and get boot parameter */
- err = btintel_download_firmware_newgen(hdev, fw, boot_param,
+ err = btintel_download_firmware_newgen(hdev, ver, fw, boot_param,
INTEL_HW_VARIANT(ver->cnvi_bt),
ver->sbe_type);
if (err < 0) {
+ if (err == -EALREADY) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ err = 0;
+ goto done;
+ }
+
/* When FW download fails, send Intel Reset to retry
* FW download.
*/
btintel_reset_to_bootloader(hdev);
goto done;
}
- set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
-
- bt_dev_info(hdev, "Waiting for firmware download to complete");
/* Before switching the device into operational mode and with that
* booting the loaded firmware, wait for the bootloader notification
@@ -2579,26 +2577,9 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
* and thus just timeout if that happens and fail the setup
* of this device.
*/
- err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
- TASK_INTERRUPTIBLE,
- msecs_to_jiffies(5000));
- if (err == -EINTR) {
- bt_dev_err(hdev, "Firmware loading interrupted");
- goto done;
- }
-
- if (err) {
- bt_dev_err(hdev, "Firmware loading timeout");
- err = -ETIMEDOUT;
+ err = btusb_download_wait(hdev, calltime, 5000);
+ if (err == -ETIMEDOUT)
btintel_reset_to_bootloader(hdev);
- goto done;
- }
-
- if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
- bt_dev_err(hdev, "Firmware loading failed");
- err = -ENOEXEC;
- goto done;
- }
done:
release_firmware(fw);
@@ -2614,41 +2595,11 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
char fwname[64];
int err;
struct btusb_data *data = hci_get_drvdata(hdev);
+ ktime_t calltime;
if (!ver || !params)
return -EINVAL;
- /* The hardware platform number has a fixed value of 0x37 and
- * for now only accept this single value.
- */
- if (ver->hw_platform != 0x37) {
- bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
- ver->hw_platform);
- return -EINVAL;
- }
-
- /* Check for supported iBT hardware variants of this firmware
- * loading method.
- *
- * This check has been put in place to ensure correct forward
- * compatibility options when newer hardware variants come along.
- */
- switch (ver->hw_variant) {
- case 0x0b: /* SfP */
- case 0x0c: /* WsP */
- case 0x11: /* JfP */
- case 0x12: /* ThP */
- case 0x13: /* HrP */
- case 0x14: /* CcP */
- break;
- default:
- bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
- ver->hw_variant);
- return -EINVAL;
- }
-
- btintel_version_info(hdev, ver);
-
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
* the bootloader and the value 0x23 identifies the operational
@@ -2665,16 +2616,18 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
if (ver->fw_variant == 0x23) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
- return 0;
- }
- /* If the device is not in bootloader mode, then the only possible
- * choice is to return an error and abort the device initialization.
- */
- if (ver->fw_variant != 0x06) {
- bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
- ver->fw_variant);
- return -ENODEV;
+ /* SfP and WsP don't seem to update the firmware version in the
+ * firmware file, so version checking is currently not possible.
+ */
+ switch (ver->hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ return 0;
+ }
+
+ /* Proceed to download to check if the version matches */
+ goto download;
}
/* Read the secure boot parameters to identify the operating
@@ -2702,6 +2655,7 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
+download:
/* With this Intel bootloader only the hardware variant and device
* revision information are used to select the right firmware for SfP
* and WsP.
@@ -2725,14 +2679,15 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
*/
err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
sizeof(fwname), "sfi");
- if (!err) {
+ if (err < 0) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
- bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err);
+ bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
+ fwname, err);
return err;
}
@@ -2745,20 +2700,26 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
goto done;
}
+ calltime = ktime_get();
+
set_bit(BTUSB_DOWNLOADING, &data->flags);
/* Start firmware downloading and get boot parameter */
- err = btintel_download_firmware(hdev, fw, boot_param);
+ err = btintel_download_firmware(hdev, ver, fw, boot_param);
if (err < 0) {
+ if (err == -EALREADY) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ err = 0;
+ goto done;
+ }
+
/* When FW download fails, send Intel Reset to retry
* FW download.
*/
btintel_reset_to_bootloader(hdev);
goto done;
}
- set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
-
- bt_dev_info(hdev, "Waiting for firmware download to complete");
/* Before switching the device into operational mode and with that
* booting the loaded firmware, wait for the bootloader notification
@@ -2771,29 +2732,74 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
* and thus just timeout if that happens and fail the setup
* of this device.
*/
- err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+ err = btusb_download_wait(hdev, calltime, 5000);
+ if (err == -ETIMEDOUT)
+ btintel_reset_to_bootloader(hdev);
+
+done:
+ release_firmware(fw);
+ return err;
+}
+
+static int btusb_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ ktime_t delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ bt_dev_info(hdev, "Waiting for device to boot");
+
+ err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
TASK_INTERRUPTIBLE,
- msecs_to_jiffies(5000));
+ msecs_to_jiffies(msec));
if (err == -EINTR) {
- bt_dev_err(hdev, "Firmware loading interrupted");
- goto done;
+ bt_dev_err(hdev, "Device boot interrupted");
+ return -EINTR;
}
if (err) {
- bt_dev_err(hdev, "Firmware loading timeout");
- err = -ETIMEDOUT;
- btintel_reset_to_bootloader(hdev);
- goto done;
+ bt_dev_err(hdev, "Device boot timeout");
+ return -ETIMEDOUT;
}
- if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
- bt_dev_err(hdev, "Firmware loading failed");
- err = -ENOEXEC;
- goto done;
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device booted in %llu usecs", duration);
+
+ return 0;
+}
+
+static int btusb_intel_boot(struct hci_dev *hdev, u32 boot_addr)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ ktime_t calltime;
+ int err;
+
+ calltime = ktime_get();
+
+ set_bit(BTUSB_BOOTING, &data->flags);
+
+ err = btintel_send_intel_reset(hdev, boot_addr);
+ if (err) {
+ bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
+ return err;
}
-done:
- release_firmware(fw);
+ /* The bootloader will not indicate when the device is ready. This
+ * is done by the operational firmware sending bootup notification.
+ *
+ * Booting into operational firmware should not take longer than
+ * 1 second. However if that happens, then just fail the setup
+ * since something went wrong.
+ */
+ err = btusb_boot_wait(hdev, calltime, 1000);
+ if (err == -ETIMEDOUT)
+ btintel_reset_to_bootloader(hdev);
+
return err;
}
@@ -2804,8 +2810,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
struct intel_boot_params params;
u32 boot_param;
char ddcname[64];
- ktime_t calltime, delta, rettime;
- unsigned long long duration;
int err;
struct intel_debug_features features;
@@ -2817,8 +2821,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
*/
boot_param = 0x00000000;
- calltime = ktime_get();
-
/* Read the Intel version information to determine if the device
* is in bootloader mode or if it already has operational firmware
* loaded.
@@ -2830,6 +2832,10 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return err;
}
+ err = btintel_version_info(hdev, &ver);
+ if (err)
+ return err;
+
err = btusb_intel_download_firmware(hdev, &ver, &params, &boot_param);
if (err)
return err;
@@ -2838,59 +2844,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (ver.fw_variant == 0x23)
goto finish;
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-
- bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
-
- calltime = ktime_get();
-
- set_bit(BTUSB_BOOTING, &data->flags);
-
- err = btintel_send_intel_reset(hdev, boot_param);
- if (err) {
- bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err);
- btintel_reset_to_bootloader(hdev);
+ err = btusb_intel_boot(hdev, boot_param);
+ if (err)
return err;
- }
-
- /* The bootloader will not indicate when the device is ready. This
- * is done by the operational firmware sending bootup notification.
- *
- * Booting into operational firmware should not take longer than
- * 1 second. However if that happens, then just fail the setup
- * since something went wrong.
- */
- bt_dev_info(hdev, "Waiting for device to boot");
-
- err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
- TASK_INTERRUPTIBLE,
- msecs_to_jiffies(1000));
-
- if (err == -EINTR) {
- bt_dev_err(hdev, "Device boot interrupted");
- return -EINTR;
- }
-
- if (err) {
- bt_dev_err(hdev, "Device boot timeout");
- btintel_reset_to_bootloader(hdev);
- return -ETIMEDOUT;
- }
-
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-
- bt_dev_info(hdev, "Device booted in %llu usecs", duration);
clear_bit(BTUSB_BOOTLOADER, &data->flags);
err = btusb_setup_intel_new_get_fw_name(&ver, &params, ddcname,
sizeof(ddcname), "ddc");
- if (!err) {
+ if (err < 0) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
} else {
/* Once the device is running in operational mode, it needs to
@@ -2947,8 +2910,6 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
struct btusb_data *data = hci_get_drvdata(hdev);
u32 boot_param;
char ddcname[64];
- ktime_t calltime, delta, rettime;
- unsigned long long duration;
int err;
struct intel_debug_features features;
struct intel_version_tlv version;
@@ -2961,8 +2922,6 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
*/
boot_param = 0x00000000;
- calltime = ktime_get();
-
/* Read the Intel version information to determine if the device
* is in bootloader mode or if it already has operational firmware
* loaded.
@@ -2974,7 +2933,9 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
return err;
}
- btintel_version_info_tlv(hdev, &version);
+ err = btintel_version_info_tlv(hdev, &version);
+ if (err)
+ return err;
err = btusb_intel_download_firmware_newgen(hdev, &version, &boot_param);
if (err)
@@ -2984,52 +2945,9 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
if (version.img_type == 0x03)
goto finish;
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long)ktime_to_ns(delta) >> 10;
-
- bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
-
- calltime = ktime_get();
-
- set_bit(BTUSB_BOOTING, &data->flags);
-
- err = btintel_send_intel_reset(hdev, boot_param);
- if (err) {
- bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err);
- btintel_reset_to_bootloader(hdev);
+ err = btusb_intel_boot(hdev, boot_param);
+ if (err)
return err;
- }
-
- /* The bootloader will not indicate when the device is ready. This
- * is done by the operational firmware sending bootup notification.
- *
- * Booting into operational firmware should not take longer than
- * 1 second. However if that happens, then just fail the setup
- * since something went wrong.
- */
- bt_dev_info(hdev, "Waiting for device to boot");
-
- err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
- TASK_INTERRUPTIBLE,
- msecs_to_jiffies(1000));
-
- if (err == -EINTR) {
- bt_dev_err(hdev, "Device boot interrupted");
- return -EINTR;
- }
-
- if (err) {
- bt_dev_err(hdev, "Device boot timeout");
- btintel_reset_to_bootloader(hdev);
- return -ETIMEDOUT;
- }
-
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long)ktime_to_ns(delta) >> 10;
-
- bt_dev_info(hdev, "Device booted in %llu usecs", duration);
clear_bit(BTUSB_BOOTLOADER, &data->flags);
@@ -3495,7 +3413,7 @@ static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwnam
fw_ptr = fw->data;
fw_bin_ptr = fw_ptr;
globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
- section_num = globaldesc->section_num;
+ section_num = le32_to_cpu(globaldesc->section_num);
for (i = 0; i < section_num; i++) {
first_block = 1;
@@ -3503,8 +3421,8 @@ static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwnam
sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
- section_offset = sectionmap->secoffset;
- dl_size = sectionmap->bin_info_spec.dlsize;
+ section_offset = le32_to_cpu(sectionmap->secoffset);
+ dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
if (dl_size > 0) {
retry = 20;
@@ -3740,7 +3658,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
int err, status;
u32 dev_id;
char fw_bin_name[64];
- u32 fw_version;
+ u32 fw_version = 0;
u8 param;
calltime = ktime_get();
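
Note: the btusb_download_wait() and btusb_boot_wait() helpers factored out above both report elapsed time with the same idiom. A small sketch of it under an assumed helper name; the right shift by 10 divides nanoseconds by 1024, a cheap approximation of microseconds (about 2.4% off) that is fine for log output:

    #include <linux/ktime.h>

    static unsigned long long demo_elapsed_usecs(ktime_t calltime)
    {
            ktime_t delta = ktime_sub(ktime_get(), calltime);

            /* ns / 1024 ~= us; avoids a 64-bit division in a log path */
            return (unsigned long long)ktime_to_ns(delta) >> 10;
    }
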
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 3764ceb6fa0d..3cd57fc56ade 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -68,6 +68,8 @@ struct bcm_device_data {
* deassert = Bluetooth device may sleep when sleep criteria are met
* @shutdown: BT_REG_ON pin,
* power up or power down Bluetooth device internal regulators
+ * @reset: BT_RST_N pin,
+ * active low resets the Bluetooth logic core
* @set_device_wakeup: callback to toggle BT_WAKE pin
* either by accessing @device_wakeup or by calling @btlp
* @set_shutdown: callback to toggle BT_REG_ON pin
@@ -101,6 +103,7 @@ struct bcm_device {
const char *name;
struct gpio_desc *device_wakeup;
struct gpio_desc *shutdown;
+ struct gpio_desc *reset;
int (*set_device_wakeup)(struct bcm_device *, bool);
int (*set_shutdown)(struct bcm_device *, bool);
#ifdef CONFIG_ACPI
@@ -985,6 +988,15 @@ static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake)
static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
{
gpiod_set_value_cansleep(dev->shutdown, powered);
+ if (dev->reset)
+ /*
+ * The reset line is asserted on powerdown and deasserted
+ * on poweron so the inverse of powered is used. Notice
+ * that the GPIO line BT_RST_N needs to be specified as
+ * active low in the device tree or similar system
+ * description.
+ */
+ gpiod_set_value_cansleep(dev->reset, !powered);
return 0;
}
@@ -1050,6 +1062,11 @@ static int bcm_get_resources(struct bcm_device *dev)
if (IS_ERR(dev->shutdown))
return PTR_ERR(dev->shutdown);
+ dev->reset = devm_gpiod_get_optional(dev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->reset))
+ return PTR_ERR(dev->reset);
+
dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
dev->set_shutdown = bcm_gpio_set_shutdown;
@@ -1482,6 +1499,8 @@ static struct bcm_device_data bcm43438_device_data = {
static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm20702a1" },
{ .compatible = "brcm,bcm4329-bt" },
+ { .compatible = "brcm,bcm4330-bt" },
+ { .compatible = "brcm,bcm4334-bt" },
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index b20a40fab83e..7249b91d9b91 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -735,7 +735,7 @@ static int intel_setup(struct hci_uart *hu)
set_bit(STATE_DOWNLOADING, &intel->flags);
/* Start firmware downloading and get boot parameter */
- err = btintel_download_firmware(hdev, fw, &boot_param);
+ err = btintel_download_firmware(hdev, &ver, fw, &boot_param);
if (err < 0)
goto done;
@@ -784,7 +784,10 @@ static int intel_setup(struct hci_uart *hu)
done:
release_firmware(fw);
- if (err < 0)
+ /* Check if there was an error and if is not -EALREADY which means the
+ * firmware has already been loaded.
+ */
+ if (err < 0 && err != -EALREADY)
return err;
/* We need to restore the default speed before Intel reset */
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index de36af63e182..0a0056912d51 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1066,7 +1066,7 @@ static void qca_controller_memdump(struct work_struct *work)
* packets in the buffer.
*/
/* For QCA6390, controller does not lost packets but
- * sequence number field of packat sometimes has error
+ * sequence number field of packet sometimes has error
* bits, so skip this checking for missing packet.
*/
while ((seq_no > qca_memdump->current_seq_no + 1) &&
@@ -1571,6 +1571,20 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
mutex_unlock(&qca->hci_memdump_lock);
}
+static bool qca_prevent_wake(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ bool wakeup;
+
+ /* The UART driver handles the interrupt from the BT SoC, so use the
+ * UART driver's device handle to query whether the device may wake up.
+ */
+ wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
+ bt_dev_dbg(hu->hdev, "wakeup status: %d", wakeup);
+
+ return !wakeup;
+}
+
static int qca_wcn3990_init(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1721,6 +1735,7 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
+ hu->hdev->prevent_wake = qca_prevent_wake;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
new file mode 100644
index 000000000000..c804db7e90f8
--- /dev/null
+++ b/drivers/bluetooth/virtio_bt.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/skbuff.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_bt.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#define VERSION "0.1"
+
+enum {
+ VIRTBT_VQ_TX,
+ VIRTBT_VQ_RX,
+ VIRTBT_NUM_VQS,
+};
+
+struct virtio_bluetooth {
+ struct virtio_device *vdev;
+ struct virtqueue *vqs[VIRTBT_NUM_VQS];
+ struct work_struct rx;
+ struct hci_dev *hdev;
+};
+
+static int virtbt_add_inbuf(struct virtio_bluetooth *vbt)
+{
+ struct virtqueue *vq = vbt->vqs[VIRTBT_VQ_RX];
+ struct scatterlist sg[1];
+ struct sk_buff *skb;
+ int err;
+
+ skb = alloc_skb(1000, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ sg_init_one(sg, skb->data, 1000);
+
+ err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ return 0;
+}
+
+static int virtbt_open(struct hci_dev *hdev)
+{
+ struct virtio_bluetooth *vbt = hci_get_drvdata(hdev);
+
+ if (virtbt_add_inbuf(vbt) < 0)
+ return -EIO;
+
+ virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]);
+ return 0;
+}
+
+static int virtbt_close(struct hci_dev *hdev)
+{
+ struct virtio_bluetooth *vbt = hci_get_drvdata(hdev);
+ int i;
+
+ cancel_work_sync(&vbt->rx);
+
+ for (i = 0; i < ARRAY_SIZE(vbt->vqs); i++) {
+ struct virtqueue *vq = vbt->vqs[i];
+ struct sk_buff *skb;
+
+ while ((skb = virtqueue_detach_unused_buf(vq)))
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
+static int virtbt_flush(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int virtbt_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct virtio_bluetooth *vbt = hci_get_drvdata(hdev);
+ struct scatterlist sg[1];
+ int err;
+
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ sg_init_one(sg, skb->data, skb->len);
+ err = virtqueue_add_outbuf(vbt->vqs[VIRTBT_VQ_TX], sg, 1, skb,
+ GFP_KERNEL);
+ if (err) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ virtqueue_kick(vbt->vqs[VIRTBT_VQ_TX]);
+ return 0;
+}
+
+static int virtbt_setup_zephyr(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Read Build Information */
+ skb = __hci_cmd_sync(hdev, 0xfc08, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
+
+ hci_set_fw_info(hdev, "%s", skb->data + 1);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int virtbt_set_bdaddr_zephyr(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+
+ /* Write BD_ADDR */
+ skb = __hci_cmd_sync(hdev, 0xfc06, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int virtbt_setup_intel(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Intel Read Version */
+ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int virtbt_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+
+ /* Intel Write BD Address */
+ skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int virtbt_setup_realtek(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Read ROM Version */
+ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ bt_dev_info(hdev, "ROM version %u", *((__u8 *) (skb->data + 1)));
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int virtbt_shutdown_generic(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Reset */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb)
+{
+ __u8 pkt_type;
+
+ pkt_type = *((__u8 *) skb->data);
+ skb_pull(skb, 1);
+
+ switch (pkt_type) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
+ hci_skb_pkt_type(skb) = pkt_type;
+ hci_recv_frame(vbt->hdev, skb);
+ break;
+ }
+}
+
+static void virtbt_rx_work(struct work_struct *work)
+{
+ struct virtio_bluetooth *vbt = container_of(work,
+ struct virtio_bluetooth, rx);
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = virtqueue_get_buf(vbt->vqs[VIRTBT_VQ_RX], &len);
+ if (!skb)
+ return;
+
+ skb->len = len;
+ virtbt_rx_handle(vbt, skb);
+
+ if (virtbt_add_inbuf(vbt) < 0)
+ return;
+
+ virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]);
+}
+
+static void virtbt_tx_done(struct virtqueue *vq)
+{
+ struct sk_buff *skb;
+ unsigned int len;
+
+ while ((skb = virtqueue_get_buf(vq, &len)))
+ kfree_skb(skb);
+}
+
+static void virtbt_rx_done(struct virtqueue *vq)
+{
+ struct virtio_bluetooth *vbt = vq->vdev->priv;
+
+ schedule_work(&vbt->rx);
+}
+
+static int virtbt_probe(struct virtio_device *vdev)
+{
+ vq_callback_t *callbacks[VIRTBT_NUM_VQS] = {
+ [VIRTBT_VQ_TX] = virtbt_tx_done,
+ [VIRTBT_VQ_RX] = virtbt_rx_done,
+ };
+ const char *names[VIRTBT_NUM_VQS] = {
+ [VIRTBT_VQ_TX] = "tx",
+ [VIRTBT_VQ_RX] = "rx",
+ };
+ struct virtio_bluetooth *vbt;
+ struct hci_dev *hdev;
+ int err;
+ __u8 type;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ type = virtio_cread8(vdev, offsetof(struct virtio_bt_config, type));
+
+ switch (type) {
+ case VIRTIO_BT_CONFIG_TYPE_PRIMARY:
+ case VIRTIO_BT_CONFIG_TYPE_AMP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vbt = kzalloc(sizeof(*vbt), GFP_KERNEL);
+ if (!vbt)
+ return -ENOMEM;
+
+ vdev->priv = vbt;
+ vbt->vdev = vdev;
+
+ INIT_WORK(&vbt->rx, virtbt_rx_work);
+
+ err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, callbacks,
+ names, NULL);
+ if (err)
+ return err;
+
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ vbt->hdev = hdev;
+
+ hdev->bus = HCI_VIRTIO;
+ hdev->dev_type = type;
+ hci_set_drvdata(hdev, vbt);
+
+ hdev->open = virtbt_open;
+ hdev->close = virtbt_close;
+ hdev->flush = virtbt_flush;
+ hdev->send = virtbt_send_frame;
+
+ if (virtio_has_feature(vdev, VIRTIO_BT_F_VND_HCI)) {
+ __u16 vendor;
+
+ virtio_cread(vdev, struct virtio_bt_config, vendor, &vendor);
+
+ switch (vendor) {
+ case VIRTIO_BT_CONFIG_VENDOR_ZEPHYR:
+ hdev->manufacturer = 1521;
+ hdev->setup = virtbt_setup_zephyr;
+ hdev->shutdown = virtbt_shutdown_generic;
+ hdev->set_bdaddr = virtbt_set_bdaddr_zephyr;
+ break;
+
+ case VIRTIO_BT_CONFIG_VENDOR_INTEL:
+ hdev->manufacturer = 2;
+ hdev->setup = virtbt_setup_intel;
+ hdev->shutdown = virtbt_shutdown_generic;
+ hdev->set_bdaddr = virtbt_set_bdaddr_intel;
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ break;
+
+ case VIRTIO_BT_CONFIG_VENDOR_REALTEK:
+ hdev->manufacturer = 93;
+ hdev->setup = virtbt_setup_realtek;
+ hdev->shutdown = virtbt_shutdown_generic;
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ break;
+ }
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BT_F_MSFT_EXT)) {
+ __u16 msft_opcode;
+
+ virtio_cread(vdev, struct virtio_bt_config,
+ msft_opcode, &msft_opcode);
+
+ hci_set_msft_opcode(hdev, msft_opcode);
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BT_F_AOSP_EXT))
+ hci_set_aosp_capable(hdev);
+
+ if (hci_register_dev(hdev) < 0) {
+ hci_free_dev(hdev);
+ err = -EBUSY;
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ vdev->config->del_vqs(vdev);
+ return err;
+}
+
+static void virtbt_remove(struct virtio_device *vdev)
+{
+ struct virtio_bluetooth *vbt = vdev->priv;
+ struct hci_dev *hdev = vbt->hdev;
+
+ hci_unregister_dev(hdev);
+ vdev->config->reset(vdev);
+
+ hci_free_dev(hdev);
+ vbt->hdev = NULL;
+
+ vdev->config->del_vqs(vdev);
+ kfree(vbt);
+}
+
+static struct virtio_device_id virtbt_table[] = {
+ { VIRTIO_ID_BT, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(virtio, virtbt_table);
+
+static const unsigned int virtbt_features[] = {
+ VIRTIO_BT_F_VND_HCI,
+ VIRTIO_BT_F_MSFT_EXT,
+ VIRTIO_BT_F_AOSP_EXT,
+};
+
+static struct virtio_driver virtbt_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = virtbt_features,
+ .feature_table_size = ARRAY_SIZE(virtbt_features),
+ .id_table = virtbt_table,
+ .probe = virtbt_probe,
+ .remove = virtbt_remove,
+};
+
+module_virtio_driver(virtbt_driver);
+
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_DESCRIPTION("Generic Bluetooth VIRTIO driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
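
Note: virtbt_send_frame() and virtbt_rx_handle() above frame HCI traffic H:4-style, one packet-type byte in front of the packet. A standalone illustration of that framing (the helper and the sample packet are invented for this example):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Standard HCI packet type bytes, as used by the driver above */
    enum { PKT_CMD = 0x01, PKT_ACL = 0x02, PKT_SCO = 0x03,
           PKT_EVT = 0x04, PKT_ISO = 0x05 };

    static size_t frame_pkt(uint8_t type, const uint8_t *pkt, size_t len,
                            uint8_t *out)
    {
            out[0] = type;             /* type byte travels first */
            memcpy(out + 1, pkt, len); /* payload follows verbatim */
            return len + 1;
    }

    int main(void)
    {
            /* HCI_Reset: opcode 0x0c03 little-endian, no parameters */
            const uint8_t reset_cmd[] = { 0x03, 0x0c, 0x00 };
            uint8_t buf[16];
            size_t n = frame_pkt(PKT_CMD, reset_cmd, sizeof(reset_cmd), buf);

            for (size_t i = 0; i < n; i++)
                    printf("%02x ", buf[i]); /* prints: 01 03 0c 00 */
            printf("\n");
            return 0;
    }
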
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d229a2d0c017..b151e0fcdeb5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -334,16 +334,6 @@ config DEVMEM
memory.
When in doubt, say "Y".
-config DEVKMEM
- bool "/dev/kmem virtual device support"
- # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
- depends on !ARM64
- help
- Say Y here if you want to support the /dev/kmem device. The
- /dev/kmem device is rarely used, but can be used for certain
- kind of kernel debugging operations.
- When in doubt, say "N".
-
config NVRAM
tristate "/dev/nvram support"
depends on X86 || HAVE_ARCH_NVRAM_OPS
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 869b9f5e8e03..15dc54fa1d47 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -403,221 +403,6 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
-{
- unsigned long pfn;
-
- /* Turn a kernel-virtual address into a physical page frame */
- pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
-
- /*
- * RED-PEN: on some architectures there is more mapped memory than
- * available in mem_map which pfn_valid checks for. Perhaps should add a
- * new macro here.
- *
- * RED-PEN: vmalloc is not supported right now.
- */
- if (!pfn_valid(pfn))
- return -EIO;
-
- vma->vm_pgoff = pfn;
- return mmap_mem(file, vma);
-}
-
-/*
- * This function reads the *virtual* memory as seen by the kernel.
- */
-static ssize_t read_kmem(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t low_count, read, sz;
- char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
- int err = 0;
-
- read = 0;
- if (p < (unsigned long) high_memory) {
- low_count = count;
- if (count > (unsigned long)high_memory - p)
- low_count = (unsigned long)high_memory - p;
-
-#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (p < PAGE_SIZE && low_count > 0) {
- sz = size_inside_page(p, low_count);
- if (clear_user(buf, sz))
- return -EFAULT;
- buf += sz;
- p += sz;
- read += sz;
- low_count -= sz;
- count -= sz;
- }
-#endif
- while (low_count > 0) {
- sz = size_inside_page(p, low_count);
-
- /*
- * On ia64 if a page has been mapped somewhere as
- * uncached, then it must also be accessed uncached
- * by the kernel or data corruption may occur
- */
- kbuf = xlate_dev_kmem_ptr((void *)p);
- if (!virt_addr_valid(kbuf))
- return -ENXIO;
-
- if (copy_to_user(buf, kbuf, sz))
- return -EFAULT;
- buf += sz;
- p += sz;
- read += sz;
- low_count -= sz;
- count -= sz;
- if (should_stop_iteration()) {
- count = 0;
- break;
- }
- }
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- while (count > 0) {
- sz = size_inside_page(p, count);
- if (!is_vmalloc_or_module_addr((void *)p)) {
- err = -ENXIO;
- break;
- }
- sz = vread(kbuf, (char *)p, sz);
- if (!sz)
- break;
- if (copy_to_user(buf, kbuf, sz)) {
- err = -EFAULT;
- break;
- }
- count -= sz;
- buf += sz;
- read += sz;
- p += sz;
- if (should_stop_iteration())
- break;
- }
- free_page((unsigned long)kbuf);
- }
- *ppos = p;
- return read ? read : err;
-}
-
-
-static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t written, sz;
- unsigned long copied;
-
- written = 0;
-#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
- /* we don't have page 0 mapped on sparc and m68k.. */
- if (p < PAGE_SIZE) {
- sz = size_inside_page(p, count);
- /* Hmm. Do something? */
- buf += sz;
- p += sz;
- count -= sz;
- written += sz;
- }
-#endif
-
- while (count > 0) {
- void *ptr;
-
- sz = size_inside_page(p, count);
-
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_kmem_ptr((void *)p);
- if (!virt_addr_valid(ptr))
- return -ENXIO;
-
- copied = copy_from_user(ptr, buf, sz);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
- }
- buf += sz;
- p += sz;
- count -= sz;
- written += sz;
- if (should_stop_iteration())
- break;
- }
-
- *ppos += written;
- return written;
-}
-
-/*
- * This function writes to the *virtual* memory as seen by the kernel.
- */
-static ssize_t write_kmem(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t wrote = 0;
- ssize_t virtr = 0;
- char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
- int err = 0;
-
- if (p < (unsigned long) high_memory) {
- unsigned long to_write = min_t(unsigned long, count,
- (unsigned long)high_memory - p);
- wrote = do_write_kmem(p, buf, to_write, ppos);
- if (wrote != to_write)
- return wrote;
- p += wrote;
- buf += wrote;
- count -= wrote;
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return wrote ? wrote : -ENOMEM;
- while (count > 0) {
- unsigned long sz = size_inside_page(p, count);
- unsigned long n;
-
- if (!is_vmalloc_or_module_addr((void *)p)) {
- err = -ENXIO;
- break;
- }
- n = copy_from_user(kbuf, buf, sz);
- if (n) {
- err = -EFAULT;
- break;
- }
- vwrite(kbuf, (char *)p, sz);
- count -= sz;
- buf += sz;
- virtr += sz;
- p += sz;
- if (should_stop_iteration())
- break;
- }
- free_page((unsigned long)kbuf);
- }
-
- *ppos = p;
- return virtr + wrote ? : err;
-}
-
static ssize_t read_port(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -855,7 +640,6 @@ static int open_port(struct inode *inode, struct file *filp)
#define write_zero write_null
#define write_iter_zero write_iter_null
#define open_mem open_port
-#define open_kmem open_mem
static const struct file_operations __maybe_unused mem_fops = {
.llseek = memory_lseek,
@@ -869,18 +653,6 @@ static const struct file_operations __maybe_unused mem_fops = {
#endif
};
-static const struct file_operations __maybe_unused kmem_fops = {
- .llseek = memory_lseek,
- .read = read_kmem,
- .write = write_kmem,
- .mmap = mmap_kmem,
- .open = open_kmem,
-#ifndef CONFIG_MMU
- .get_unmapped_area = get_unmapped_area_mem,
- .mmap_capabilities = memory_mmap_capabilities,
-#endif
-};
-
static const struct file_operations null_fops = {
.llseek = null_lseek,
.read = read_null,
@@ -925,9 +697,6 @@ static const struct memdev {
#ifdef CONFIG_DEVMEM
[DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
-#ifdef CONFIG_DEVKMEM
- [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
-#endif
[3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
[4] = { "port", 0, &port_fops, 0 },
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
index 1c14eb20c066..9132c3c4aa86 100644
--- a/drivers/clk/sifive/Kconfig
+++ b/drivers/clk/sifive/Kconfig
@@ -10,6 +10,8 @@ if CLK_SIFIVE
config CLK_SIFIVE_PRCI
bool "PRCI driver for SiFive SoCs"
+ select RESET_CONTROLLER
+ select RESET_SIMPLE
select CLK_ANALOGBITS_WRPLL_CLN28HPC
help
Supports the Power Reset Clock interface (PRCI) IP block found in
diff --git a/drivers/clk/sifive/fu740-prci.c b/drivers/clk/sifive/fu740-prci.c
index 764d1097aa51..53f6e00a03b9 100644
--- a/drivers/clk/sifive/fu740-prci.c
+++ b/drivers/clk/sifive/fu740-prci.c
@@ -72,6 +72,12 @@ static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
.recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
};
+static const struct clk_ops sifive_fu740_prci_pcie_aux_clk_ops = {
+ .enable = sifive_prci_pcie_aux_clock_enable,
+ .disable = sifive_prci_pcie_aux_clock_disable,
+ .is_enabled = sifive_prci_pcie_aux_clock_is_enabled,
+};
+
/* List of clock controls provided by the PRCI */
struct __prci_clock __prci_init_clocks_fu740[] = {
[PRCI_CLK_COREPLL] = {
@@ -120,4 +126,9 @@ struct __prci_clock __prci_init_clocks_fu740[] = {
.parent_name = "hfpclkpll",
.ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
},
+ [PRCI_CLK_PCIE_AUX] = {
+ .name = "pcie_aux",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_pcie_aux_clk_ops,
+ },
};
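A consumer such as the FU740 PCIe host controller would pick this clock up through the common clock framework. A minimal sketch, assuming the consumer's device tree node carries a matching "pcie_aux" entry in clock-names (the function name is illustrative):

```c
#include <linux/clk.h>
#include <linux/device.h>

static int example_enable_pcie_aux(struct device *dev)
{
        struct clk *pcie_aux;

        pcie_aux = devm_clk_get(dev, "pcie_aux");
        if (IS_ERR(pcie_aux))
                return PTR_ERR(pcie_aux);

        /* These clk_ops implement no .prepare, so the prepare step is a
         * no-op and the enable lands in
         * sifive_prci_pcie_aux_clock_enable().
         */
        return clk_prepare_enable(pcie_aux);
}
```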
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
index 13ef971f7764..511a0bf7ba2b 100644
--- a/drivers/clk/sifive/fu740-prci.h
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -9,7 +9,7 @@
#include "sifive-prci.h"
-#define NUM_CLOCK_FU740 8
+#define NUM_CLOCK_FU740 9
extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index 1490b01ce629..0d79ba31a793 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -453,6 +453,47 @@ void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}
+/* PCIe AUX clock APIs for enable/disable and status query. */
+int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);
+
+ if (r & PRCI_PCIE_AUX_EN_MASK)
+ return 1;
+ else
+ return 0;
+}
+
+int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 r __maybe_unused;
+
+ if (sifive_prci_pcie_aux_clock_is_enabled(hw))
+ return 0;
+
+ __prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
+ r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
+
+ return 0;
+}
+
+void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 r __maybe_unused;
+
+ __prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
+ r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
+}
+
/**
* __prci_register_clocks() - register clock controls in the PRCI
* @dev: Linux struct device
@@ -547,6 +588,19 @@ static int sifive_prci_probe(struct platform_device *pdev)
if (IS_ERR(pd->va))
return PTR_ERR(pd->va);
+ pd->reset.rcdev.owner = THIS_MODULE;
+ pd->reset.rcdev.nr_resets = PRCI_RST_NR;
+ pd->reset.rcdev.ops = &reset_simple_ops;
+ pd->reset.rcdev.of_node = pdev->dev.of_node;
+ pd->reset.active_low = true;
+ pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
+ spin_lock_init(&pd->reset.lock);
+
+ r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
+ if (r) {
+ dev_err(dev, "could not register reset controller: %d\n", r);
+ return r;
+ }
r = __prci_register_clocks(dev, pd, desc);
if (r) {
dev_err(dev, "could not register clocks: %d\n", r);
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
index dbdbd1722688..91658a88af4e 100644
--- a/drivers/clk/sifive/sifive-prci.h
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -11,6 +11,7 @@
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <linux/clk-provider.h>
+#include <linux/reset/reset-simple.h>
#include <linux/platform_device.h>
/*
@@ -67,6 +68,11 @@
#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
+/* PCIEAUX */
+#define PRCI_PCIE_AUX_OFFSET 0x14
+#define PRCI_PCIE_AUX_EN_SHIFT 0
+#define PRCI_PCIE_AUX_EN_MASK (0x1 << PRCI_PCIE_AUX_EN_SHIFT)
+
/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
@@ -116,6 +122,8 @@
#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_MASK \
(0x1 << PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT)
+#define PRCI_RST_NR 7
+
/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
@@ -216,6 +224,7 @@
*/
struct __prci_data {
void __iomem *va;
+ struct reset_simple_data reset;
struct clk_hw_onecell_data hw_clks;
};
@@ -296,4 +305,8 @@ unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate);
+int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw);
+int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw);
+void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw);
+
#endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4fb1f4da27ec..fe1a82627d57 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -16,6 +16,7 @@
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/clocksource_ids.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -24,6 +25,8 @@
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/ptp_kvm.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -200,6 +203,7 @@ static u64 arch_counter_read_cc(const struct cyclecounter *cc)
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
+ .id = CSID_ARM_ARCH_COUNTER,
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
@@ -1676,3 +1680,35 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif
+
+int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
+ struct clocksource **cs)
+{
+ struct arm_smccc_res hvc_res;
+ u32 ptp_counter;
+ ktime_t ktime;
+
+ if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY))
+ return -EOPNOTSUPP;
+
+ if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
+ ptp_counter = KVM_PTP_VIRT_COUNTER;
+ else
+ ptp_counter = KVM_PTP_PHYS_COUNTER;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
+ ptp_counter, &hvc_res);
+
+ if ((int)(hvc_res.a0) < 0)
+ return -EOPNOTSUPP;
+
+ ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1;
+ *ts = ktime_to_timespec64(ktime);
+ if (cycle)
+ *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
+ if (cs)
+ *cs = &clocksource_counter;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);
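Guests are expected to probe for the service before relying on the hypercall; the SMCCC discovery code added alongside this series (drivers/firmware/smccc/kvm_guest.c) exposes a helper for that. A short sketch:

```c
#include <linux/arm-smccc.h>

/* True when the hypervisor advertises the KVM PTP service. */
static bool example_ptp_service_available(void)
{
        return kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP);
}
```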
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index da3872c48308..3506b2050fb8 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -130,6 +130,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
+ case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
default: return 0;
}
@@ -142,6 +143,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
struct sev_device *sev;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
+ int buf_len;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -151,15 +153,27 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev = psp->sev_data;
+ buf_len = sev_cmd_buffer_len(cmd);
+ if (WARN_ON_ONCE(!data != !buf_len))
+ return -EINVAL;
+
+ /*
+ * Copy the incoming data to the driver's scratch buffer, as __pa() will
+ * not work for some memory, e.g. vmalloc'd addresses, and @data may not
+ * be physically contiguous.
+ */
+ if (data)
+ memcpy(sev->cmd_buf, data, buf_len);
+
/* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+ phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, psp_timeout);
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
+ buf_len, false);
iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
@@ -195,7 +209,14 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
+ buf_len, false);
+
+ /*
+ * Copy potential output from the PSP back to data. Do this even on
+ * failure in case the caller wants to glean something from the error.
+ */
+ if (data)
+ memcpy(data, sev->cmd_buf, buf_len);
return ret;
}
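The bounce buffer matters because __pa()/__psp_pa() are only meaningful for linear-map addresses; sev->cmd_buf is a single page from devm_get_free_pages(), so its physical address is always valid and contiguous. An illustrative-only sketch of the case the copy guards against:

```c
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void example_bad_pa(void)
{
        void *buf = vmalloc(64);        /* virtually contiguous only */
        phys_addr_t pa;

        if (!buf)
                return;
        /* WRONG for vmalloc memory: __pa() assumes a linear-map address,
         * so pa is garbage and the backing pages need not be contiguous.
         */
        pa = __pa(buf);
        (void)pa;
        vfree(buf);
}
```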
@@ -214,6 +235,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
+ struct sev_data_init data;
struct sev_device *sev;
int rc = 0;
@@ -225,6 +247,7 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
+ memset(&data, 0, sizeof(data));
if (sev_es_tmr) {
u64 tmr_pa;
@@ -234,12 +257,12 @@ static int __sev_platform_init_locked(int *error)
*/
tmr_pa = __pa(sev_es_tmr);
- sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
- sev->init_cmd_buf.tmr_address = tmr_pa;
- sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
+ data.flags |= SEV_INIT_FLAGS_SEV_ES;
+ data.tmr_address = tmr_pa;
+ data.tmr_len = SEV_ES_TMR_SIZE;
}
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
if (rc)
return rc;
@@ -296,15 +319,14 @@ static int sev_platform_shutdown(int *error)
static int sev_get_platform_state(int *state, int *error)
{
- struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status data;
int rc;
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &sev->status_cmd_buf, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
if (rc)
return rc;
- *state = sev->status_cmd_buf.state;
+ *state = data.state;
return rc;
}
@@ -342,15 +364,14 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
- struct sev_device *sev = psp_master->sev_data;
- struct sev_user_data_status *data = &sev->status_cmd_buf;
+ struct sev_user_data_status data;
int ret;
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
if (ret)
return ret;
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ if (copy_to_user((void __user *)argp->data, &data, sizeof(data)))
ret = -EFAULT;
return ret;
@@ -377,7 +398,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
+ struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
int ret;
@@ -388,9 +409,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
/* userspace wants to query CSR length */
if (!input.address || !input.length)
@@ -398,19 +417,15 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
/* allocate a physically contiguous buffer to store the CSR blob */
input_address = (void __user *)input.address;
- if (input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
+ if (input.length > SEV_FW_BLOB_MAX_SIZE)
+ return -EFAULT;
blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
+ if (!blob)
+ return -ENOMEM;
- data->address = __psp_pa(blob);
- data->len = input.length;
+ data.address = __psp_pa(blob);
+ data.len = input.length;
cmd:
if (sev->state == SEV_STATE_UNINIT) {
@@ -419,10 +434,10 @@ cmd:
goto e_free_blob;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error);
/* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
+ input.length = data.len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
@@ -436,8 +451,6 @@ cmd:
e_free_blob:
kfree(blob);
-e_free:
- kfree(data);
return ret;
}
@@ -457,21 +470,20 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_user_data_status *status;
+ struct sev_user_data_status status;
int error = 0, ret;
- status = &sev->status_cmd_buf;
- ret = sev_platform_status(status, &error);
+ ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
"SEV: failed to get status. Error: %#x\n", error);
return 1;
}
- sev->api_major = status->api_major;
- sev->api_minor = status->api_minor;
- sev->build = status->build;
- sev->state = status->state;
+ sev->api_major = status.api_major;
+ sev->api_minor = status.api_minor;
+ sev->build = status.build;
+ sev->state = status.state;
return 0;
}
@@ -569,7 +581,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
+ struct sev_data_pek_cert_import data;
void *pek_blob, *oca_blob;
int ret;
@@ -579,19 +591,14 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
/* copy PEK certificate blobs from userspace */
pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
+ if (IS_ERR(pek_blob))
+ return PTR_ERR(pek_blob);
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
+ data.reserved = 0;
+ data.pek_cert_address = __psp_pa(pek_blob);
+ data.pek_cert_len = input.pek_cert_len;
/* copy PEK certificate blobs from userspace */
oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
@@ -600,8 +607,8 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
goto e_free_pek;
}
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
+ data.oca_cert_address = __psp_pa(oca_blob);
+ data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
if (sev->state != SEV_STATE_INIT) {
@@ -610,21 +617,19 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
goto e_free_oca;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
-e_free:
- kfree(data);
return ret;
}
static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
+ struct sev_data_get_id data;
void __user *input_address;
void *id_blob = NULL;
int ret;
@@ -638,28 +643,25 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
input_address = (void __user *)input.address;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
if (input.address && input.length) {
id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
+ if (!id_blob)
return -ENOMEM;
- }
- data->address = __psp_pa(id_blob);
- data->len = input.length;
+ data.address = __psp_pa(id_blob);
+ data.len = input.length;
+ } else {
+ data.address = 0;
+ data.len = 0;
}
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);
/*
* Firmware will return the length of the ID value (either the minimum
* required length or the actual length written), return it to the user.
*/
- input.length = data->len;
+ input.length = data.len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
@@ -667,7 +669,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
}
if (id_blob) {
- if (copy_to_user(input_address, id_blob, data->len)) {
+ if (copy_to_user(input_address, id_blob, data.len)) {
ret = -EFAULT;
goto e_free;
}
@@ -675,7 +677,6 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
e_free:
kfree(id_blob);
- kfree(data);
return ret;
}
@@ -725,7 +726,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pdh_cert_export input;
void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
+ struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
int ret;
@@ -743,9 +744,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
@@ -757,25 +756,19 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
input_cert_chain_address = (void __user *)input.cert_chain_address;
/* Allocate a physically contiguous buffer to store the PDH blob. */
- if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
+ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
+ return -EFAULT;
/* Allocate a physically contiguous buffer to store the cert chain blob. */
- if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
+ if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
+ return -EFAULT;
pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
+ if (!pdh_blob)
+ return -ENOMEM;
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
+ data.pdh_cert_address = __psp_pa(pdh_blob);
+ data.pdh_cert_len = input.pdh_cert_len;
cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
if (!cert_blob) {
@@ -783,15 +776,15 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
goto e_free_pdh;
}
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
+ data.cert_chain_address = __psp_pa(cert_blob);
+ data.cert_chain_len = input.cert_chain_len;
cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
+ input.cert_chain_len = data.cert_chain_len;
+ input.pdh_cert_len = data.pdh_cert_len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
@@ -816,8 +809,6 @@ e_free_cert:
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
-e_free:
- kfree(data);
return ret;
}
@@ -982,6 +973,10 @@ int sev_dev_init(struct psp_device *psp)
if (!sev)
goto e_err;
+ sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!sev->cmd_buf)
+ goto e_sev;
+
psp->sev_data = sev;
sev->dev = dev;
@@ -993,7 +988,7 @@ int sev_dev_init(struct psp_device *psp)
if (!sev->vdata) {
ret = -ENODEV;
dev_err(dev, "sev: missing driver data\n");
- goto e_err;
+ goto e_buf;
}
psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
@@ -1008,6 +1003,10 @@ int sev_dev_init(struct psp_device *psp)
e_irq:
psp_clear_sev_irq_handler(psp);
+e_buf:
+ devm_free_pages(dev, (unsigned long)sev->cmd_buf);
+e_sev:
+ devm_kfree(dev, sev);
e_err:
psp->sev_data = NULL;
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index dd5c4fe82914..666c21eb81ab 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -46,12 +46,12 @@ struct sev_device {
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
u8 api_major;
u8 api_minor;
u8 build;
+
+ void *cmd_buf;
};
int sev_dev_init(struct psp_device *psp);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a0836ffc22e0..6ab9d9a488a6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,18 @@ config INTEL_IDXD_SVM
depends on PCI_PASID
depends on PCI_IOV
+config INTEL_IDXD_PERFMON
+ bool "Intel Data Accelerators performance monitor support"
+ depends on INTEL_IDXD
+ help
+ Enable performance monitor (PMU) support for the Intel(R)
+ data accelerators present in Intel Xeon CPUs. With this
+ enabled, perf can be used to monitor the DSA (Intel Data
+ Streaming Accelerator) events described in the Intel DSA
+ spec.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index fe45ad5d06c4..64a52bf4d737 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -344,17 +344,6 @@ static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}
-static inline int at_xdmac_csize(u32 maxburst)
-{
- int csize;
-
- csize = ffs(maxburst) - 1;
- if (csize > 4)
- csize = -EINVAL;
-
- return csize;
-};
-
static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 08d71dafa001..53289927dd0d 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -81,8 +81,13 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
* - Even chunks originate CB equal to 1
*/
chunk->cb = !(desc->chunks_alloc % 2);
- chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
- chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
+ if (chan->dir == EDMA_DIR_WRITE) {
+ chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
+ chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+ } else {
+ chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
+ chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+ }
if (desc->chunk) {
/* Create and add new element into the linked list */
@@ -329,22 +334,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
- u32 cnt;
+ u32 cnt = 0;
int i;
if (!chan->configured)
return NULL;
switch (chan->config.direction) {
- case DMA_DEV_TO_MEM: /* local dma */
+ case DMA_DEV_TO_MEM: /* local DMA */
if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
break;
return NULL;
- case DMA_MEM_TO_DEV: /* local dma */
+ case DMA_MEM_TO_DEV: /* local DMA */
if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
break;
return NULL;
- default: /* remote dma */
+ default: /* remote DMA */
if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
break;
if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
@@ -352,12 +357,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
return NULL;
- } else {
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
if (xfer->xfer.sg.len < 1)
return NULL;
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ if (!xfer->xfer.il->numf)
+ return NULL;
+ if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ return NULL;
+ } else {
+ return NULL;
}
desc = dw_edma_alloc_desc(chan);
@@ -368,18 +380,28 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (unlikely(!chunk))
goto err_alloc;
- src_addr = chan->config.src_addr;
- dst_addr = chan->config.dst_addr;
+ if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ src_addr = xfer->xfer.il->src_start;
+ dst_addr = xfer->xfer.il->dst_start;
+ } else {
+ src_addr = chan->config.src_addr;
+ dst_addr = chan->config.dst_addr;
+ }
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
- } else {
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ if (xfer->xfer.il->numf > 0)
+ cnt = xfer->xfer.il->numf;
+ else
+ cnt = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
- if (!xfer->cyclic && !sg)
+ if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
break;
if (chunk->bursts_alloc == chan->ll_max) {
@@ -392,20 +414,23 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (unlikely(!burst))
goto err_alloc;
- if (xfer->cyclic)
+ if (xfer->type == EDMA_XFER_CYCLIC)
burst->sz = xfer->xfer.cyclic.len;
- else
+ else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
+ else if (xfer->type == EDMA_XFER_INTERLEAVED)
+ burst->sz = xfer->xfer.il->sgl[i].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
if (chan->dir == EDMA_DIR_WRITE) {
burst->sar = src_addr;
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
- } else {
- burst->dar = dst_addr;
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+ src_addr += sg_dma_len(sg);
+ burst->dar = sg_dma_address(sg);
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -416,10 +441,11 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
}
} else {
burst->dar = dst_addr;
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
burst->sar = xfer->xfer.cyclic.paddr;
- } else {
- burst->sar = src_addr;
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+ dst_addr += sg_dma_len(sg);
+ burst->sar = sg_dma_address(sg);
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -430,10 +456,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
}
}
- if (!xfer->cyclic) {
- src_addr += sg_dma_len(sg);
- dst_addr += sg_dma_len(sg);
+ if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
+ xfer->xfer.il->frame_size > 0) {
+ struct dma_interleaved_template *il = xfer->xfer.il;
+ struct data_chunk *dc = &il->sgl[i];
+
+ if (il->src_sgl) {
+ src_addr += burst->sz;
+ src_addr += dmaengine_get_src_icg(il, dc);
+ }
+
+ if (il->dst_sgl) {
+ dst_addr += burst->sz;
+ dst_addr += dmaengine_get_dst_icg(il, dc);
+ }
}
}
@@ -459,7 +497,7 @@ dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
xfer.xfer.sg.sgl = sgl;
xfer.xfer.sg.len = len;
xfer.flags = flags;
- xfer.cyclic = false;
+ xfer.type = EDMA_XFER_SCATTER_GATHER;
return dw_edma_device_transfer(&xfer);
}
@@ -478,7 +516,23 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
xfer.xfer.cyclic.len = len;
xfer.xfer.cyclic.cnt = count;
xfer.flags = flags;
- xfer.cyclic = true;
+ xfer.type = EDMA_XFER_CYCLIC;
+
+ return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
+ struct dma_interleaved_template *ilt,
+ unsigned long flags)
+{
+ struct dw_edma_transfer xfer;
+
+ xfer.dchan = dchan;
+ xfer.direction = ilt->dir;
+ xfer.xfer.il = ilt;
+ xfer.flags = flags;
+ xfer.type = EDMA_XFER_INTERLEAVED;
return dw_edma_device_transfer(&xfer);
}
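From the client side, the new hook is reached through dmaengine_prep_interleaved_dma(); note that the validation above only admits templates with numf set and frame_size left at zero. A hedged sketch (channel acquisition, sizes, and completion handling are illustrative):

```c
#include <linux/dmaengine.h>
#include <linux/slab.h>

static int example_interleaved_xfer(struct dma_chan *chan,
                                    dma_addr_t src, dma_addr_t dst)
{
        struct dma_async_tx_descriptor *desc;
        struct dma_interleaved_template *xt;
        dma_cookie_t cookie;

        xt = kzalloc(struct_size(xt, sgl, 2), GFP_KERNEL);
        if (!xt)
                return -ENOMEM;

        xt->dir = DMA_MEM_TO_DEV;
        xt->src_start = src;
        xt->dst_start = dst;
        xt->numf = 2;           /* two bursts, sized by sgl[0..1] */
        xt->frame_size = 0;
        xt->sgl[0].size = 256;
        xt->sgl[1].size = 256;

        desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
        kfree(xt);              /* dw-edma builds its burst list during prep */
        if (!desc)
                return -EINVAL;

        cookie = dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return dma_submit_error(cookie);
}
```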
@@ -642,24 +696,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
struct device *dev = chip->dev;
struct dw_edma *dw = chip->dw;
struct dw_edma_chan *chan;
- size_t ll_chunk, dt_chunk;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 i, j, cnt, ch_cnt;
u32 alloc, off_alloc;
+ u32 i, j, cnt;
int err = 0;
u32 pos;
- ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
- ll_chunk = dw->ll_region.sz;
- dt_chunk = dw->dt_region.sz;
-
- /* Calculate linked list chunk for each channel */
- ll_chunk /= roundup_pow_of_two(ch_cnt);
-
- /* Calculate linked list chunk for each channel */
- dt_chunk /= roundup_pow_of_two(ch_cnt);
-
if (write) {
i = 0;
cnt = dw->wr_ch_cnt;
@@ -691,14 +734,14 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- chan->ll_off = (ll_chunk * i);
- chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
-
- chan->dt_off = (dt_chunk * i);
+ if (write)
+ chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+ else
+ chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max -= 1;
- dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
- write ? "write" : "read", j,
- chan->ll_off, chan->ll_max);
+ dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
+ write ? "write" : "read", j, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
@@ -723,12 +766,15 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->vc.desc_free = vchan_free_desc;
vchan_init(&chan->vc, dma);
- dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
- dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
- dt_region->sz = dt_chunk;
-
- dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
- write ? "write" : "read", j, chan->dt_off);
+ if (write) {
+ dt_region->paddr = dw->dt_region_wr[j].paddr;
+ dt_region->vaddr = dw->dt_region_wr[j].vaddr;
+ dt_region->sz = dw->dt_region_wr[j].sz;
+ } else {
+ dt_region->paddr = dw->dt_region_rd[j].paddr;
+ dt_region->vaddr = dw->dt_region_rd[j].vaddr;
+ dt_region->sz = dw->dt_region_rd[j].sz;
+ }
dw_edma_v0_core_device_config(chan);
}
@@ -738,6 +784,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
dma_cap_set(DMA_SLAVE, dma->cap_mask);
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -756,6 +803,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
dma->device_tx_status = dw_edma_device_tx_status;
dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+ dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;
dma_set_max_seg_size(dma->dev, U32_MAX);
@@ -863,14 +911,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
raw_spin_lock_init(&dw->lock);
- /* Find out how many write channels are supported by hardware */
- dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
- if (!dw->wr_ch_cnt)
- return -EINVAL;
+ dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+ dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
+ dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
- /* Find out how many read channels are supported by hardware */
- dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
- if (!dw->rd_ch_cnt)
+ dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+ dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
+ dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
+
+ if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
return -EINVAL;
dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
@@ -937,24 +986,23 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Power management */
pm_runtime_disable(dev);
+ /* Deregister eDMA device */
+ dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
+ list_del(&chan->vc.chan.device_node);
}
+ dma_async_device_unregister(&dw->rd_edma);
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
+ list_del(&chan->vc.chan.device_node);
}
- /* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- dma_async_device_unregister(&dw->rd_edma);
-
/* Turn debugfs off */
- dw_edma_v0_core_debugfs_off();
+ dw_edma_v0_core_debugfs_off(chip);
return 0;
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 31fc50d31792..60316d408c3e 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -15,15 +15,18 @@
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
-enum dw_edma_mode {
- EDMA_MODE_LEGACY = 0,
- EDMA_MODE_UNROLL
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5
};
enum dw_edma_request {
@@ -38,6 +41,12 @@ enum dw_edma_status {
EDMA_ST_BUSY
};
+enum dw_edma_xfer_type {
+ EDMA_XFER_SCATTER_GATHER = 0,
+ EDMA_XFER_CYCLIC,
+ EDMA_XFER_INTERLEAVED
+};
+
struct dw_edma_chan;
struct dw_edma_chunk;
@@ -82,11 +91,8 @@ struct dw_edma_chan {
int id;
enum dw_edma_dir dir;
- off_t ll_off;
u32 ll_max;
- off_t dt_off;
-
struct msi_msg msi;
enum dw_edma_request request;
@@ -117,19 +123,23 @@ struct dw_edma {
u16 rd_ch_cnt;
struct dw_edma_region rg_region; /* Registers */
- struct dw_edma_region ll_region; /* Linked list */
- struct dw_edma_region dt_region; /* Data */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
struct dw_edma_irq *irq;
int nr_irqs;
- u32 version;
- enum dw_edma_mode mode;
+ enum dw_edma_map_format mf;
struct dw_edma_chan *chan;
const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
@@ -146,12 +156,13 @@ struct dw_edma_cyclic {
struct dw_edma_transfer {
struct dma_chan *dchan;
union dw_edma_xfer {
- struct dw_edma_sg sg;
- struct dw_edma_cyclic cyclic;
+ struct dw_edma_sg sg;
+ struct dw_edma_cyclic cyclic;
+ struct dma_interleaved_template *il;
} xfer;
enum dma_transfer_direction direction;
unsigned long flags;
- bool cyclic;
+ enum dw_edma_xfer_type type;
};
static inline
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1eafc602e17e..44f6e09bdb53 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -13,45 +13,81 @@
#include <linux/dma/edma.h>
#include <linux/pci-epf.h>
#include <linux/msi.h>
+#include <linux/bitfield.h>
#include "dw-edma-core.h"
+#define DW_PCIE_VSEC_DMA_ID 0x6
+#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
+#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
+#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
+#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
+
+#define DW_BLOCK(a, b, c) \
+ { \
+ .bar = a, \
+ .off = b, \
+ .sz = c, \
+ },
+
+struct dw_edma_block {
+ enum pci_barno bar;
+ off_t off;
+ size_t sz;
+};
+
struct dw_edma_pcie_data {
/* eDMA registers location */
- enum pci_barno rg_bar;
- off_t rg_off;
- size_t rg_sz;
+ struct dw_edma_block rg;
/* eDMA memory linked list location */
- enum pci_barno ll_bar;
- off_t ll_off;
- size_t ll_sz;
+ struct dw_edma_block ll_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_block ll_rd[EDMA_MAX_RD_CH];
/* eDMA memory data location */
- enum pci_barno dt_bar;
- off_t dt_off;
- size_t dt_sz;
+ struct dw_edma_block dt_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_block dt_rd[EDMA_MAX_RD_CH];
/* Other */
- u32 version;
- enum dw_edma_mode mode;
+ enum dw_edma_map_format mf;
u8 irqs;
+ u16 wr_ch_cnt;
+ u16 rd_ch_cnt;
};
static const struct dw_edma_pcie_data snps_edda_data = {
/* eDMA registers location */
- .rg_bar = BAR_0,
- .rg_off = 0x00001000, /* 4 Kbytes */
- .rg_sz = 0x00002000, /* 8 Kbytes */
+ .rg.bar = BAR_0,
+ .rg.off = 0x00001000, /* 4 Kbytes */
+ .rg.sz = 0x00002000, /* 8 Kbytes */
/* eDMA memory linked list location */
- .ll_bar = BAR_2,
- .ll_off = 0x00000000, /* 0 Kbytes */
- .ll_sz = 0x00800000, /* 8 Mbytes */
+ .ll_wr = {
+ /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00000000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00200000, 0x00000800)
+ },
+ .ll_rd = {
+ /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00400000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00600000, 0x00000800)
+ },
/* eDMA memory data location */
- .dt_bar = BAR_2,
- .dt_off = 0x00800000, /* 8 Mbytes */
- .dt_sz = 0x03800000, /* 56 Mbytes */
+ .dt_wr = {
+ /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00800000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00900000, 0x00000800)
+ },
+ .dt_rd = {
+ /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00a00000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00b00000, 0x00000800)
+ },
/* Other */
- .version = 0,
- .mode = EDMA_MODE_UNROLL,
+ .mf = EDMA_MF_EDMA_UNROLL,
.irqs = 1,
+ .wr_ch_cnt = 2,
+ .rd_ch_cnt = 2,
};
static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
@@ -63,14 +99,58 @@ static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
};
+static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
+ struct dw_edma_pcie_data *pdata)
+{
+ u32 val, map;
+ u16 vsec;
+ u64 off;
+
+ vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
+ DW_PCIE_VSEC_DMA_ID);
+ if (!vsec)
+ return;
+
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
+ PCI_VNDR_HEADER_LEN(val) != 0x18)
+ return;
+
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
+ pci_read_config_dword(pdev, vsec + 0x8, &val);
+ map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
+ if (map != EDMA_MF_EDMA_LEGACY &&
+ map != EDMA_MF_EDMA_UNROLL &&
+ map != EDMA_MF_HDMA_COMPAT)
+ return;
+
+ pdata->mf = map;
+ pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);
+
+ pci_read_config_dword(pdev, vsec + 0xc, &val);
+ pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
+ FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
+ pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
+ FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));
+
+ pci_read_config_dword(pdev, vsec + 0x14, &val);
+ off = val;
+ pci_read_config_dword(pdev, vsec + 0x10, &val);
+ off <<= 32;
+ off |= val;
+ pdata->rg.off = off;
+}
+
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
- const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+ struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+ struct dw_edma_pcie_data vsec_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
- int err, nr_irqs;
struct dw_edma *dw;
+ int err, nr_irqs;
+ int i, mask;
/* Enable PCI device */
err = pcim_enable_device(pdev);
@@ -79,11 +159,25 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
+ memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+
+ /*
+ * Try to find a PCIe Vendor-Specific Extended Capability for the DMA;
+ * if one exists, use it to reconfigure the default platform data.
+ */
+ dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+
/* Mapping PCI BAR regions */
- err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) |
- BIT(pdata->ll_bar) |
- BIT(pdata->dt_bar),
- pci_name(pdev));
+ mask = BIT(vsec_data.rg.bar);
+ for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data.ll_wr[i].bar);
+ mask |= BIT(vsec_data.dt_wr[i].bar);
+ }
+ for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data.ll_rd[i].bar);
+ mask |= BIT(vsec_data.dt_rd[i].bar);
+ }
+ err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
pci_err(pdev, "eDMA BAR I/O remapping failed\n");
return err;
@@ -125,7 +219,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -139,46 +233,109 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
- dw->rg_region.vaddr += pdata->rg_off;
- dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
- dw->rg_region.paddr += pdata->rg_off;
- dw->rg_region.sz = pdata->rg_sz;
-
- dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
- dw->ll_region.vaddr += pdata->ll_off;
- dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
- dw->ll_region.paddr += pdata->ll_off;
- dw->ll_region.sz = pdata->ll_sz;
-
- dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
- dw->dt_region.vaddr += pdata->dt_off;
- dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
- dw->dt_region.paddr += pdata->dt_off;
- dw->dt_region.sz = pdata->dt_sz;
-
- dw->version = pdata->version;
- dw->mode = pdata->mode;
+ dw->mf = vsec_data.mf;
dw->nr_irqs = nr_irqs;
dw->ops = &dw_edma_pcie_core_ops;
+ dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
+ dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
- /* Debug info */
- pci_dbg(pdev, "Version:\t%u\n", dw->version);
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ if (!dw->rg_region.vaddr)
+ return -ENOMEM;
+
+ dw->rg_region.vaddr += vsec_data.rg.off;
+ dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
+ dw->rg_region.paddr += vsec_data.rg.off;
+ dw->rg_region.sz = vsec_data.rg.sz;
+
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
+ struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+
+ ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr)
+ return -ENOMEM;
+
+ ll_region->vaddr += ll_block->off;
+ ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->paddr += ll_block->off;
+ ll_region->sz = ll_block->sz;
+
+ dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr)
+ return -ENOMEM;
+
+ dt_region->vaddr += dt_block->off;
+ dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->paddr += dt_block->off;
+ dt_region->sz = dt_block->sz;
+ }
- pci_dbg(pdev, "Mode:\t%s\n",
- dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
+ struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+
+ ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr)
+ return -ENOMEM;
+
+ ll_region->vaddr += ll_block->off;
+ ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->paddr += ll_block->off;
+ ll_region->sz = ll_block->sz;
+
+ dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr)
+ return -ENOMEM;
+
+ dt_region->vaddr += dt_block->off;
+ dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->paddr += dt_block->off;
+ dt_region->sz = dt_block->sz;
+ }
+
+ /* Debug info */
+ if (dw->mf == EDMA_MF_EDMA_LEGACY)
+ pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
+ else if (dw->mf == EDMA_MF_EDMA_UNROLL)
+ pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
+ else if (dw->mf == EDMA_MF_HDMA_COMPAT)
+ pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+ else
+ pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
+ vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- dw->dt_region.vaddr, &dw->dt_region.paddr);
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.ll_wr[i].bar,
+ vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
+ dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+
+ pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.dt_wr[i].bar,
+ vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
+ dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+ }
+
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.ll_rd[i].bar,
+ vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
+ dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+
+ pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.dt_rd[i].bar,
+ vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
+ dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+ }
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 692de47b1670..329fc2e57b70 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -28,35 +28,75 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
return dw->rg_region.vaddr;
}
-#define SET(dw, name, value) \
+#define SET_32(dw, name, value) \
writel(value, &(__dw_regs(dw)->name))
-#define GET(dw, name) \
+#define GET_32(dw, name) \
readl(&(__dw_regs(dw)->name))
-#define SET_RW(dw, dir, name, value) \
+#define SET_RW_32(dw, dir, name, value) \
do { \
if ((dir) == EDMA_DIR_WRITE) \
- SET(dw, wr_##name, value); \
+ SET_32(dw, wr_##name, value); \
else \
- SET(dw, rd_##name, value); \
+ SET_32(dw, rd_##name, value); \
} while (0)
-#define GET_RW(dw, dir, name) \
+#define GET_RW_32(dw, dir, name) \
((dir) == EDMA_DIR_WRITE \
- ? GET(dw, wr_##name) \
- : GET(dw, rd_##name))
+ ? GET_32(dw, wr_##name) \
+ : GET_32(dw, rd_##name))
-#define SET_BOTH(dw, name, value) \
+#define SET_BOTH_32(dw, name, value) \
do { \
- SET(dw, wr_##name, value); \
- SET(dw, rd_##name, value); \
+ SET_32(dw, wr_##name, value); \
+ SET_32(dw, rd_##name, value); \
+ } while (0)
+
+#ifdef CONFIG_64BIT
+
+#define SET_64(dw, name, value) \
+ writeq(value, &(__dw_regs(dw)->name))
+
+#define GET_64(dw, name) \
+ readq(&(__dw_regs(dw)->name))
+
+#define SET_RW_64(dw, dir, name, value) \
+ do { \
+ if ((dir) == EDMA_DIR_WRITE) \
+ SET_64(dw, wr_##name, value); \
+ else \
+ SET_64(dw, rd_##name, value); \
+ } while (0)
+
+#define GET_RW_64(dw, dir, name) \
+ ((dir) == EDMA_DIR_WRITE \
+ ? GET_64(dw, wr_##name) \
+ : GET_64(dw, rd_##name))
+
+#define SET_BOTH_64(dw, name, value) \
+ do { \
+ SET_64(dw, wr_##name, value); \
+ SET_64(dw, rd_##name, value); \
+ } while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define SET_COMPAT(dw, name, value) \
+ writel(value, &(__dw_regs(dw)->type.unroll.name))
+
+#define SET_RW_COMPAT(dw, dir, name, value) \
+ do { \
+ if ((dir) == EDMA_DIR_WRITE) \
+ SET_COMPAT(dw, wr_##name, value); \
+ else \
+ SET_COMPAT(dw, rd_##name, value); \
} while (0)
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
- if (dw->mode == EDMA_MODE_LEGACY)
+ if (dw->mf == EDMA_MF_EDMA_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
@@ -68,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
- if (dw->mode == EDMA_MODE_LEGACY) {
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -93,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mode == EDMA_MODE_LEGACY) {
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -115,21 +155,86 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
return value;
}
-#define SET_CH(dw, dir, ch, name, value) \
+#define SET_CH_32(dw, dir, ch, name, value) \
writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
-#define GET_CH(dw, dir, ch, name) \
+#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL(ll, value) \
+#define SET_LL_32(ll, value) \
writel(value, ll)
+#ifdef CONFIG_64BIT
+
+static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ u64 value, void __iomem *addr)
+{
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ u32 viewport_sel;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ if (dir == EDMA_DIR_READ)
+ viewport_sel |= BIT(31);
+
+ writel(viewport_sel,
+ &(__dw_regs(dw)->type.legacy.viewport_sel));
+ writeq(value, addr);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+ } else {
+ writeq(value, addr);
+ }
+}
+
+static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ const void __iomem *addr)
+{
+ u64 value;
+
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ u32 viewport_sel;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ if (dir == EDMA_DIR_READ)
+ viewport_sel |= BIT(31);
+
+ writel(viewport_sel,
+ &(__dw_regs(dw)->type.legacy.viewport_sel));
+ value = readq(addr);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+ } else {
+ value = readq(addr);
+ }
+
+ return value;
+}
+
+#define SET_CH_64(dw, dir, ch, name, value) \
+ writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define GET_CH_64(dw, dir, ch, name) \
+ readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define SET_LL_64(ll, value) \
+ writeq(value, ll)
+
+#endif /* CONFIG_64BIT */
+
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
- SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
- SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
- SET_BOTH(dw, engine_en, 0);
+ SET_BOTH_32(dw, int_mask,
+ EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_32(dw, int_clear,
+ EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_32(dw, engine_en, 0);
}
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
@@ -137,9 +242,11 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
u32 num_ch;
if (dir == EDMA_DIR_WRITE)
- num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
+ num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
+ GET_32(dw, ctrl));
else
- num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));
+ num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
+ GET_32(dw, ctrl));
if (num_ch > EDMA_V0_MAX_NR_CH)
num_ch = EDMA_V0_MAX_NR_CH;
@@ -153,7 +260,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
- GET_CH(dw, chan->dir, chan->id, ch_control1));
+ GET_CH_32(dw, chan->dir, chan->id, ch_control1));
if (tmp == 1)
return DMA_IN_PROGRESS;
@@ -167,26 +274,28 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
- SET_RW(dw, chan->dir, int_clear,
- FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
+ SET_RW_32(dw, chan->dir, int_clear,
+ FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
- SET_RW(dw, chan->dir, int_clear,
- FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
+ SET_RW_32(dw, chan->dir, int_clear,
+ FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
- return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
+ return FIELD_GET(EDMA_V0_DONE_INT_MASK,
+ GET_RW_32(dw, dir, int_status));
}
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
- return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
+ return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
+ GET_RW_32(dw, dir, int_status));
}
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
@@ -209,15 +318,23 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
/* Channel control */
- SET_LL(&lli[i].control, control);
+ SET_LL_32(&lli[i].control, control);
/* Transfer size */
- SET_LL(&lli[i].transfer_size, child->sz);
- /* SAR - low, high */
- SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
- /* DAR - low, high */
- SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
+ SET_LL_32(&lli[i].transfer_size, child->sz);
+ /* SAR */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&lli[i].sar.reg, child->sar);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
+ SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
+ #endif /* CONFIG_64BIT */
+ /* DAR */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&lli[i].dar.reg, child->dar);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
+ SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
+ #endif /* CONFIG_64BIT */
i++;
}
@@ -227,10 +344,14 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
control |= DW_EDMA_V0_CB;
/* Channel control */
- SET_LL(&llp->control, control);
- /* Linked list - low, high */
- SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
- SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
+ SET_LL_32(&llp->control, control);
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
+ #endif /* CONFIG_64BIT */
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -243,28 +364,69 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
- SET_RW(dw, chan->dir, engine_en, BIT(0));
+ SET_RW_32(dw, chan->dir, engine_en, BIT(0));
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
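+			/*
+			 * HDMA compatibility mode also requires the
+			 * per-channel power enable bit, in addition to
+			 * the engine enable above.
+			 */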
+ switch (chan->id) {
+ case 0:
+ SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
+ BIT(0));
+ break;
+ case 1:
+ SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
+ BIT(0));
+ break;
+ case 2:
+ SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
+ BIT(0));
+ break;
+ case 3:
+ SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
+ BIT(0));
+ break;
+ case 4:
+ SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
+ BIT(0));
+ break;
+ case 5:
+ SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
+ BIT(0));
+ break;
+ case 6:
+ SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
+ BIT(0));
+ break;
+ case 7:
+ SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
+ BIT(0));
+ break;
+ }
+ }
/* Interrupt unmask - done, abort */
- tmp = GET_RW(dw, chan->dir, int_mask);
+ tmp = GET_RW_32(dw, chan->dir, int_mask);
tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
- SET_RW(dw, chan->dir, int_mask, tmp);
+ SET_RW_32(dw, chan->dir, int_mask, tmp);
/* Linked list error */
- tmp = GET_RW(dw, chan->dir, linked_list_err_en);
+ tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
- SET_RW(dw, chan->dir, linked_list_err_en, tmp);
+ SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
/* Channel control */
- SET_CH(dw, chan->dir, chan->id, ch_control1,
- (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
- /* Linked list - low, high */
- SET_CH(dw, chan->dir, chan->id, llp_low,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH(dw, chan->dir, chan->id, llp_high,
- upper_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, ch_control1,
+ (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+ SET_CH_64(dw, chan->dir, chan->id, llp.reg,
+ chunk->ll_region.paddr);
+ #else /* CONFIG_64BIT */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
+ #endif /* CONFIG_64BIT */
}
/* Doorbell */
- SET_RW(dw, chan->dir, doorbell,
- FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
+ SET_RW_32(dw, chan->dir, doorbell,
+ FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
@@ -273,31 +435,31 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
u32 tmp = 0;
/* MSI done addr - low, high */
- SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
- SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
+ SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
+ SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
/* MSI abort addr - low, high */
- SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
- SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
+ SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
+ SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
/* MSI data - low, high */
switch (chan->id) {
case 0:
case 1:
- tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
break;
case 2:
case 3:
- tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
break;
case 4:
case 5:
- tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
break;
case 6:
case 7:
- tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
break;
}
@@ -316,22 +478,22 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
switch (chan->id) {
case 0:
case 1:
- SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
break;
case 2:
case 3:
- SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
break;
case 4:
case 5:
- SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
break;
case 6:
case 7:
- SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
break;
}
@@ -344,7 +506,7 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
dw_edma_v0_debugfs_on(chip);
}
-void dw_edma_v0_core_debugfs_off(void)
+void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
{
- dw_edma_v0_debugfs_off();
+ dw_edma_v0_debugfs_off(chip);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index abae1527f1f9..2afa626b8300 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -23,6 +23,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(void);
+void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 6f62711a4c94..4b3bcffd15ef 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -38,7 +38,6 @@
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dentry *base_dir;
static struct dw_edma *dw;
static struct dw_edma_v0_regs __iomem *regs;
@@ -55,7 +54,7 @@ struct debugfs_entries {
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
- if (dw->mode == EDMA_MODE_LEGACY &&
+ if (dw->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
@@ -114,12 +113,12 @@ static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
REGISTER(ch_control1),
REGISTER(ch_control2),
REGISTER(transfer_size),
- REGISTER(sar_low),
- REGISTER(sar_high),
- REGISTER(dar_low),
- REGISTER(dar_high),
- REGISTER(llp_low),
- REGISTER(llp_high),
+ REGISTER(sar.lsb),
+ REGISTER(sar.msb),
+ REGISTER(dar.lsb),
+ REGISTER(dar.msb),
+ REGISTER(llp.lsb),
+ REGISTER(llp.msb),
};
nr_entries = ARRAY_SIZE(debugfs_regs);
@@ -132,17 +131,17 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
/* eDMA global registers */
WR_REGISTER(engine_en),
WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight_low),
- WR_REGISTER(ch_arb_weight_high),
+ WR_REGISTER(ch_arb_weight.lsb),
+ WR_REGISTER(ch_arb_weight.msb),
/* eDMA interrupts registers */
WR_REGISTER(int_status),
WR_REGISTER(int_mask),
WR_REGISTER(int_clear),
WR_REGISTER(err_status),
- WR_REGISTER(done_imwr_low),
- WR_REGISTER(done_imwr_high),
- WR_REGISTER(abort_imwr_low),
- WR_REGISTER(abort_imwr_high),
+ WR_REGISTER(done_imwr.lsb),
+ WR_REGISTER(done_imwr.msb),
+ WR_REGISTER(abort_imwr.lsb),
+ WR_REGISTER(abort_imwr.msb),
WR_REGISTER(ch01_imwr_data),
WR_REGISTER(ch23_imwr_data),
WR_REGISTER(ch45_imwr_data),
@@ -152,8 +151,8 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt_low),
- WR_REGISTER_UNROLL(engine_hshake_cnt_high),
+ WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
WR_REGISTER_UNROLL(ch0_pwr_en),
WR_REGISTER_UNROLL(ch1_pwr_en),
WR_REGISTER_UNROLL(ch2_pwr_en),
@@ -174,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mode == EDMA_MODE_UNROLL) {
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -200,19 +199,19 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
/* eDMA global registers */
RD_REGISTER(engine_en),
RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight_low),
- RD_REGISTER(ch_arb_weight_high),
+ RD_REGISTER(ch_arb_weight.lsb),
+ RD_REGISTER(ch_arb_weight.msb),
/* eDMA interrupts registers */
RD_REGISTER(int_status),
RD_REGISTER(int_mask),
RD_REGISTER(int_clear),
- RD_REGISTER(err_status_low),
- RD_REGISTER(err_status_high),
+ RD_REGISTER(err_status.lsb),
+ RD_REGISTER(err_status.msb),
RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr_low),
- RD_REGISTER(done_imwr_high),
- RD_REGISTER(abort_imwr_low),
- RD_REGISTER(abort_imwr_high),
+ RD_REGISTER(done_imwr.lsb),
+ RD_REGISTER(done_imwr.msb),
+ RD_REGISTER(abort_imwr.lsb),
+ RD_REGISTER(abort_imwr.msb),
RD_REGISTER(ch01_imwr_data),
RD_REGISTER(ch23_imwr_data),
RD_REGISTER(ch45_imwr_data),
@@ -221,8 +220,8 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt_low),
- RD_REGISTER_UNROLL(engine_hshake_cnt_high),
+ RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
RD_REGISTER_UNROLL(ch0_pwr_en),
RD_REGISTER_UNROLL(ch1_pwr_en),
RD_REGISTER_UNROLL(ch2_pwr_en),
@@ -243,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mode == EDMA_MODE_UNROLL) {
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -272,7 +271,7 @@ static void dw_edma_debugfs_regs(void)
struct dentry *regs_dir;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
+ regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
if (!regs_dir)
return;
@@ -293,19 +292,23 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!regs)
return;
- base_dir = debugfs_create_dir(dw->name, NULL);
- if (!base_dir)
+ dw->debugfs = debugfs_create_dir(dw->name, NULL);
+ if (!dw->debugfs)
return;
- debugfs_create_u32("version", 0444, base_dir, &dw->version);
- debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
- debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);
+ debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
-void dw_edma_v0_debugfs_off(void)
+void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
{
- debugfs_remove_recursive(base_dir);
+ dw = chip->dw;
+ if (!dw)
+ return;
+
+ debugfs_remove_recursive(dw->debugfs);
+ dw->debugfs = NULL;
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index 5450a0a94193..d0ff25a9ea5c 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -13,13 +13,13 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(void);
+void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
}
-static inline void dw_edma_v0_debugfs_off(void)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h
index dfd70e223c2f..e175f7b20480 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h
@@ -25,134 +25,209 @@
#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0)
struct dw_edma_v0_ch_regs {
- u32 ch_control1; /* 0x000 */
- u32 ch_control2; /* 0x004 */
- u32 transfer_size; /* 0x008 */
- u32 sar_low; /* 0x00c */
- u32 sar_high; /* 0x010 */
- u32 dar_low; /* 0x014 */
- u32 dar_high; /* 0x018 */
- u32 llp_low; /* 0x01c */
- u32 llp_high; /* 0x020 */
-};
+ u32 ch_control1; /* 0x0000 */
+ u32 ch_control2; /* 0x0004 */
+ u32 transfer_size; /* 0x0008 */
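+	/*
+	 * Each 64-bit register is declared as a u64 overlaid with its
+	 * lsb/msb halves, so 32-bit builds can program it as two
+	 * 32-bit writes.
+	 */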
+ union {
+ u64 reg; /* 0x000c..0x0010 */
+ struct {
+ u32 lsb; /* 0x000c */
+ u32 msb; /* 0x0010 */
+ };
+ } sar;
+ union {
+ u64 reg; /* 0x0014..0x0018 */
+ struct {
+ u32 lsb; /* 0x0014 */
+ u32 msb; /* 0x0018 */
+ };
+ } dar;
+ union {
+ u64 reg; /* 0x001c..0x0020 */
+ struct {
+ u32 lsb; /* 0x001c */
+ u32 msb; /* 0x0020 */
+ };
+ } llp;
+} __packed;
struct dw_edma_v0_ch {
- struct dw_edma_v0_ch_regs wr; /* 0x200 */
- u32 padding_1[55]; /* [0x224..0x2fc] */
- struct dw_edma_v0_ch_regs rd; /* 0x300 */
- u32 padding_2[55]; /* [0x324..0x3fc] */
-};
+ struct dw_edma_v0_ch_regs wr; /* 0x0200 */
+ u32 padding_1[55]; /* 0x0224..0x02fc */
+ struct dw_edma_v0_ch_regs rd; /* 0x0300 */
+ u32 padding_2[55]; /* 0x0324..0x03fc */
+} __packed;
struct dw_edma_v0_unroll {
- u32 padding_1; /* 0x0f8 */
- u32 wr_engine_chgroup; /* 0x100 */
- u32 rd_engine_chgroup; /* 0x104 */
- u32 wr_engine_hshake_cnt_low; /* 0x108 */
- u32 wr_engine_hshake_cnt_high; /* 0x10c */
- u32 padding_2[2]; /* [0x110..0x114] */
- u32 rd_engine_hshake_cnt_low; /* 0x118 */
- u32 rd_engine_hshake_cnt_high; /* 0x11c */
- u32 padding_3[2]; /* [0x120..0x124] */
- u32 wr_ch0_pwr_en; /* 0x128 */
- u32 wr_ch1_pwr_en; /* 0x12c */
- u32 wr_ch2_pwr_en; /* 0x130 */
- u32 wr_ch3_pwr_en; /* 0x134 */
- u32 wr_ch4_pwr_en; /* 0x138 */
- u32 wr_ch5_pwr_en; /* 0x13c */
- u32 wr_ch6_pwr_en; /* 0x140 */
- u32 wr_ch7_pwr_en; /* 0x144 */
- u32 padding_4[8]; /* [0x148..0x164] */
- u32 rd_ch0_pwr_en; /* 0x168 */
- u32 rd_ch1_pwr_en; /* 0x16c */
- u32 rd_ch2_pwr_en; /* 0x170 */
- u32 rd_ch3_pwr_en; /* 0x174 */
- u32 rd_ch4_pwr_en; /* 0x178 */
- u32 rd_ch5_pwr_en; /* 0x18c */
- u32 rd_ch6_pwr_en; /* 0x180 */
- u32 rd_ch7_pwr_en; /* 0x184 */
- u32 padding_5[30]; /* [0x188..0x1fc] */
- struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */
-};
+ u32 padding_1; /* 0x00f8 */
+ u32 wr_engine_chgroup; /* 0x0100 */
+ u32 rd_engine_chgroup; /* 0x0104 */
+ union {
+ u64 reg; /* 0x0108..0x010c */
+ struct {
+ u32 lsb; /* 0x0108 */
+ u32 msb; /* 0x010c */
+ };
+ } wr_engine_hshake_cnt;
+ u32 padding_2[2]; /* 0x0110..0x0114 */
+ union {
+		u64 reg; /* 0x0118..0x011c */
+		struct {
+			u32 lsb; /* 0x0118 */
+			u32 msb; /* 0x011c */
+ };
+ } rd_engine_hshake_cnt;
+ u32 padding_3[2]; /* 0x0120..0x0124 */
+ u32 wr_ch0_pwr_en; /* 0x0128 */
+ u32 wr_ch1_pwr_en; /* 0x012c */
+ u32 wr_ch2_pwr_en; /* 0x0130 */
+ u32 wr_ch3_pwr_en; /* 0x0134 */
+ u32 wr_ch4_pwr_en; /* 0x0138 */
+ u32 wr_ch5_pwr_en; /* 0x013c */
+ u32 wr_ch6_pwr_en; /* 0x0140 */
+ u32 wr_ch7_pwr_en; /* 0x0144 */
+ u32 padding_4[8]; /* 0x0148..0x0164 */
+ u32 rd_ch0_pwr_en; /* 0x0168 */
+ u32 rd_ch1_pwr_en; /* 0x016c */
+ u32 rd_ch2_pwr_en; /* 0x0170 */
+ u32 rd_ch3_pwr_en; /* 0x0174 */
+ u32 rd_ch4_pwr_en; /* 0x0178 */
+	u32 rd_ch5_pwr_en; /* 0x017c */
+ u32 rd_ch6_pwr_en; /* 0x0180 */
+ u32 rd_ch7_pwr_en; /* 0x0184 */
+ u32 padding_5[30]; /* 0x0188..0x01fc */
+ struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* 0x0200..0x1120 */
+} __packed;
struct dw_edma_v0_legacy {
- u32 viewport_sel; /* 0x0f8 */
- struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */
-};
+ u32 viewport_sel; /* 0x00f8 */
+ struct dw_edma_v0_ch_regs ch; /* 0x0100..0x0120 */
+} __packed;
struct dw_edma_v0_regs {
/* eDMA global registers */
- u32 ctrl_data_arb_prior; /* 0x000 */
- u32 padding_1; /* 0x004 */
- u32 ctrl; /* 0x008 */
- u32 wr_engine_en; /* 0x00c */
- u32 wr_doorbell; /* 0x010 */
- u32 padding_2; /* 0x014 */
- u32 wr_ch_arb_weight_low; /* 0x018 */
- u32 wr_ch_arb_weight_high; /* 0x01c */
- u32 padding_3[3]; /* [0x020..0x028] */
- u32 rd_engine_en; /* 0x02c */
- u32 rd_doorbell; /* 0x030 */
- u32 padding_4; /* 0x034 */
- u32 rd_ch_arb_weight_low; /* 0x038 */
- u32 rd_ch_arb_weight_high; /* 0x03c */
- u32 padding_5[3]; /* [0x040..0x048] */
+ u32 ctrl_data_arb_prior; /* 0x0000 */
+ u32 padding_1; /* 0x0004 */
+ u32 ctrl; /* 0x0008 */
+ u32 wr_engine_en; /* 0x000c */
+ u32 wr_doorbell; /* 0x0010 */
+ u32 padding_2; /* 0x0014 */
+ union {
+ u64 reg; /* 0x0018..0x001c */
+ struct {
+ u32 lsb; /* 0x0018 */
+ u32 msb; /* 0x001c */
+ };
+ } wr_ch_arb_weight;
+ u32 padding_3[3]; /* 0x0020..0x0028 */
+ u32 rd_engine_en; /* 0x002c */
+ u32 rd_doorbell; /* 0x0030 */
+ u32 padding_4; /* 0x0034 */
+ union {
+ u64 reg; /* 0x0038..0x003c */
+ struct {
+ u32 lsb; /* 0x0038 */
+ u32 msb; /* 0x003c */
+ };
+ } rd_ch_arb_weight;
+ u32 padding_5[3]; /* 0x0040..0x0048 */
/* eDMA interrupts registers */
- u32 wr_int_status; /* 0x04c */
- u32 padding_6; /* 0x050 */
- u32 wr_int_mask; /* 0x054 */
- u32 wr_int_clear; /* 0x058 */
- u32 wr_err_status; /* 0x05c */
- u32 wr_done_imwr_low; /* 0x060 */
- u32 wr_done_imwr_high; /* 0x064 */
- u32 wr_abort_imwr_low; /* 0x068 */
- u32 wr_abort_imwr_high; /* 0x06c */
- u32 wr_ch01_imwr_data; /* 0x070 */
- u32 wr_ch23_imwr_data; /* 0x074 */
- u32 wr_ch45_imwr_data; /* 0x078 */
- u32 wr_ch67_imwr_data; /* 0x07c */
- u32 padding_7[4]; /* [0x080..0x08c] */
- u32 wr_linked_list_err_en; /* 0x090 */
- u32 padding_8[3]; /* [0x094..0x09c] */
- u32 rd_int_status; /* 0x0a0 */
- u32 padding_9; /* 0x0a4 */
- u32 rd_int_mask; /* 0x0a8 */
- u32 rd_int_clear; /* 0x0ac */
- u32 padding_10; /* 0x0b0 */
- u32 rd_err_status_low; /* 0x0b4 */
- u32 rd_err_status_high; /* 0x0b8 */
- u32 padding_11[2]; /* [0x0bc..0x0c0] */
- u32 rd_linked_list_err_en; /* 0x0c4 */
- u32 padding_12; /* 0x0c8 */
- u32 rd_done_imwr_low; /* 0x0cc */
- u32 rd_done_imwr_high; /* 0x0d0 */
- u32 rd_abort_imwr_low; /* 0x0d4 */
- u32 rd_abort_imwr_high; /* 0x0d8 */
- u32 rd_ch01_imwr_data; /* 0x0dc */
- u32 rd_ch23_imwr_data; /* 0x0e0 */
- u32 rd_ch45_imwr_data; /* 0x0e4 */
- u32 rd_ch67_imwr_data; /* 0x0e8 */
- u32 padding_13[4]; /* [0x0ec..0x0f8] */
+ u32 wr_int_status; /* 0x004c */
+ u32 padding_6; /* 0x0050 */
+ u32 wr_int_mask; /* 0x0054 */
+ u32 wr_int_clear; /* 0x0058 */
+ u32 wr_err_status; /* 0x005c */
+ union {
+ u64 reg; /* 0x0060..0x0064 */
+ struct {
+ u32 lsb; /* 0x0060 */
+ u32 msb; /* 0x0064 */
+ };
+ } wr_done_imwr;
+ union {
+ u64 reg; /* 0x0068..0x006c */
+ struct {
+ u32 lsb; /* 0x0068 */
+ u32 msb; /* 0x006c */
+ };
+ } wr_abort_imwr;
+ u32 wr_ch01_imwr_data; /* 0x0070 */
+ u32 wr_ch23_imwr_data; /* 0x0074 */
+ u32 wr_ch45_imwr_data; /* 0x0078 */
+ u32 wr_ch67_imwr_data; /* 0x007c */
+ u32 padding_7[4]; /* 0x0080..0x008c */
+ u32 wr_linked_list_err_en; /* 0x0090 */
+ u32 padding_8[3]; /* 0x0094..0x009c */
+ u32 rd_int_status; /* 0x00a0 */
+ u32 padding_9; /* 0x00a4 */
+ u32 rd_int_mask; /* 0x00a8 */
+ u32 rd_int_clear; /* 0x00ac */
+ u32 padding_10; /* 0x00b0 */
+ union {
+ u64 reg; /* 0x00b4..0x00b8 */
+ struct {
+ u32 lsb; /* 0x00b4 */
+ u32 msb; /* 0x00b8 */
+ };
+ } rd_err_status;
+ u32 padding_11[2]; /* 0x00bc..0x00c0 */
+ u32 rd_linked_list_err_en; /* 0x00c4 */
+ u32 padding_12; /* 0x00c8 */
+ union {
+ u64 reg; /* 0x00cc..0x00d0 */
+ struct {
+ u32 lsb; /* 0x00cc */
+ u32 msb; /* 0x00d0 */
+ };
+ } rd_done_imwr;
+ union {
+ u64 reg; /* 0x00d4..0x00d8 */
+ struct {
+ u32 lsb; /* 0x00d4 */
+ u32 msb; /* 0x00d8 */
+ };
+ } rd_abort_imwr;
+ u32 rd_ch01_imwr_data; /* 0x00dc */
+ u32 rd_ch23_imwr_data; /* 0x00e0 */
+ u32 rd_ch45_imwr_data; /* 0x00e4 */
+ u32 rd_ch67_imwr_data; /* 0x00e8 */
+ u32 padding_13[4]; /* 0x00ec..0x00f8 */
/* eDMA channel context grouping */
union dw_edma_v0_type {
- struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */
- struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */
+ struct dw_edma_v0_legacy legacy; /* 0x00f8..0x0120 */
+ struct dw_edma_v0_unroll unroll; /* 0x00f8..0x1120 */
} type;
-};
+} __packed;
struct dw_edma_v0_lli {
u32 control;
u32 transfer_size;
- u32 sar_low;
- u32 sar_high;
- u32 dar_low;
- u32 dar_high;
-};
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } sar;
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } dar;
+} __packed;
struct dw_edma_v0_llp {
u32 control;
u32 reserved;
- u32 llp_low;
- u32 llp_high;
-};
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } llp;
+} __packed;
#endif /* _DW_EDMA_V0_REGS_H */
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 8978b898d777..6d11558756f8 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
+
+idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 0db9b82ed8cf..302cba5ff779 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -39,15 +39,15 @@ struct idxd_user_context {
struct iommu_sva *sva;
};
-enum idxd_cdev_cleanup {
- CDEV_NORMAL = 0,
- CDEV_FAILED,
-};
-
static void idxd_cdev_dev_release(struct device *dev)
{
- dev_dbg(dev, "releasing cdev device\n");
- kfree(dev);
+ struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
+ struct idxd_cdev_context *cdev_ctx;
+ struct idxd_wq *wq = idxd_cdev->wq;
+
+ cdev_ctx = &ictx[wq->idxd->data->type];
+ ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ kfree(idxd_cdev);
}
static struct device_type idxd_cdev_device_type = {
@@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
return container_of(cdev, struct idxd_cdev, cdev);
}
-static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
-{
- return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
-}
-
static inline struct idxd_wq *inode_wq(struct inode *inode)
{
- return idxd_cdev_wq(inode_idxd_cdev(inode));
+ struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
+
+ return idxd_cdev->wq;
}
static int idxd_cdev_open(struct inode *inode, struct file *filp)
@@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
unsigned long flags;
__poll_t out = 0;
- poll_wait(filp, &idxd_cdev->err_queue, wait);
+ poll_wait(filp, &wq->err_queue, wait);
spin_lock_irqsave(&idxd->dev_lock, flags);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
@@ -243,101 +239,69 @@ static const struct file_operations idxd_cdev_fops = {
int idxd_cdev_get_major(struct idxd_device *idxd)
{
- return MAJOR(ictx[idxd->type].devt);
+ return MAJOR(ictx[idxd->data->type].devt);
}
-static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+int idxd_wq_add_cdev(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
- struct idxd_cdev_context *cdev_ctx;
+ struct idxd_cdev *idxd_cdev;
+ struct cdev *cdev;
struct device *dev;
- int minor, rc;
+ struct idxd_cdev_context *cdev_ctx;
+ int rc, minor;
- idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
- if (!idxd_cdev->dev)
+ idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
+ if (!idxd_cdev)
return -ENOMEM;
- dev = idxd_cdev->dev;
- dev->parent = &idxd->pdev->dev;
- dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
- idxd->id, wq->id);
- dev->bus = idxd_get_bus_type(idxd);
-
- cdev_ctx = &ictx[wq->idxd->type];
+ idxd_cdev->wq = wq;
+ cdev = &idxd_cdev->cdev;
+ dev = &idxd_cdev->dev;
+ cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
- rc = minor;
- kfree(dev);
- goto ida_err;
- }
-
- dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
- dev->type = &idxd_cdev_device_type;
- rc = device_register(dev);
- if (rc < 0) {
- dev_err(&idxd->pdev->dev, "device register failed\n");
- goto dev_reg_err;
+ kfree(idxd_cdev);
+ return minor;
}
idxd_cdev->minor = minor;
- return 0;
-
- dev_reg_err:
- ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
- put_device(dev);
- ida_err:
- idxd_cdev->dev = NULL;
- return rc;
-}
-
-static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
- enum idxd_cdev_cleanup cdev_state)
-{
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
- struct idxd_cdev_context *cdev_ctx;
-
- cdev_ctx = &ictx[wq->idxd->type];
- if (cdev_state == CDEV_NORMAL)
- cdev_del(&idxd_cdev->cdev);
- device_unregister(idxd_cdev->dev);
- /*
- * The device_type->release() will be called on the device and free
- * the allocated struct device. We can just forget it.
- */
- ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
- idxd_cdev->dev = NULL;
- idxd_cdev->minor = -1;
-}
-
-int idxd_wq_add_cdev(struct idxd_wq *wq)
-{
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
- struct cdev *cdev = &idxd_cdev->cdev;
- struct device *dev;
- int rc;
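+	/*
+	 * dev is embedded in idxd_cdev, and idxd_cdev_dev_release() frees
+	 * both the minor and the allocation, so the error path below only
+	 * needs put_device().
+	 */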
+ device_initialize(dev);
+ dev->parent = &wq->conf_dev;
+ dev->bus = &dsa_bus_type;
+ dev->type = &idxd_cdev_device_type;
+ dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
- rc = idxd_wq_cdev_dev_setup(wq);
+ rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
if (rc < 0)
- return rc;
+ goto err;
- dev = idxd_cdev->dev;
+ wq->idxd_cdev = idxd_cdev;
cdev_init(cdev, &idxd_cdev_fops);
- cdev_set_parent(cdev, &dev->kobj);
- rc = cdev_add(cdev, dev->devt, 1);
+ rc = cdev_device_add(cdev, dev);
if (rc) {
dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
- idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
- return rc;
+ goto err;
}
- init_waitqueue_head(&idxd_cdev->err_queue);
return 0;
+
+ err:
+ put_device(dev);
+ wq->idxd_cdev = NULL;
+ return rc;
}
void idxd_wq_del_cdev(struct idxd_wq *wq)
{
- idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+ struct idxd_cdev *idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+
+ cdev_ctx = &ictx[wq->idxd->data->type];
+ idxd_cdev = wq->idxd_cdev;
+ wq->idxd_cdev = NULL;
+ cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
+ put_device(&idxd_cdev->dev);
}
int idxd_cdev_register(void)
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 31c819544a22..420b93fe5feb 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
- struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
+ struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
pci_msi_mask_irq(data);
}
@@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
- struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
+ struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
pci_msi_unmask_irq(data);
}
@@ -47,6 +47,7 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd)
genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
genctrl.softerr_int_en = 1;
+ genctrl.halt_int_en = 1;
iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
@@ -56,6 +57,7 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd)
genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
genctrl.softerr_int_en = 0;
+ genctrl.halt_int_en = 0;
iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
@@ -144,14 +146,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (rc < 0)
return rc;
- if (idxd->type == IDXD_TYPE_DSA)
- align = 32;
- else if (idxd->type == IDXD_TYPE_IAX)
- align = 64;
- else
- return -ENODEV;
-
- wq->compls_size = num_descs * idxd->compl_size + align;
+ align = idxd->data->align;
+ wq->compls_size = num_descs * idxd->data->compl_size + align;
wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
&wq->compls_addr_raw, GFP_KERNEL);
if (!wq->compls_raw) {
@@ -178,16 +174,14 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
struct idxd_desc *desc = wq->descs[i];
desc->hw = wq->hw_descs[i];
- if (idxd->type == IDXD_TYPE_DSA)
+ if (idxd->data->type == IDXD_TYPE_DSA)
desc->completion = &wq->compls[i];
- else if (idxd->type == IDXD_TYPE_IAX)
+ else if (idxd->data->type == IDXD_TYPE_IAX)
desc->iax_completion = &wq->iax_compls[i];
- desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
+ desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
desc->id = i;
desc->wq = wq;
desc->cpu = -1;
- dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
- desc->txd.tx_submit = idxd_dma_tx_submit;
}
return 0;
@@ -320,6 +314,19 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
struct device *dev = &wq->idxd->pdev->dev;
devm_iounmap(dev, wq->portal);
+ wq->portal = NULL;
+}
+
+void idxd_wqs_unmap_portal(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ if (wq->portal)
+ idxd_wq_unmap_portal(wq);
+ }
}
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
@@ -392,6 +399,32 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
memset(wq->name, 0, WQ_NAME_SIZE);
}
+static void idxd_wq_ref_release(struct percpu_ref *ref)
+{
+ struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);
+
+ complete(&wq->wq_dead);
+}
+
+int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
+{
+ int rc;
+
+ memset(&wq->wq_active, 0, sizeof(wq->wq_active));
+ rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+ reinit_completion(&wq->wq_dead);
+ return 0;
+}
+
+void idxd_wq_quiesce(struct idxd_wq *wq)
+{
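+	/* Fail new submitters, then wait for in-flight work to drop the last ref. */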
+ percpu_ref_kill(&wq->wq_active);
+ wait_for_completion(&wq->wq_dead);
+ percpu_ref_exit(&wq->wq_active);
+}
+
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
@@ -432,13 +465,13 @@ int idxd_device_init_reset(struct idxd_device *idxd)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
IDXD_CMDSTS_ACTIVE)
cpu_relax();
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
return 0;
}
@@ -451,7 +484,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
- *status = IDXD_CMDSTS_HW_ERR;
+ if (status)
+ *status = IDXD_CMDSTS_HW_ERR;
return;
}
@@ -460,10 +494,10 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
- idxd->dev_lock);
+ idxd->cmd_lock);
dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
__func__, cmd_code, operand);
@@ -477,9 +511,9 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
wait_for_completion(&done);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
if (status) {
*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
idxd->cmd_status = *status & GENMASK(7, 0);
@@ -488,7 +522,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
}
int idxd_device_enable(struct idxd_device *idxd)
@@ -521,7 +555,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
lockdep_assert_held(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
@@ -579,6 +613,77 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
dev_dbg(dev, "pasid %d drained\n", pasid);
}
+int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
+ enum idxd_interrupt_type irq_type)
+{
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand, status;
+
+ if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
+ return -EOPNOTSUPP;
+
+ dev_dbg(dev, "get int handle, idx %d\n", idx);
+
+ operand = idx & GENMASK(15, 0);
+ if (irq_type == IDXD_IRQ_IMS)
+ operand |= CMD_INT_HANDLE_IMS;
+
+ dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);
+
+ idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);
+
+ if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "request int handle failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
+
+ dev_dbg(dev, "int handle acquired: %u\n", *handle);
+ return 0;
+}
+
+int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
+ enum idxd_interrupt_type irq_type)
+{
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand, status;
+ union idxd_command_reg cmd;
+ unsigned long flags;
+
+ if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
+ return -EOPNOTSUPP;
+
+ dev_dbg(dev, "release int handle, handle %d\n", handle);
+
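+	/*
+	 * This command is issued and polled directly rather than going
+	 * through idxd_cmd_exec(), which would sleep on the command
+	 * interrupt.
+	 */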
+ memset(&cmd, 0, sizeof(cmd));
+ operand = handle & GENMASK(15, 0);
+
+ if (irq_type == IDXD_IRQ_IMS)
+ operand |= CMD_INT_HANDLE_IMS;
+
+ cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
+ cmd.operand = operand;
+
+ dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
+
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
+ iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+ while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
+ cpu_relax();
+ status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+
+ if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "release int handle failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ dev_dbg(dev, "int handle released.\n");
+ return 0;
+}
+
/* Device configuration bits */
void idxd_msix_perm_setup(struct idxd_device *idxd)
{
@@ -660,7 +765,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
idxd_group_config_write(group);
}
@@ -739,7 +844,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
int i, rc;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
rc = idxd_wq_config_write(wq);
if (rc < 0)
@@ -755,7 +860,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
/* TC-A 0 and TC-B 1 should be defaults */
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
if (group->tc_a == -1)
group->tc_a = group->grpcfg.flags.tc_a = 0;
@@ -782,12 +887,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
struct idxd_group *group;
for (i = 0; i < idxd->max_groups; i++) {
- group = &idxd->groups[i];
+ group = idxd->groups[i];
group->grpcfg.engines = 0;
}
for (i = 0; i < idxd->max_engines; i++) {
- eng = &idxd->engines[i];
+ eng = idxd->engines[i];
group = eng->group;
if (!group)
@@ -811,13 +916,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
for (i = 0; i < idxd->max_groups; i++) {
- group = &idxd->groups[i];
+ group = idxd->groups[i];
for (j = 0; j < 4; j++)
group->grpcfg.wqs[j] = 0;
}
for (i = 0; i < idxd->max_wqs; i++) {
- wq = &idxd->wqs[i];
+ wq = idxd->wqs[i];
group = wq->group;
if (!wq->group)
@@ -865,3 +970,119 @@ int idxd_device_config(struct idxd_device *idxd)
return 0;
}
+
+static int idxd_wq_load_config(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int wqcfg_offset;
+ int i;
+
+ wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
+ memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
+
+ wq->size = wq->wqcfg->wq_size;
+ wq->threshold = wq->wqcfg->wq_thresh;
+ if (wq->wqcfg->priv)
+ wq->type = IDXD_WQT_KERNEL;
+
+ /* The driver does not support shared WQ mode in read-only config yet */
+ if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
+ return -EOPNOTSUPP;
+
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+
+ wq->priority = wq->wqcfg->priority;
+
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
+ }
+
+ return 0;
+}
+
+static void idxd_group_load_config(struct idxd_group *group)
+{
+ struct idxd_device *idxd = group->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i, j, grpcfg_offset;
+
+ /*
+ * Load WQS bit fields
+	 * Iterate through all 256 bits, 64 bits at a time
+ */
+ for (i = 0; i < GRPWQCFG_STRIDES; i++) {
+ struct idxd_wq *wq;
+
+ grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
+ group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+ group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);
+
+ if (i * 64 >= idxd->max_wqs)
+ break;
+
+ /* Iterate through all 64 bits and check for wq set */
+ for (j = 0; j < 64; j++) {
+ int id = i * 64 + j;
+
+ /* No need to check beyond max wqs */
+ if (id >= idxd->max_wqs)
+ break;
+
+ /* Set group assignment for wq if wq bit is set */
+ if (group->grpcfg.wqs[i] & BIT(j)) {
+ wq = idxd->wqs[id];
+ wq->group = group;
+ }
+ }
+ }
+
+ grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
+ group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+ grpcfg_offset, group->grpcfg.engines);
+
+ /* Iterate through all 64 bits to check engines set */
+ for (i = 0; i < 64; i++) {
+ if (i >= idxd->max_engines)
+ break;
+
+ if (group->grpcfg.engines & BIT(i)) {
+ struct idxd_engine *engine = idxd->engines[i];
+
+ engine->group = group;
+ }
+ }
+
+ grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
+ group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->id, grpcfg_offset, group->grpcfg.flags.bits);
+}
+
+int idxd_device_load_config(struct idxd_device *idxd)
+{
+ union gencfg_reg reg;
+ int i, rc;
+
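+	/* Mirror the device's programmed (read-only) configuration into driver state. */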
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ idxd->token_limit = reg.token_limit;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = idxd->groups[i];
+
+ idxd_group_load_config(group);
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ rc = idxd_wq_load_config(wq);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index a15e50126434..77439b645044 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -14,7 +14,10 @@
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
- return container_of(c, struct idxd_wq, dma_chan);
+ struct idxd_dma_chan *idxd_chan;
+
+ idxd_chan = container_of(c, struct idxd_dma_chan, chan);
+ return idxd_chan->wq;
}
void idxd_dma_complete_txd(struct idxd_desc *desc,
@@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct dma_chan *c = tx->chan;
struct idxd_wq *wq = to_idxd_wq(c);
@@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static void idxd_dma_release(struct dma_device *device)
{
+ struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
+
+ kfree(idxd_dma);
}
int idxd_register_dma_device(struct idxd_device *idxd)
{
- struct dma_device *dma = &idxd->dma_dev;
+ struct idxd_dma_dev *idxd_dma;
+ struct dma_device *dma;
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
+ if (!idxd_dma)
+ return -ENOMEM;
+
+ dma = &idxd_dma->dma;
INIT_LIST_HEAD(&dma->channels);
- dma->dev = &idxd->pdev->dev;
+ dma->dev = dev;
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
@@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
dma->device_free_chan_resources = idxd_dma_free_chan_resources;
- return dma_async_device_register(&idxd->dma_dev);
+ rc = dma_async_device_register(dma);
+ if (rc < 0) {
+ kfree(idxd_dma);
+ return rc;
+ }
+
+ idxd_dma->idxd = idxd;
+ /*
+ * This pointer is protected by the refs taken by the dma_chan. It will remain valid
+ * as long as there are outstanding channels.
+ */
+ idxd->idxd_dma = idxd_dma;
+ return 0;
}
void idxd_unregister_dma_device(struct idxd_device *idxd)
{
- dma_async_device_unregister(&idxd->dma_dev);
+ dma_async_device_unregister(&idxd->idxd_dma->dma);
}
int idxd_register_dma_channel(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct dma_device *dma = &idxd->dma_dev;
- struct dma_chan *chan = &wq->dma_chan;
- int rc;
+ struct dma_device *dma = &idxd->idxd_dma->dma;
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_dma_chan *idxd_chan;
+ struct dma_chan *chan;
+ int rc, i;
+
+ idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
+ if (!idxd_chan)
+ return -ENOMEM;
- memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+ chan = &idxd_chan->chan;
chan->device = dma;
list_add_tail(&chan->device_node, &dma->channels);
+
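+	/*
+	 * Descriptor txd init happens here now, since the dma_chan is
+	 * allocated per registration instead of being embedded in the wq.
+	 */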
+ for (i = 0; i < wq->num_descs; i++) {
+ struct idxd_desc *desc = wq->descs[i];
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = idxd_dma_tx_submit;
+ }
+
rc = dma_async_device_channel_register(dma, chan);
- if (rc < 0)
+ if (rc < 0) {
+ kfree(idxd_chan);
return rc;
+ }
+
+ wq->idxd_chan = idxd_chan;
+ idxd_chan->wq = wq;
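+	/* Pin the wq device while the channel exists; dropped in idxd_unregister_dma_channel(). */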
+ get_device(&wq->conf_dev);
return 0;
}
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
- struct dma_chan *chan = &wq->dma_chan;
+ struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
+ struct dma_chan *chan = &idxd_chan->chan;
+ struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
- dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+ dma_async_device_channel_unregister(&idxd_dma->dma, chan);
list_del(&chan->device_node);
+ kfree(wq->idxd_chan);
+ wq->idxd_chan = NULL;
+ put_device(&wq->conf_dev);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 76014c14f473..26482c7d4c3a 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -8,12 +8,18 @@
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
+struct idxd_device;
+struct idxd_wq;
+
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
@@ -25,6 +31,7 @@ enum idxd_type {
};
#define IDXD_NAME_SIZE 128
+#define IDXD_PMU_EVENT_MAX 64
struct idxd_device_driver {
struct device_driver drv;
@@ -33,6 +40,7 @@ struct idxd_device_driver {
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
+ int vector;
struct llist_head pending_llist;
struct list_head work_list;
/*
@@ -56,6 +64,31 @@ struct idxd_group {
int tc_b;
};
+struct idxd_pmu {
+ struct idxd_device *idxd;
+
+ struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
+ int n_events;
+
+ DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);
+
+ struct pmu pmu;
+ char name[IDXD_NAME_SIZE];
+ int cpu;
+
+ int n_counters;
+ int counter_width;
+ int n_event_categories;
+
+ bool per_counter_caps_supported;
+ unsigned long supported_event_categories;
+
+ unsigned long supported_filters;
+ int n_filters;
+
+ struct hlist_node cpuhp_node;
+};
+
#define IDXD_MAX_PRIORITY 0xf
enum idxd_wq_state {
@@ -75,10 +108,10 @@ enum idxd_wq_type {
};
struct idxd_cdev {
+ struct idxd_wq *wq;
struct cdev cdev;
- struct device *dev;
+ struct device dev;
int minor;
- struct wait_queue_head err_queue;
};
#define IDXD_ALLOCATED_BATCH_SIZE 128U
@@ -96,10 +129,18 @@ enum idxd_complete_type {
IDXD_COMPLETE_DEV_FAIL,
};
+struct idxd_dma_chan {
+ struct dma_chan chan;
+ struct idxd_wq *wq;
+};
+
struct idxd_wq {
void __iomem *portal;
+ struct percpu_ref wq_active;
+ struct completion wq_dead;
struct device conf_dev;
- struct idxd_cdev idxd_cdev;
+ struct idxd_cdev *idxd_cdev;
+ struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
enum idxd_wq_type type;
@@ -125,7 +166,7 @@ struct idxd_wq {
int compls_size;
struct idxd_desc **descs;
struct sbitmap_queue sbq;
- struct dma_chan dma_chan;
+ struct idxd_dma_chan *idxd_chan;
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
@@ -147,6 +188,7 @@ struct idxd_hw {
union group_cap_reg group_cap;
union engine_cap_reg engine_cap;
struct opcap opcap;
+ u32 cmd_cap;
};
enum idxd_device_state {
@@ -162,9 +204,22 @@ enum idxd_device_flag {
IDXD_FLAG_PASID_ENABLED,
};
-struct idxd_device {
+struct idxd_dma_dev {
+ struct idxd_device *idxd;
+ struct dma_device dma;
+};
+
+struct idxd_driver_data {
+ const char *name_prefix;
enum idxd_type type;
+ struct device_type *dev_type;
+ int compl_size;
+ int align;
+};
+
+struct idxd_device {
struct device conf_dev;
+ struct idxd_driver_data *data;
struct list_head list;
struct idxd_hw hw;
enum idxd_device_state state;
@@ -177,10 +232,11 @@ struct idxd_device {
void __iomem *reg_base;
spinlock_t dev_lock; /* spinlock for device */
+ spinlock_t cmd_lock; /* spinlock for device commands */
struct completion *cmd_done;
- struct idxd_group *groups;
- struct idxd_wq *wqs;
- struct idxd_engine *engines;
+ struct idxd_group **groups;
+ struct idxd_wq **wqs;
+ struct idxd_engine **engines;
struct iommu_sva *sva;
unsigned int pasid;
@@ -202,17 +258,19 @@ struct idxd_device {
int token_limit;
int nr_tokens; /* non-reserved tokens */
unsigned int wqcfg_size;
- int compl_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
- struct msix_entry *msix_entries;
int num_wq_irqs;
struct idxd_irq_entry *irq_entries;
- struct dma_device dma_dev;
+ struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
struct work_struct work;
+
+ int *int_handles;
+
+ struct idxd_pmu *idxd_pmu;
};
/* IDXD software descriptor */
@@ -232,6 +290,7 @@ struct idxd_desc {
struct list_head list;
int id;
int cpu;
+ unsigned int vector;
struct idxd_wq *wq;
};
@@ -242,6 +301,44 @@ extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
+extern struct ida idxd_ida;
+extern struct device_type dsa_device_type;
+extern struct device_type iax_device_type;
+extern struct device_type idxd_wq_device_type;
+extern struct device_type idxd_engine_device_type;
+extern struct device_type idxd_group_device_type;
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return dev->type == &dsa_device_type;
+}
+
+static inline bool is_iax_dev(struct device *dev)
+{
+ return dev->type == &iax_device_type;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev) || is_iax_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return dev->type == &idxd_wq_device_type;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_USER;
+}
static inline bool wq_dedicated(struct idxd_wq *wq)
{
@@ -268,6 +365,11 @@ enum idxd_portal_prot {
IDXD_PORTAL_LIMITED,
};
+enum idxd_interrupt_type {
+ IDXD_IRQ_MSIX = 0,
+ IDXD_IRQ_IMS,
+};
+
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
return prot * 0x1000;
@@ -279,18 +381,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
-static inline void idxd_set_type(struct idxd_device *idxd)
-{
- struct pci_dev *pdev = idxd->pdev;
-
- if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
- idxd->type = IDXD_TYPE_DSA;
- else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
- idxd->type = IDXD_TYPE_IAX;
- else
- idxd->type = IDXD_TYPE_UNKNOWN;
-}
-
static inline void idxd_wq_get(struct idxd_wq *wq)
{
wq->client_count++;
@@ -306,19 +396,17 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
-const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
-int idxd_setup_sysfs(struct idxd_device *idxd);
-void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_devices(struct idxd_device *idxd);
+void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
-struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+void idxd_wqs_quiesce(struct idxd_device *idxd);
/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
-irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
@@ -336,8 +424,14 @@ void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
+int idxd_device_load_config(struct idxd_device *idxd);
+int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
+ enum idxd_interrupt_type irq_type);
+int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
+ enum idxd_interrupt_type irq_type);
/* work queue control */
+void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
@@ -349,6 +443,8 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
+void idxd_wq_quiesce(struct idxd_wq *wq);
+int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
@@ -363,7 +459,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type);
-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
/* cdev */
int idxd_cdev_register(void);
@@ -372,4 +467,19 @@ int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
+/* perfmon */
+#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
+int perfmon_pmu_init(struct idxd_device *idxd);
+void perfmon_pmu_remove(struct idxd_device *idxd);
+void perfmon_counter_overflow(struct idxd_device *idxd);
+void perfmon_init(void);
+void perfmon_exit(void);
+#else
+static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
+static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
+static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
+static inline void perfmon_init(void) {}
+static inline void perfmon_exit(void) {}
+#endif
+
#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 6584b0ec07d5..2a926bef87f2 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -21,6 +21,7 @@
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
+#include "perfmon.h"
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
@@ -33,35 +34,39 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
#define DRV_NAME "idxd"
bool support_enqcmd;
-
-static struct idr idxd_idrs[IDXD_TYPE_MAX];
-static DEFINE_MUTEX(idxd_idr_lock);
+DEFINE_IDA(idxd_ida);
+
+static struct idxd_driver_data idxd_driver_data[] = {
+ [IDXD_TYPE_DSA] = {
+ .name_prefix = "dsa",
+ .type = IDXD_TYPE_DSA,
+ .compl_size = sizeof(struct dsa_completion_record),
+ .align = 32,
+ .dev_type = &dsa_device_type,
+ },
+ [IDXD_TYPE_IAX] = {
+ .name_prefix = "iax",
+ .type = IDXD_TYPE_IAX,
+ .compl_size = sizeof(struct iax_completion_record),
+ .align = 64,
+ .dev_type = &iax_device_type,
+ },
+};
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+ { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
/* IAX ver 1.0 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
+ { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
-static char *idxd_name[] = {
- "dsa",
- "iax"
-};
-
-const char *idxd_get_dev_name(struct idxd_device *idxd)
-{
- return idxd_name[idxd->type];
-}
-
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
- struct msix_entry *msix;
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
@@ -69,23 +74,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
dev_err(dev, "Not MSI-X interrupt capable.\n");
- goto err_no_irq;
+ return -ENOSPC;
}
- idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
- msixcnt, GFP_KERNEL);
- if (!idxd->msix_entries) {
- rc = -ENOMEM;
- goto err_no_irq;
- }
-
- for (i = 0; i < msixcnt; i++)
- idxd->msix_entries[i].entry = i;
-
- rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
- if (rc) {
- dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
- goto err_no_irq;
+ rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
+ if (rc != msixcnt) {
+ dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
+ return -ENOSPC;
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
@@ -93,119 +88,266 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
* We implement 1 completion list per MSI-X entry except for
* entry 0, which is for errors and others.
*/
- idxd->irq_entries = devm_kcalloc(dev, msixcnt,
- sizeof(struct idxd_irq_entry),
- GFP_KERNEL);
+ idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
+ GFP_KERNEL, dev_to_node(dev));
if (!idxd->irq_entries) {
rc = -ENOMEM;
- goto err_no_irq;
+ goto err_irq_entries;
}
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
+ idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
spin_lock_init(&idxd->irq_entries[i].list_lock);
}
- msix = &idxd->msix_entries[0];
irq_entry = &idxd->irq_entries[0];
- rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
- idxd_misc_thread, 0, "idxd-misc",
- irq_entry);
+ rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
+ 0, "idxd-misc", irq_entry);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
- goto err_no_irq;
+ goto err_misc_irq;
}
- dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
- msix->vector);
+ dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
/* first MSI-X entry is not for wq interrupts */
idxd->num_wq_irqs = msixcnt - 1;
for (i = 1; i < msixcnt; i++) {
- msix = &idxd->msix_entries[i];
irq_entry = &idxd->irq_entries[i];
init_llist_head(&idxd->irq_entries[i].pending_llist);
INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
- rc = devm_request_threaded_irq(dev, msix->vector,
- idxd_irq_handler,
- idxd_wq_thread, 0,
- "idxd-portal", irq_entry);
+ rc = request_threaded_irq(irq_entry->vector, NULL,
+ idxd_wq_thread, 0, "idxd-portal", irq_entry);
if (rc < 0) {
- dev_err(dev, "Failed to allocate irq %d.\n",
- msix->vector);
- goto err_no_irq;
+ dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
+ goto err_wq_irqs;
+ }
+
+ dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
+ /*
+ * The MSIX vector enumeration starts at 1 with vector 0 being the
+ * misc interrupt that handles non I/O completion events. The
+ * interrupt handles are for IMS enumeration on guest. The misc
+ * interrupt vector does not require a handle and therefore we start
+ * the int_handles at index 0. Since 'i' starts at 1, the first
+ * int_handles index will be 0.
+ */
+ rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
+ IDXD_IRQ_MSIX);
+ if (rc < 0) {
+ free_irq(irq_entry->vector, irq_entry);
+ goto err_wq_irqs;
+ }
+ dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
}
- dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
- i, msix->vector);
}
idxd_unmask_error_interrupts(idxd);
idxd_msix_perm_setup(idxd);
return 0;
- err_no_irq:
+ err_wq_irqs:
+ while (--i >= 0) {
+ irq_entry = &idxd->irq_entries[i];
+ free_irq(irq_entry->vector, irq_entry);
+ if (i != 0)
+ idxd_device_release_int_handle(idxd,
+				idxd->int_handles[i - 1], IDXD_IRQ_MSIX);
+ }
+ err_misc_irq:
/* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
- pci_disable_msix(pdev);
+ err_irq_entries:
+ pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
}
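
The rewritten idxd_setup_interrupts() above drops the hand-rolled msix_entry array in favor of the managed pci_alloc_irq_vectors()/pci_irq_vector() API, asking for exactly msixcnt vectors (min == max) and unwinding by hand now that the devm_* wrappers are gone. A compressed sketch of that flow, under hypothetical foo_* names:

/* Sketch of the managed MSI-X flow; the foo_* names are hypothetical. */
#include <linux/pci.h>
#include <linux/interrupt.h>

static int foo_setup_irqs(struct pci_dev *pdev, irq_handler_t thread_fn, void *ctx)
{
	int nvec, i, rc;

	nvec = pci_msix_vec_count(pdev);
	if (nvec < 0)
		return nvec;

	/* min == max: fail outright unless every vector is available */
	rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps MSI-X index i to a Linux IRQ number.
		 * A NULL primary handler is fine here: MSI-X interrupts are
		 * oneshot-safe, so the core wakes the thread without
		 * requiring IRQF_ONESHOT.
		 */
		rc = request_threaded_irq(pci_irq_vector(pdev, i), NULL,
					  thread_fn, 0, "foo", ctx);
		if (rc) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), ctx);
			pci_free_irq_vectors(pdev);
			return rc;
		}
	}
	return 0;
}
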
-static int idxd_setup_internals(struct idxd_device *idxd)
+static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int i;
+ struct idxd_wq *wq;
+ int i, rc;
- init_waitqueue_head(&idxd->cmd_waitq);
- idxd->groups = devm_kcalloc(dev, idxd->max_groups,
- sizeof(struct idxd_group), GFP_KERNEL);
- if (!idxd->groups)
- return -ENOMEM;
-
- for (i = 0; i < idxd->max_groups; i++) {
- idxd->groups[i].idxd = idxd;
- idxd->groups[i].id = i;
- idxd->groups[i].tc_a = -1;
- idxd->groups[i].tc_b = -1;
- }
-
- idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
- GFP_KERNEL);
+ idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
if (!idxd->wqs)
return -ENOMEM;
- idxd->engines = devm_kcalloc(dev, idxd->max_engines,
- sizeof(struct idxd_engine), GFP_KERNEL);
- if (!idxd->engines)
- return -ENOMEM;
-
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+ goto err;
+ }
wq->id = i;
wq->idxd = idxd;
+ device_initialize(&wq->conf_dev);
+ wq->conf_dev.parent = &idxd->conf_dev;
+ wq->conf_dev.bus = &dsa_bus_type;
+ wq->conf_dev.type = &idxd_wq_device_type;
+ rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto err;
+ }
+
mutex_init(&wq->wq_lock);
- wq->idxd_cdev.minor = -1;
+ init_waitqueue_head(&wq->err_queue);
+ init_completion(&wq->wq_dead);
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
- wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
- if (!wq->wqcfg)
- return -ENOMEM;
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ if (!wq->wqcfg) {
+ put_device(&wq->conf_dev);
+ rc = -ENOMEM;
+ goto err;
+ }
+ idxd->wqs[i] = wq;
}
+ return 0;
+
+ err:
+ while (--i >= 0)
+ put_device(&idxd->wqs[i]->conf_dev);
+ return rc;
+}
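
idxd_setup_wqs() (and the engine/group variants below) switch from devm-managed flat arrays to individually refcounted sub-devices. The key rule is visible in the error paths: once device_initialize() has run, cleanup must go through put_device() so the device type's release callback frees the object, never a bare kfree(). A generic sketch of that lifecycle, with illustrative foo_* names:

/* Lifecycle sketch; foo_obj and foo_release() are illustrative, not driver API. */
#include <linux/device.h>
#include <linux/slab.h>

struct foo_obj {
	struct device conf_dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_obj, conf_dev));
}

static struct foo_obj *foo_obj_create(struct device *parent, int id)
{
	struct foo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	device_initialize(&obj->conf_dev);	/* refcount is live from here on */
	obj->conf_dev.parent = parent;
	obj->conf_dev.release = foo_release;

	if (dev_set_name(&obj->conf_dev, "foo%d", id)) {
		/* never kfree() after device_initialize(); drop the reference */
		put_device(&obj->conf_dev);
		return NULL;
	}
	return obj;
}
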
+
+static int idxd_setup_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->engines)
+ return -ENOMEM;
+
for (i = 0; i < idxd->max_engines; i++) {
- idxd->engines[i].idxd = idxd;
- idxd->engines[i].id = i;
+ engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
+ if (!engine) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ engine->id = i;
+ engine->idxd = idxd;
+ device_initialize(&engine->conf_dev);
+ engine->conf_dev.parent = &idxd->conf_dev;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto err;
+ }
+
+ idxd->engines[i] = engine;
}
- idxd->wq = create_workqueue(dev_name(dev));
- if (!idxd->wq)
+ return 0;
+
+ err:
+ while (--i >= 0)
+ put_device(&idxd->engines[i]->conf_dev);
+ return rc;
+}
+
+static int idxd_setup_groups(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_group *group;
+ int i, rc;
+
+ idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->groups)
return -ENOMEM;
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
+ if (!group) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ group->id = i;
+ group->idxd = idxd;
+ device_initialize(&group->conf_dev);
+ group->conf_dev.parent = &idxd->conf_dev;
+ group->conf_dev.bus = &dsa_bus_type;
+ group->conf_dev.type = &idxd_group_device_type;
+ rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto err;
+ }
+
+ idxd->groups[i] = group;
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
+
+ return 0;
+
+ err:
+ while (--i >= 0)
+ put_device(&idxd->groups[i]->conf_dev);
+ return rc;
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc, i;
+
+ init_waitqueue_head(&idxd->cmd_waitq);
+
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
+ idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL);
+ if (!idxd->int_handles)
+ return -ENOMEM;
+ }
+
+ rc = idxd_setup_wqs(idxd);
+ if (rc < 0)
+ goto err_wqs;
+
+ rc = idxd_setup_engines(idxd);
+ if (rc < 0)
+ goto err_engine;
+
+ rc = idxd_setup_groups(idxd);
+ if (rc < 0)
+ goto err_group;
+
+ idxd->wq = create_workqueue(dev_name(dev));
+ if (!idxd->wq) {
+ rc = -ENOMEM;
+ goto err_wkq_create;
+ }
+
return 0;
+
+ err_wkq_create:
+ for (i = 0; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
+ err_group:
+ for (i = 0; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
+ err_engine:
+ for (i = 0; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
+ err_wqs:
+ kfree(idxd->int_handles);
+ return rc;
}
static void idxd_read_table_offsets(struct idxd_device *idxd)
@@ -233,6 +375,12 @@ static void idxd_read_caps(struct idxd_device *idxd)
/* reading generic capabilities */
idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+
+ if (idxd->hw.gen_cap.cmd_cap) {
+ idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
+ dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
+ }
+
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
@@ -275,17 +423,34 @@ static void idxd_read_caps(struct idxd_device *idxd)
}
}
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
+ int rc;
- idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
if (!idxd)
return NULL;
idxd->pdev = pdev;
+ idxd->data = data;
+ idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
+	if (idxd->id < 0) {
+		kfree(idxd);
+		return NULL;
+	}
+
+ device_initialize(&idxd->conf_dev);
+ idxd->conf_dev.parent = dev;
+ idxd->conf_dev.bus = &dsa_bus_type;
+ idxd->conf_dev.type = idxd->data->dev_type;
+ rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return NULL;
+ }
+
spin_lock_init(&idxd->dev_lock);
+ spin_lock_init(&idxd->cmd_lock);
return idxd;
}
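
idxd_alloc() now draws the instance number from a single global IDA rather than per-type IDRs, so DSA and IAX devices share one numbering space and no external lock is needed around allocation. The IDA pair in a minimal, hypothetical form:

/* Minimal IDA usage sketch; foo_ida is hypothetical. */
#include <linux/idr.h>

static DEFINE_IDA(foo_ida);

static int foo_get_id(void)
{
	return ida_alloc(&foo_ida, GFP_KERNEL);	/* smallest free id, or -errno */
}

static void foo_put_id(int id)
{
	ida_free(&foo_ida, id);
}
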
@@ -338,11 +503,18 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- rc = idxd_enable_system_pasid(idxd);
- if (rc < 0)
- dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ if (rc == 0) {
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc < 0) {
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
+ } else {
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ }
+ } else {
+ dev_warn(dev, "Unable to turn on SVA feature.\n");
+ }
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -352,80 +524,75 @@ static int idxd_probe(struct idxd_device *idxd)
rc = idxd_setup_internals(idxd);
if (rc)
- goto err_setup;
+ goto err;
+
+	/* If the configs are read-only, load them from the device */
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ dev_dbg(dev, "Loading RO device config\n");
+ rc = idxd_device_load_config(idxd);
+ if (rc < 0)
+ goto err;
+ }
rc = idxd_setup_interrupts(idxd);
if (rc)
- goto err_setup;
+ goto err;
dev_dbg(dev, "IDXD interrupt setup complete.\n");
- mutex_lock(&idxd_idr_lock);
- idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
- mutex_unlock(&idxd_idr_lock);
- if (idxd->id < 0) {
- rc = -ENOMEM;
- goto err_idr_fail;
- }
-
idxd->major = idxd_cdev_get_major(idxd);
+ rc = perfmon_pmu_init(idxd);
+ if (rc < 0)
+ dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);
+
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
- err_idr_fail:
- idxd_mask_error_interrupts(idxd);
- idxd_mask_msix_vectors(idxd);
- err_setup:
+ err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
return rc;
}
-static void idxd_type_init(struct idxd_device *idxd)
-{
- if (idxd->type == IDXD_TYPE_DSA)
- idxd->compl_size = sizeof(struct dsa_completion_record);
- else if (idxd->type == IDXD_TYPE_IAX)
- idxd->compl_size = sizeof(struct iax_completion_record);
-}
-
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
+ struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
int rc;
- rc = pcim_enable_device(pdev);
+ rc = pci_enable_device(pdev);
if (rc)
return rc;
dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev);
- if (!idxd)
- return -ENOMEM;
+ idxd = idxd_alloc(pdev, data);
+ if (!idxd) {
+ rc = -ENOMEM;
+ goto err_idxd_alloc;
+ }
dev_dbg(dev, "Mapping BARs\n");
- idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base)
- return -ENOMEM;
+ idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
dev_dbg(dev, "Set DMA masks\n");
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
- return rc;
+ goto err;
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
- return rc;
-
- idxd_set_type(idxd);
-
- idxd_type_init(idxd);
+ goto err;
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
@@ -435,13 +602,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_probe(idxd);
if (rc) {
dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- return -ENODEV;
+ goto err;
}
- rc = idxd_setup_sysfs(idxd);
+ rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- return -ENODEV;
+ goto err;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -450,6 +617,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idxd->hw.version);
return 0;
+
+ err:
+ pci_iounmap(pdev, idxd->reg_base);
+ err_iomap:
+ put_device(&idxd->conf_dev);
+ err_idxd_alloc:
+ pci_disable_device(pdev);
+ return rc;
}
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
@@ -478,6 +653,36 @@ static void idxd_flush_work_list(struct idxd_irq_entry *ie)
}
}
+void idxd_wqs_quiesce(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
+ idxd_wq_quiesce(wq);
+ }
+}
+
+static void idxd_release_int_handles(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->num_wq_irqs; i++) {
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
+ rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+ IDXD_IRQ_MSIX);
+ if (rc < 0)
+ dev_warn(dev, "irq handle %d release failed\n",
+ idxd->int_handles[i]);
+ else
+				dev_dbg(dev, "int handle released: %u\n", idxd->int_handles[i]);
+ }
+ }
+}
+
static void idxd_shutdown(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
@@ -495,7 +700,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
- synchronize_irq(idxd->msix_entries[i].vector);
+ synchronize_irq(irq_entry->vector);
+ free_irq(irq_entry->vector, irq_entry);
if (i == 0)
continue;
idxd_flush_pending_llist(irq_entry);
@@ -503,6 +709,10 @@ static void idxd_shutdown(struct pci_dev *pdev)
}
idxd_msix_perm_clear(idxd);
+ idxd_release_int_handles(idxd);
+ pci_free_irq_vectors(pdev);
+ pci_iounmap(pdev, idxd->reg_base);
+ pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
}
@@ -511,13 +721,12 @@ static void idxd_remove(struct pci_dev *pdev)
struct idxd_device *idxd = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s called\n", __func__);
- idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- mutex_lock(&idxd_idr_lock);
- idr_remove(&idxd_idrs[idxd->type], idxd->id);
- mutex_unlock(&idxd_idr_lock);
+ idxd_unregister_devices(idxd);
+ perfmon_pmu_remove(idxd);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
}
static struct pci_driver idxd_pci_driver = {
@@ -530,7 +739,7 @@ static struct pci_driver idxd_pci_driver = {
static int __init idxd_init_module(void)
{
- int err, i;
+ int err;
/*
* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
@@ -546,8 +755,7 @@ static int __init idxd_init_module(void)
else
support_enqcmd = true;
- for (i = 0; i < IDXD_TYPE_MAX; i++)
- idr_init(&idxd_idrs[i]);
+ perfmon_init();
err = idxd_register_bus_type();
if (err < 0)
@@ -582,5 +790,6 @@ static void __exit idxd_exit_module(void)
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
+ perfmon_exit();
}
module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index f1463fc58112..ae68e1e5487a 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
goto out;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
rc = idxd_wq_enable(wq);
@@ -102,15 +102,6 @@ static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
return 0;
}
-irqreturn_t idxd_irq_handler(int vec, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
-
- idxd_mask_msix_vector(idxd, irq_entry->id);
- return IRQ_WAKE_THREAD;
-}
-
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
struct device *dev = &idxd->pdev->dev;
@@ -130,18 +121,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;
- struct idxd_wq *wq = &idxd->wqs[id];
+ struct idxd_wq *wq = idxd->wqs[id];
if (wq->type == IDXD_WQT_USER)
- wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ wake_up_interruptible(&wq->err_queue);
} else {
int i;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->type == IDXD_WQT_USER)
- wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ wake_up_interruptible(&wq->err_queue);
}
}
@@ -165,11 +156,8 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
}
if (cause & IDXD_INTC_PERFMON_OVFL) {
- /*
- * Driver does not utilize perfmon counter overflow interrupt
- * yet.
- */
val |= IDXD_INTC_PERFMON_OVFL;
+ perfmon_counter_overflow(idxd);
}
val ^= cause;
@@ -202,6 +190,8 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
queue_work(idxd->wq, &idxd->work);
} else {
spin_lock_bh(&idxd->dev_lock);
+ idxd_wqs_quiesce(idxd);
+ idxd_wqs_unmap_portal(idxd);
idxd_device_wqs_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
@@ -235,7 +225,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
}
- idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
@@ -392,8 +381,6 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
int processed;
processed = idxd_desc_process(irq_entry);
- idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
-
if (processed == 0)
return IRQ_NONE;
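
With the conversion to pci_alloc_irq_vectors(), the idxd_irq_handler() shim removed above becomes dead weight: passing a NULL primary handler to request_threaded_irq() makes the core use its default primary handler, which simply returns IRQ_WAKE_THREAD, and the explicit MSI-X mask/unmask bracketing is dropped along with it. The resulting shape, sketched generically:

/* Generic shape after dropping the wake-thread shim; foo_* is illustrative. */
#include <linux/interrupt.h>

static irqreturn_t foo_thread_fn(int irq, void *data)
{
	/* all handling runs in thread context */
	return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *ctx)
{
	/* NULL primary handler: the core's default primary just returns
	 * IRQ_WAKE_THREAD, which is all the old handler added beyond the
	 * now-unneeded vector masking.
	 */
	return request_threaded_irq(irq, NULL, foo_thread_fn, 0, "foo", ctx);
}
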
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
new file mode 100644
index 000000000000..d73004f47cf4
--- /dev/null
+++ b/drivers/dma/idxd/perfmon.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */
+
+#include <linux/sched/task.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "idxd.h"
+#include "perfmon.h"
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
+static cpumask_t perfmon_dsa_cpu_mask;
+static bool cpuhp_set_up;
+static enum cpuhp_state cpuhp_slot;
+
+/*
+ * perf userspace reads this attribute to determine which cpus to open
+ * counters on. It's connected to perfmon_dsa_cpu_mask, which is
+ * maintained by the cpu hotplug handlers.
+ */
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *perfmon_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+ .attrs = perfmon_cpumask_attrs,
+};
+
+/*
+ * These attributes specify the bits in the config word that the perf
+ * syscall uses to pass the event ids and categories to perfmon.
+ */
+DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3");
+DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31");
+
+/*
+ * These attributes specify the bits in the config1 word that the perf
+ * syscall uses to pass filter data to perfmon.
+ */
+DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31");
+DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39");
+DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43");
+DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51");
+DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59");
+
+#define PERFMON_FILTERS_START 2
+#define PERFMON_FILTERS_MAX 5
+
+static struct attribute *perfmon_format_attrs[] = {
+ &format_attr_idxd_event_category.attr,
+ &format_attr_idxd_event.attr,
+ &format_attr_idxd_filter_wq.attr,
+ &format_attr_idxd_filter_tc.attr,
+ &format_attr_idxd_filter_pgsz.attr,
+ &format_attr_idxd_filter_sz.attr,
+ &format_attr_idxd_filter_eng.attr,
+ NULL,
+};
+
+static struct attribute_group perfmon_format_attr_group = {
+ .name = "format",
+ .attrs = perfmon_format_attrs,
+};
+
+static const struct attribute_group *perfmon_attr_groups[] = {
+ &perfmon_format_attr_group,
+ &cpumask_attr_group,
+ NULL,
+};
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
+}
+
+static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
+{
+ return &idxd_pmu->pmu == event->pmu;
+}
+
+static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
+ struct perf_event *leader,
+ bool do_grp)
+{
+ struct perf_event *event;
+ int n, max_count;
+
+ max_count = idxd_pmu->n_counters;
+ n = idxd_pmu->n_events;
+
+ if (n >= max_count)
+ return -EINVAL;
+
+ if (is_idxd_event(idxd_pmu, leader)) {
+ idxd_pmu->event_list[n] = leader;
+ idxd_pmu->event_list[n]->hw.idx = n;
+ n++;
+ }
+
+ if (!do_grp)
+ return n;
+
+ for_each_sibling_event(event, leader) {
+ if (!is_idxd_event(idxd_pmu, event) ||
+ event->state <= PERF_EVENT_STATE_OFF)
+ continue;
+
+ if (n >= max_count)
+ return -EINVAL;
+
+ idxd_pmu->event_list[n] = event;
+ idxd_pmu->event_list[n]->hw.idx = n;
+ n++;
+ }
+
+ return n;
+}
+
+static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
+ struct perf_event *event, int idx)
+{
+ struct idxd_device *idxd = idxd_pmu->idxd;
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->idx = idx;
+ hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
+ hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
+}
+
+static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
+ struct perf_event *event)
+{
+ int i;
+
+ for (i = 0; i < IDXD_PMU_EVENT_MAX; i++)
+ if (!test_and_set_bit(i, idxd_pmu->used_mask))
+ return i;
+
+ return -EINVAL;
+}
+
+/*
+ * Check whether there are enough counters to satisfy that all the
+ * events in the group can actually be scheduled at the same time.
+ *
+ * To do this, create a fake idxd_pmu object so the event collection
+ * and assignment functions can be used without affecting the internal
+ * state of the real idxd_pmu object.
+ */
+static int perfmon_validate_group(struct idxd_pmu *pmu,
+ struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct idxd_pmu *fake_pmu;
+ int i, ret = 0, n, idx;
+
+ fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL);
+ if (!fake_pmu)
+ return -ENOMEM;
+
+ fake_pmu->pmu.name = pmu->pmu.name;
+ fake_pmu->n_counters = pmu->n_counters;
+
+ n = perfmon_collect_events(fake_pmu, leader, true);
+ if (n < 0) {
+ ret = n;
+ goto out;
+ }
+
+ fake_pmu->n_events = n;
+ n = perfmon_collect_events(fake_pmu, event, false);
+ if (n < 0) {
+ ret = n;
+ goto out;
+ }
+
+ fake_pmu->n_events = n;
+
+ for (i = 0; i < n; i++) {
+ event = fake_pmu->event_list[i];
+
+ idx = perfmon_assign_event(fake_pmu, event);
+ if (idx < 0) {
+ ret = idx;
+ goto out;
+ }
+ }
+out:
+ kfree(fake_pmu);
+
+ return ret;
+}
+
+static int perfmon_pmu_event_init(struct perf_event *event)
+{
+ struct idxd_device *idxd;
+ int ret = 0;
+
+ idxd = event_to_idxd(event);
+ event->hw.idx = -1;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* sampling not supported */
+ if (event->attr.sample_period)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->pmu != &idxd->idxd_pmu->pmu)
+ return -EINVAL;
+
+ event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
+ event->cpu = idxd->idxd_pmu->cpu;
+ event->hw.config = event->attr.config;
+
+ if (event->group_leader != event)
+ /* non-group events have themselves as leader */
+ ret = perfmon_validate_group(idxd->idxd_pmu, event);
+
+ return ret;
+}
+
+static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct idxd_device *idxd;
+ int cntr = hwc->idx;
+
+ idxd = event_to_idxd(event);
+
+ return ioread64(CNTRDATA_REG(idxd, cntr));
+}
+
+static void perfmon_pmu_event_update(struct perf_event *event)
+{
+ struct idxd_device *idxd = event_to_idxd(event);
+ u64 prev_raw_count, new_raw_count, delta, p, n;
+ int shift = 64 - idxd->idxd_pmu->counter_width;
+ struct hw_perf_event *hwc = &event->hw;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = perfmon_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ n = (new_raw_count << shift);
+ p = (prev_raw_count << shift);
+
+ delta = ((n - p) >> shift);
+
+ local64_add(delta, &event->count);
+}
+
+void perfmon_counter_overflow(struct idxd_device *idxd)
+{
+ int i, n_counters, max_loop = OVERFLOW_SIZE;
+ struct perf_event *event;
+ unsigned long ovfstatus;
+
+ n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
+
+ ovfstatus = ioread32(OVFSTATUS_REG(idxd));
+
+ /*
+ * While updating overflowed counters, other counters behind
+ * them could overflow and be missed in a given pass.
+ * Normally this could happen at most n_counters times, but in
+ * theory a tiny counter width could result in continual
+ * overflows and endless looping. max_loop provides a
+ * failsafe in that highly unlikely case.
+ */
+ while (ovfstatus && max_loop--) {
+ /* Figure out which counter(s) overflowed */
+ for_each_set_bit(i, &ovfstatus, n_counters) {
+ unsigned long ovfstatus_clear = 0;
+
+ /* Update event->count for overflowed counter */
+ event = idxd->idxd_pmu->event_list[i];
+ perfmon_pmu_event_update(event);
+ /* Writing 1 to OVFSTATUS bit clears it */
+ set_bit(i, &ovfstatus_clear);
+ iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
+ }
+
+ ovfstatus = ioread32(OVFSTATUS_REG(idxd));
+ }
+
+ /*
+ * Should never happen. If so, it means a counter(s) looped
+ * around twice while this handler was running.
+ */
+ WARN_ON_ONCE(ovfstatus);
+}
+
+static inline void perfmon_reset_config(struct idxd_device *idxd)
+{
+ iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
+ iowrite32(0, OVFSTATUS_REG(idxd));
+ iowrite32(0, PERFFRZ_REG(idxd));
+}
+
+static inline void perfmon_reset_counters(struct idxd_device *idxd)
+{
+ iowrite32(CNTR_RESET, PERFRST_REG(idxd));
+}
+
+static inline void perfmon_reset(struct idxd_device *idxd)
+{
+ perfmon_reset_config(idxd);
+ perfmon_reset_counters(idxd);
+}
+
+static void perfmon_pmu_event_start(struct perf_event *event, int mode)
+{
+ u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0;
+ u64 cntr_cfg, cntrdata, event_enc, event_cat = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ union filter_cfg flt_cfg;
+ union event_cfg event_cfg;
+ struct idxd_device *idxd;
+ int cntr;
+
+ idxd = event_to_idxd(event);
+
+ event->hw.idx = hwc->idx;
+ cntr = hwc->idx;
+
+ /* Obtain event category and event value from user space */
+ event_cfg.val = event->attr.config;
+ flt_cfg.val = event->attr.config1;
+ event_cat = event_cfg.event_cat;
+ event_enc = event_cfg.event_enc;
+
+ /* Obtain filter configuration from user space */
+ flt_wq = flt_cfg.wq;
+ flt_tc = flt_cfg.tc;
+ flt_pg_sz = flt_cfg.pg_sz;
+ flt_xfer_sz = flt_cfg.xfer_sz;
+ flt_eng = flt_cfg.eng;
+
+ if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
+ if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
+ if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
+ if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
+ if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));
+
+ /* Read the start value */
+ cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
+ local64_set(&event->hw.prev_count, cntrdata);
+
+ /* Set counter to event/category */
+ cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
+ cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
+ /* Set interrupt on overflow and counter enable bits */
+ cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE);
+
+ iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
+}
+
+static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct idxd_device *idxd;
+ int i, cntr = hwc->idx;
+ u64 cntr_cfg;
+
+ idxd = event_to_idxd(event);
+
+ /* remove this event from event list */
+ for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
+ if (event != idxd->idxd_pmu->event_list[i])
+ continue;
+
+ for (++i; i < idxd->idxd_pmu->n_events; i++)
+ idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
+ --idxd->idxd_pmu->n_events;
+ break;
+ }
+
+ cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
+ cntr_cfg &= ~CNTRCFG_ENABLE;
+ iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
+
+ if (mode == PERF_EF_UPDATE)
+ perfmon_pmu_event_update(event);
+
+ event->hw.idx = -1;
+ clear_bit(cntr, idxd->idxd_pmu->used_mask);
+}
+
+static void perfmon_pmu_event_del(struct perf_event *event, int mode)
+{
+ perfmon_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int perfmon_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct idxd_device *idxd = event_to_idxd(event);
+ struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx, n;
+
+ n = perfmon_collect_events(idxd_pmu, event, false);
+ if (n < 0)
+ return n;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
+
+ idx = perfmon_assign_event(idxd_pmu, event);
+ if (idx < 0)
+ return idx;
+
+ perfmon_assign_hw_event(idxd_pmu, event, idx);
+
+ if (flags & PERF_EF_START)
+ perfmon_pmu_event_start(event, 0);
+
+ idxd_pmu->n_events = n;
+
+ return 0;
+}
+
+static void enable_perfmon_pmu(struct idxd_device *idxd)
+{
+ iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
+}
+
+static void disable_perfmon_pmu(struct idxd_device *idxd)
+{
+ iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
+}
+
+static void perfmon_pmu_enable(struct pmu *pmu)
+{
+ struct idxd_device *idxd = pmu_to_idxd(pmu);
+
+ enable_perfmon_pmu(idxd);
+}
+
+static void perfmon_pmu_disable(struct pmu *pmu)
+{
+ struct idxd_device *idxd = pmu_to_idxd(pmu);
+
+ disable_perfmon_pmu(idxd);
+}
+
+static void skip_filter(int i)
+{
+ int j;
+
+ for (j = i; j < PERFMON_FILTERS_MAX; j++)
+ perfmon_format_attrs[PERFMON_FILTERS_START + j] =
+ perfmon_format_attrs[PERFMON_FILTERS_START + j + 1];
+}
+
+static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
+{
+ int i;
+
+ for (i = 0 ; i < PERFMON_FILTERS_MAX; i++) {
+ if (!test_bit(i, &idxd_pmu->supported_filters))
+ skip_filter(i);
+ }
+
+ idxd_pmu->pmu.name = idxd_pmu->name;
+ idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
+ idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
+ idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
+	idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
+	idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
+ idxd_pmu->pmu.add = perfmon_pmu_event_add;
+ idxd_pmu->pmu.del = perfmon_pmu_event_del;
+ idxd_pmu->pmu.start = perfmon_pmu_event_start;
+ idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
+ idxd_pmu->pmu.read = perfmon_pmu_event_update;
+ idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ idxd_pmu->pmu.module = THIS_MODULE;
+}
+
+void perfmon_pmu_remove(struct idxd_device *idxd)
+{
+ if (!idxd->idxd_pmu)
+ return;
+
+ cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
+ perf_pmu_unregister(&idxd->idxd_pmu->pmu);
+ kfree(idxd->idxd_pmu);
+ idxd->idxd_pmu = NULL;
+}
+
+static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct idxd_pmu *idxd_pmu;
+
+ idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
+
+ /* select the first online CPU as the designated reader */
+ if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
+ cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
+ idxd_pmu->cpu = cpu;
+ }
+
+ return 0;
+}
+
+static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct idxd_pmu *idxd_pmu;
+ unsigned int target;
+
+ idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
+
+ if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ /* migrate events if there is a valid target */
+ if (target < nr_cpu_ids)
+ cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
+ else
+ target = -1;
+
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+
+ return 0;
+}
+
+int perfmon_pmu_init(struct idxd_device *idxd)
+{
+ union idxd_perfcap perfcap;
+ struct idxd_pmu *idxd_pmu;
+ int rc = -ENODEV;
+
+ /*
+ * perfmon module initialization failed, nothing to do
+ */
+ if (!cpuhp_set_up)
+ return -ENODEV;
+
+ /*
+ * If perfmon_offset or num_counters is 0, it means perfmon is
+ * not supported on this hardware.
+ */
+ if (idxd->perfmon_offset == 0)
+ return -ENODEV;
+
+ idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
+ if (!idxd_pmu)
+ return -ENOMEM;
+
+ idxd_pmu->idxd = idxd;
+ idxd->idxd_pmu = idxd_pmu;
+
+ if (idxd->data->type == IDXD_TYPE_DSA) {
+ rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
+ if (rc < 0)
+ goto free;
+ } else if (idxd->data->type == IDXD_TYPE_IAX) {
+ rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
+ if (rc < 0)
+ goto free;
+ } else {
+ goto free;
+ }
+
+ perfmon_reset(idxd);
+
+ perfcap.bits = ioread64(PERFCAP_REG(idxd));
+
+ /*
+	 * If the total perf counter count is 0, stop further registration.
+	 * This is necessary to support the driver running on a guest
+	 * that does not have perfmon support.
+ */
+ if (perfcap.num_perf_counter == 0)
+ goto free;
+
+ /* A counter width of 0 means it can't count */
+ if (perfcap.counter_width == 0)
+ goto free;
+
+ /* Overflow interrupt and counter freeze support must be available */
+ if (!perfcap.overflow_interrupt || !perfcap.counter_freeze)
+ goto free;
+
+ /* Number of event categories cannot be 0 */
+ if (perfcap.num_event_category == 0)
+ goto free;
+
+ /*
+ * We don't support per-counter capabilities for now.
+ */
+ if (perfcap.cap_per_counter)
+ goto free;
+
+ idxd_pmu->n_event_categories = perfcap.num_event_category;
+ idxd_pmu->supported_event_categories = perfcap.global_event_category;
+ idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter;
+
+ /* check filter capability. If 0, then filters are not supported */
+ idxd_pmu->supported_filters = perfcap.filter;
+ if (perfcap.filter)
+ idxd_pmu->n_filters = hweight8(perfcap.filter);
+
+	/* Store the total number of counters and the counter width */
+ idxd_pmu->n_counters = perfcap.num_perf_counter;
+ idxd_pmu->counter_width = perfcap.counter_width;
+
+ idxd_pmu_init(idxd_pmu);
+
+ rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
+ if (rc)
+ goto free;
+
+ rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
+ if (rc) {
+ perf_pmu_unregister(&idxd->idxd_pmu->pmu);
+ goto free;
+ }
+out:
+ return rc;
+free:
+ kfree(idxd_pmu);
+ idxd->idxd_pmu = NULL;
+
+ goto out;
+}
+
+void __init perfmon_init(void)
+{
+ int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "driver/dma/idxd/perf:online",
+ perf_event_cpu_online,
+ perf_event_cpu_offline);
+ if (WARN_ON(rc < 0))
+ return;
+
+ cpuhp_slot = rc;
+ cpuhp_set_up = true;
+}
+
+void __exit perfmon_exit(void)
+{
+ if (cpuhp_set_up)
+ cpuhp_remove_multi_state(cpuhp_slot);
+}
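
perfmon_pmu_event_update() above hides a classic PMU trick: shifting both raw counter values up to bit 63 and the difference back down makes the subtraction wrap at the hardware counter width rather than at 64 bits. A standalone, compilable demonstration, assuming a 32-bit counter:

/* Standalone demo of the counter-delta computation in perfmon_pmu_event_update(). */
#include <stdio.h>
#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t now, int counter_width)
{
	int shift = 64 - counter_width;

	/* the shifts make the subtraction wrap at counter_width bits */
	return ((now << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	/* counter wrapped from near-max to a small value */
	printf("%llu\n", (unsigned long long)counter_delta(0xfffffff0ULL, 0x10ULL, 32)); /* 32 */
	return 0;
}
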
diff --git a/drivers/dma/idxd/perfmon.h b/drivers/dma/idxd/perfmon.h
new file mode 100644
index 000000000000..9a081a1bc605
--- /dev/null
+++ b/drivers/dma/idxd/perfmon.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */
+
+#ifndef _PERFMON_H_
+#define _PERFMON_H_
+
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/uuid.h>
+#include <linux/idxd.h>
+#include <linux/perf_event.h>
+#include "registers.h"
+
+static inline struct idxd_pmu *event_to_pmu(struct perf_event *event)
+{
+ struct idxd_pmu *idxd_pmu;
+ struct pmu *pmu;
+
+ pmu = event->pmu;
+ idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
+
+ return idxd_pmu;
+}
+
+static inline struct idxd_device *event_to_idxd(struct perf_event *event)
+{
+ struct idxd_pmu *idxd_pmu;
+ struct pmu *pmu;
+
+ pmu = event->pmu;
+ idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
+
+ return idxd_pmu->idxd;
+}
+
+static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu)
+{
+ struct idxd_pmu *idxd_pmu;
+
+ idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
+
+ return idxd_pmu->idxd;
+}
+
+enum dsa_perf_events {
+ DSA_PERF_EVENT_WQ = 0,
+ DSA_PERF_EVENT_ENGINE,
+ DSA_PERF_EVENT_ADDR_TRANS,
+ DSA_PERF_EVENT_OP,
+ DSA_PERF_EVENT_COMPL,
+ DSA_PERF_EVENT_MAX,
+};
+
+enum filter_enc {
+ FLT_WQ = 0,
+ FLT_TC,
+ FLT_PG_SZ,
+ FLT_XFER_SZ,
+ FLT_ENG,
+ FLT_MAX,
+};
+
+#define CONFIG_RESET 0x0000000000000001
+#define CNTR_RESET 0x0000000000000002
+#define CNTR_ENABLE 0x0000000000000001
+#define INTR_OVFL 0x0000000000000002
+
+#define COUNTER_FREEZE 0x00000000FFFFFFFF
+#define COUNTER_UNFREEZE 0x0000000000000000
+#define OVERFLOW_SIZE 32
+
+#define CNTRCFG_ENABLE BIT(0)
+#define CNTRCFG_IRQ_OVERFLOW BIT(1)
+#define CNTRCFG_CATEGORY_SHIFT 8
+#define CNTRCFG_EVENT_SHIFT 32
+
+#define PERFMON_TABLE_OFFSET(_idxd) \
+({ \
+ typeof(_idxd) __idxd = (_idxd); \
+ ((__idxd)->reg_base + (__idxd)->perfmon_offset); \
+})
+#define PERFMON_REG_OFFSET(idxd, offset) \
+ (PERFMON_TABLE_OFFSET(idxd) + (offset))
+
+#define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET))
+#define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET))
+#define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET))
+#define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET))
+
+#define FLTCFG_REG(idxd, cntr, flt) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4))
+
+#define CNTRCFG_REG(idxd, cntr) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8))
+#define CNTRDATA_REG(idxd, cntr) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8))
+#define CNTRCAP_REG(idxd, cntr) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8))
+
+#define EVNTCAP_REG(idxd, category) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8))
+
+#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format) \
+static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_idxd_##_name = \
+ __ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL)
+
+#endif
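
PERFMON_TABLE_OFFSET() above is written as a GNU statement expression with typeof so its argument is evaluated exactly once, which matters if a caller ever passes an expression with side effects. The idiom in isolation, with illustrative names:

/* Single-evaluation macro idiom; struct foo_dev and FOO_REG_BASE are illustrative. */
struct foo_dev {
	void *mmio;
	unsigned long offset;
};

#define FOO_REG_BASE(_dev) \
({ \
	typeof(_dev) __dev = (_dev); /* argument captured once */ \
	((char *)(__dev)->mmio + (__dev)->offset); \
})

With this shape, something like FOO_REG_BASE(get_dev()) would not call get_dev() twice, where a naive #define would.
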
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 751ecb4f9f81..c970c3f025f0 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -24,8 +24,8 @@ union gen_cap_reg {
u64 overlap_copy:1;
u64 cache_control_mem:1;
u64 cache_control_cache:1;
+ u64 cmd_cap:1;
u64 rsvd:3;
- u64 int_handle_req:1;
u64 dest_readback:1;
u64 drain_readback:1;
u64 rsvd2:6;
@@ -120,7 +120,8 @@ union gencfg_reg {
union genctrl_reg {
struct {
u32 softerr_int_en:1;
- u32 rsvd:31;
+ u32 halt_int_en:1;
+ u32 rsvd:30;
};
u32 bits;
} __packed;
@@ -180,8 +181,11 @@ enum idxd_cmd {
IDXD_CMD_DRAIN_PASID,
IDXD_CMD_ABORT_PASID,
IDXD_CMD_REQUEST_INT_HANDLE,
+ IDXD_CMD_RELEASE_INT_HANDLE,
};
+#define CMD_INT_HANDLE_IMS 0x10000
+
#define IDXD_CMDSTS_OFFSET 0xa8
union cmdsts_reg {
struct {
@@ -193,6 +197,8 @@ union cmdsts_reg {
u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE 0x80000000
+#define IDXD_CMDSTS_ERR_MASK 0xff
+#define IDXD_CMDSTS_RES_SHIFT 8
enum idxd_cmdsts_err {
IDXD_CMDSTS_SUCCESS = 0,
@@ -228,6 +234,8 @@ enum idxd_cmdsts_err {
IDXD_CMDSTS_ERR_NO_HANDLE,
};
+#define IDXD_CMDCAP_OFFSET 0xb0
+
#define IDXD_SWERR_OFFSET 0xc0
#define IDXD_SWERR_VALID 0x00000001
#define IDXD_SWERR_OVERFLOW 0x00000002
@@ -378,4 +386,112 @@ union wqcfg {
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
+/* The following are the performance monitor registers */
+#define IDXD_PERFCAP_OFFSET 0x0
+union idxd_perfcap {
+ struct {
+ u64 num_perf_counter:6;
+ u64 rsvd1:2;
+ u64 counter_width:8;
+ u64 num_event_category:4;
+ u64 global_event_category:16;
+ u64 filter:8;
+ u64 rsvd2:8;
+ u64 cap_per_counter:1;
+ u64 writeable_counter:1;
+ u64 counter_freeze:1;
+ u64 overflow_interrupt:1;
+ u64 rsvd3:8;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_EVNTCAP_OFFSET 0x80
+union idxd_evntcap {
+ struct {
+ u64 events:28;
+ u64 rsvd:36;
+ };
+ u64 bits;
+} __packed;
+
+struct idxd_event {
+ union {
+ struct {
+ u32 event_category:4;
+ u32 events:28;
+ };
+ u32 val;
+ };
+} __packed;
+
+#define IDXD_CNTRCAP_OFFSET 0x800
+struct idxd_cntrcap {
+ union {
+ struct {
+ u32 counter_width:8;
+ u32 rsvd:20;
+ u32 num_events:4;
+ };
+ u32 val;
+ };
+ struct idxd_event events[];
+} __packed;
+
+#define IDXD_PERFRST_OFFSET 0x10
+union idxd_perfrst {
+ struct {
+ u32 perfrst_config:1;
+ u32 perfrst_counter:1;
+ u32 rsvd:30;
+ };
+ u32 val;
+} __packed;
+
+#define IDXD_OVFSTATUS_OFFSET 0x30
+#define IDXD_PERFFRZ_OFFSET 0x20
+#define IDXD_CNTRCFG_OFFSET 0x100
+union idxd_cntrcfg {
+ struct {
+ u64 enable:1;
+ u64 interrupt_ovf:1;
+ u64 global_freeze_ovf:1;
+ u64 rsvd1:5;
+ u64 event_category:4;
+ u64 rsvd2:20;
+ u64 events:28;
+ u64 rsvd3:4;
+ };
+ u64 val;
+} __packed;
+
+#define IDXD_FLTCFG_OFFSET 0x300
+
+#define IDXD_CNTRDATA_OFFSET 0x200
+union idxd_cntrdata {
+ struct {
+ u64 event_count_value;
+ };
+ u64 val;
+} __packed;
+
+union event_cfg {
+ struct {
+ u64 event_cat:4;
+ u64 event_enc:28;
+ };
+ u64 val;
+} __packed;
+
+union filter_cfg {
+ struct {
+ u64 wq:32;
+ u64 tc:8;
+ u64 pg_sz:4;
+ u64 xfer_sz:8;
+ u64 eng:8;
+ };
+ u64 val;
+} __packed;
+
#endif
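
The perfmon register unions above follow the driver's established pattern: a raw MMIO value is stored through .bits (or .val) and decoded through named bitfields. A toy, userspace-compilable version of the decode; the field layout is invented for the example and relies on GCC's little-endian bitfield ordering, as the kernel does:

/* Toy union-decode demo; field layout is illustrative, not a real register. */
#include <stdio.h>
#include <stdint.h>

union toy_cap {
	struct {
		uint64_t num_counters:6;	/* bits 0..5 */
		uint64_t counter_width:8;	/* bits 6..13 */
		uint64_t rsvd:50;
	};
	uint64_t bits;
};

int main(void)
{
	union toy_cap cap;

	cap.bits = (32ULL << 6) | 8;	/* width=32 in bits 6..13, counters=8 in bits 0..5 */
	printf("counters=%u width=%u\n",
	       (unsigned)cap.num_counters, (unsigned)cap.counter_width);
	return 0;
}
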
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index a7a61bcc17d5..19afb62abaff 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -15,18 +15,30 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
desc = wq->descs[idx];
memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
- memset(desc->completion, 0, idxd->compl_size);
+ memset(desc->completion, 0, idxd->data->compl_size);
desc->cpu = cpu;
if (device_pasid_enabled(idxd))
desc->hw->pasid = idxd->pasid;
/*
- * Descriptor completion vectors are 1-8 for MSIX. We will round
- * robin through the 8 vectors.
+ * Descriptor completion vectors are 1...N for MSIX. We will round
+ * robin through the N vectors.
*/
wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- desc->hw->int_handle = wq->vec_ptr;
+ if (!idxd->int_handles) {
+ desc->hw->int_handle = wq->vec_ptr;
+ } else {
+ desc->vector = wq->vec_ptr;
+ /*
+ * int_handles are only for descriptor completion. However for device
+ * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
+ * though we are rotating through 1...N for descriptor interrupts, we
+	 * need to acquire the int_handles from 0..N-1.
+ */
+ desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
+ }
+
return desc;
}
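
The rotation above is worth a second look: (vec_ptr % num_wq_irqs) + 1 cycles through 1..N and never yields 0, reserving vector 0 for the misc interrupt. A quick standalone check of the arithmetic:

/* Quick check of the completion-vector rotation used in __get_desc(). */
#include <stdio.h>

int main(void)
{
	int vec_ptr = 0, n = 3, i;

	for (i = 0; i < 6; i++) {
		vec_ptr = (vec_ptr % n) + 1;
		printf("%d ", vec_ptr);	/* prints: 1 2 3 1 2 3 */
	}
	printf("\n");
	return 0;
}
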
@@ -79,13 +91,15 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;
- int vec = desc->hw->int_handle;
void __iomem *portal;
int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
+ if (!percpu_ref_tryget_live(&wq->wq_active))
+ return -ENXIO;
+
portal = wq->portal;
/*
@@ -108,13 +122,25 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
return rc;
}
+ percpu_ref_put(&wq->wq_active);
+
/*
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
*/
- if (desc->hw->flags & IDXD_OP_FLAG_RCI)
- llist_add(&desc->llnode,
- &idxd->irq_entries[vec].pending_llist);
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+ int vec;
+
+		/*
+		 * On the host kernel, the vector is the value assigned to the
+		 * interrupt handle, which is the index of the MSIX vector. On
+		 * a guest the int_handle can't be used, since it is the index
+		 * into IMS for the entire device; the guest's device-local
+		 * index is used instead.
+		 */
+ vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
+ llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
+ }
return 0;
}
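
The percpu_ref_tryget_live()/percpu_ref_put() pair added here is what lets idxd_wq_quiesce() drain submitters: once the ref is killed, tryget_live() fails and new submissions bounce with -ENXIO while in-flight ones finish. A generic sketch of the gate; the foo_* names and the wq_dead completion usage are illustrative, not the driver's exact code:

/* Submission-gate sketch; foo_* names are illustrative. */
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct foo_wq {
	struct percpu_ref wq_active;
	struct completion wq_dead;
};

static int foo_submit(struct foo_wq *wq)
{
	if (!percpu_ref_tryget_live(&wq->wq_active))
		return -ENXIO;	/* wq is quiescing or dead */

	/* ... write the descriptor to the portal ... */

	percpu_ref_put(&wq->wq_active);
	return 0;
}

static void foo_quiesce(struct foo_wq *wq)
{
	percpu_ref_kill(&wq->wq_active);	/* new tryget_live() now fails */
	wait_for_completion(&wq->wq_dead);	/* ref release callback completes this */
}
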
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 18bf4d148989..0460d58e3941 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
[IDXD_WQT_USER] = "user",
};
-static void idxd_conf_device_release(struct device *dev)
-{
- dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
-}
-
-static struct device_type idxd_group_device_type = {
- .name = "group",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type idxd_wq_device_type = {
- .name = "wq",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type idxd_engine_device_type = {
- .name = "engine",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type dsa_device_type = {
- .name = "dsa",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type iax_device_type = {
- .name = "iax",
- .release = idxd_conf_device_release,
-};
-
-static inline bool is_dsa_dev(struct device *dev)
-{
- return dev ? dev->type == &dsa_device_type : false;
-}
-
-static inline bool is_iax_dev(struct device *dev)
-{
- return dev ? dev->type == &iax_device_type : false;
-}
-
-static inline bool is_idxd_dev(struct device *dev)
-{
- return is_dsa_dev(dev) || is_iax_dev(dev);
-}
-
-static inline bool is_idxd_wq_dev(struct device *dev)
-{
- return dev ? dev->type == &idxd_wq_device_type : false;
-}
-
-static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
-{
- if (wq->type == IDXD_WQT_KERNEL &&
- strcmp(wq->name, "dmaengine") == 0)
- return true;
- return false;
-}
-
-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
-{
- return wq->type == IDXD_WQT_USER;
-}
-
static int idxd_config_bus_match(struct device *dev,
struct device_driver *drv)
{
@@ -110,9 +47,131 @@ static int idxd_config_bus_match(struct device *dev,
return matched;
}
-static int idxd_config_bus_probe(struct device *dev)
+static int enable_wq(struct idxd_wq *wq)
{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ unsigned long flags;
int rc;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Enabling while device not enabled.\n");
+ return -EPERM;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+ return -EBUSY;
+ }
+
+ if (!wq->group) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ not attached to group.\n");
+ return -EINVAL;
+ }
+
+ if (strlen(wq->name) == 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ name not set.\n");
+ return -EINVAL;
+ }
+
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ dev_warn(dev, "PASID not enabled and shared WQ.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -ENXIO;
+ }
+ /*
+		 * A shared wq with the threshold set to 0 means the user
+		 * did not set the threshold, or transitioned from a
+		 * dedicated wq without setting one. A value
+ * of 0 would effectively disable the shared wq. The
+ * driver does not allow a value of 0 to be set for
+ * threshold via sysfs.
+ */
+ if (wq->threshold == 0) {
+ dev_warn(dev, "Shared WQ and threshold 0.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -EINVAL;
+ }
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ resource alloc failed\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ dev_warn(dev, "IDXD wq disable failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+
+ wq->client_count = 0;
+
+ if (wq->type == IDXD_WQT_KERNEL) {
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ if (is_idxd_wq_dmaengine(wq)) {
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "DMA channel register failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ } else if (is_idxd_wq_cdev(wq)) {
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Cdev creation failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ mutex_unlock(&wq->wq_lock);
+ dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+ return 0;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ int rc = 0;
unsigned long flags;
dev_dbg(dev, "%s called\n", __func__);
@@ -130,7 +189,8 @@ static int idxd_config_bus_probe(struct device *dev)
/* Perform IDXD configuration and enabling */
spin_lock_irqsave(&idxd->dev_lock, flags);
- rc = idxd_device_config(idxd);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
if (rc < 0) {
module_put(THIS_MODULE);
@@ -157,115 +217,8 @@ static int idxd_config_bus_probe(struct device *dev)
return 0;
} else if (is_idxd_wq_dev(dev)) {
struct idxd_wq *wq = confdev_to_wq(dev);
- struct idxd_device *idxd = wq->idxd;
-
- mutex_lock(&wq->wq_lock);
-
- if (idxd->state != IDXD_DEV_ENABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Enabling while device not enabled.\n");
- return -EPERM;
- }
-
- if (wq->state != IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d already enabled.\n", wq->id);
- return -EBUSY;
- }
-
- if (!wq->group) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ not attached to group.\n");
- return -EINVAL;
- }
-
- if (strlen(wq->name) == 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ name not set.\n");
- return -EINVAL;
- }
-
- /* Shared WQ checks */
- if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
- dev_warn(dev,
- "PASID not enabled and shared WQ.\n");
- mutex_unlock(&wq->wq_lock);
- return -ENXIO;
- }
- /*
- * Shared wq with the threshold set to 0 means the user
- * did not set the threshold or transitioned from a
- * dedicated wq but did not set threshold. A value
- * of 0 would effectively disable the shared wq. The
- * driver does not allow a value of 0 to be set for
- * threshold via sysfs.
- */
- if (wq->threshold == 0) {
- dev_warn(dev,
- "Shared WQ and threshold 0.\n");
- mutex_unlock(&wq->wq_lock);
- return -EINVAL;
- }
- }
-
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ resource alloc failed\n");
- return rc;
- }
-
- spin_lock_irqsave(&idxd->dev_lock, flags);
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Writing WQ %d config failed: %d\n",
- wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_enable(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d enabling failed: %d\n",
- wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_map_portal(wq);
- if (rc < 0) {
- dev_warn(dev, "wq portal mapping failed: %d\n", rc);
- rc = idxd_wq_disable(wq);
- if (rc < 0)
- dev_warn(dev, "IDXD wq disable failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
-
- wq->client_count = 0;
-
- dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
- if (is_idxd_wq_dmaengine(wq)) {
- rc = idxd_register_dma_channel(wq);
- if (rc < 0) {
- dev_dbg(dev, "DMA channel register failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- } else if (is_idxd_wq_cdev(wq)) {
- rc = idxd_wq_add_cdev(wq);
- if (rc < 0) {
- dev_dbg(dev, "Cdev creation failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- mutex_unlock(&wq->wq_lock);
- return 0;
+ return enable_wq(wq);
}
return -ENODEV;
@@ -283,6 +236,9 @@ static void disable_wq(struct idxd_wq *wq)
return;
}
+ if (wq->type == IDXD_WQT_KERNEL)
+ idxd_wq_quiesce(wq);
+
if (is_idxd_wq_dmaengine(wq))
idxd_unregister_dma_channel(wq);
else if (is_idxd_wq_cdev(wq))
@@ -322,7 +278,7 @@ static int idxd_config_bus_remove(struct device *dev)
dev_dbg(dev, "%s removing dev %s\n", __func__,
dev_name(&idxd->conf_dev));
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_DISABLED)
continue;
@@ -333,12 +289,14 @@ static int idxd_config_bus_remove(struct device *dev)
idxd_unregister_dma_device(idxd);
rc = idxd_device_disable(idxd);
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
- mutex_lock(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
- mutex_unlock(&wq->wq_lock);
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_disable_cleanup(wq);
+ mutex_unlock(&wq->wq_lock);
+ }
}
module_put(THIS_MODULE);
if (rc < 0)
@@ -364,19 +322,6 @@ struct bus_type dsa_bus_type = {
.shutdown = idxd_config_bus_shutdown,
};
-struct bus_type iax_bus_type = {
- .name = "iax",
- .match = idxd_config_bus_match,
- .probe = idxd_config_bus_probe,
- .remove = idxd_config_bus_remove,
- .shutdown = idxd_config_bus_shutdown,
-};
-
-static struct bus_type *idxd_bus_types[] = {
- &dsa_bus_type,
- &iax_bus_type
-};
-
static struct idxd_device_driver dsa_drv = {
.drv = {
.name = "dsa",
@@ -386,60 +331,15 @@ static struct idxd_device_driver dsa_drv = {
},
};
-static struct idxd_device_driver iax_drv = {
- .drv = {
- .name = "iax",
- .bus = &iax_bus_type,
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
-};
-
-static struct idxd_device_driver *idxd_drvs[] = {
- &dsa_drv,
- &iax_drv
-};
-
-struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
-{
- return idxd_bus_types[idxd->type];
-}
-
-static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
-{
- if (idxd->type == IDXD_TYPE_DSA)
- return &dsa_device_type;
- else if (idxd->type == IDXD_TYPE_IAX)
- return &iax_device_type;
- else
- return NULL;
-}
-
/* IDXD generic driver setup */
int idxd_register_driver(void)
{
- int i, rc;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++) {
- rc = driver_register(&idxd_drvs[i]->drv);
- if (rc < 0)
- goto drv_fail;
- }
-
- return 0;
-
-drv_fail:
- while (--i >= 0)
- driver_unregister(&idxd_drvs[i]->drv);
- return rc;
+ return driver_register(&dsa_drv.drv);
}
void idxd_unregister_driver(void)
{
- int i;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++)
- driver_unregister(&idxd_drvs[i]->drv);
+ driver_unregister(&dsa_drv.drv);
}
/* IDXD engine attributes */
@@ -450,9 +350,9 @@ static ssize_t engine_group_id_show(struct device *dev,
container_of(dev, struct idxd_engine, conf_dev);
if (engine->group)
- return sprintf(buf, "%d\n", engine->group->id);
+ return sysfs_emit(buf, "%d\n", engine->group->id);
else
- return sprintf(buf, "%d\n", -1);
+ return sysfs_emit(buf, "%d\n", -1);
}
static ssize_t engine_group_id_store(struct device *dev,
@@ -488,7 +388,7 @@ static ssize_t engine_group_id_store(struct device *dev,
if (prevg)
prevg->num_engines--;
- engine->group = &idxd->groups[id];
+ engine->group = idxd->groups[id];
engine->group->num_engines++;
return count;
@@ -512,6 +412,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
NULL,
};
+static void idxd_conf_engine_release(struct device *dev)
+{
+ struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
+
+ kfree(engine);
+}
+
+struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_engine_release,
+ .groups = idxd_engine_attribute_groups,
+};
+
/* Group attributes */
static void idxd_set_free_tokens(struct idxd_device *idxd)
@@ -519,7 +432,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
int i, tokens;
for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
- struct idxd_group *g = &idxd->groups[i];
+ struct idxd_group *g = idxd->groups[i];
tokens += g->tokens_reserved;
}
@@ -534,7 +447,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%u\n", group->tokens_reserved);
+ return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}
static ssize_t group_tokens_reserved_store(struct device *dev,
@@ -551,7 +464,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (rc < 0)
return -EINVAL;
- if (idxd->type == IDXD_TYPE_IAX)
+ if (idxd->data->type == IDXD_TYPE_IAX)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -582,7 +495,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%u\n", group->tokens_allowed);
+ return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}
static ssize_t group_tokens_allowed_store(struct device *dev,
@@ -599,7 +512,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
if (rc < 0)
return -EINVAL;
- if (idxd->type == IDXD_TYPE_IAX)
+ if (idxd->data->type == IDXD_TYPE_IAX)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -627,7 +540,7 @@ static ssize_t group_use_token_limit_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%u\n", group->use_token_limit);
+ return sysfs_emit(buf, "%u\n", group->use_token_limit);
}
static ssize_t group_use_token_limit_store(struct device *dev,
@@ -644,7 +557,7 @@ static ssize_t group_use_token_limit_store(struct device *dev,
if (rc < 0)
return -EINVAL;
- if (idxd->type == IDXD_TYPE_IAX)
+ if (idxd->data->type == IDXD_TYPE_IAX)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -670,22 +583,22 @@ static ssize_t group_engines_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
int i, rc = 0;
- char *tmp = buf;
struct idxd_device *idxd = group->idxd;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
if (!engine->group)
continue;
if (engine->group->id == group->id)
- rc += sprintf(tmp + rc, "engine%d.%d ",
- idxd->id, engine->id);
+ rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
}
+ if (!rc)
+ return 0;
rc--;
- rc += sprintf(tmp + rc, "\n");
+ rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
@@ -699,22 +612,22 @@ static ssize_t group_work_queues_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
int i, rc = 0;
- char *tmp = buf;
struct idxd_device *idxd = group->idxd;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (!wq->group)
continue;
if (wq->group->id == group->id)
- rc += sprintf(tmp + rc, "wq%d.%d ",
- idxd->id, wq->id);
+ rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
}
+ if (!rc)
+ return 0;
rc--;
- rc += sprintf(tmp + rc, "\n");
+ rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
@@ -729,7 +642,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%d\n", group->tc_a);
+ return sysfs_emit(buf, "%d\n", group->tc_a);
}
static ssize_t group_traffic_class_a_store(struct device *dev,
@@ -770,7 +683,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%d\n", group->tc_b);
+ return sysfs_emit(buf, "%d\n", group->tc_b);
}
static ssize_t group_traffic_class_b_store(struct device *dev,
@@ -824,13 +737,26 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
NULL,
};
+static void idxd_conf_group_release(struct device *dev)
+{
+ struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
+
+ kfree(group);
+}
+
+struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_group_release,
+ .groups = idxd_group_attribute_groups,
+};
+
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%d\n", wq->client_count);
+ return sysfs_emit(buf, "%d\n", wq->client_count);
}
static struct device_attribute dev_attr_wq_clients =
@@ -843,12 +769,12 @@ static ssize_t wq_state_show(struct device *dev,
switch (wq->state) {
case IDXD_WQ_DISABLED:
- return sprintf(buf, "disabled\n");
+ return sysfs_emit(buf, "disabled\n");
case IDXD_WQ_ENABLED:
- return sprintf(buf, "enabled\n");
+ return sysfs_emit(buf, "enabled\n");
}
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
}
static struct device_attribute dev_attr_wq_state =
@@ -860,9 +786,9 @@ static ssize_t wq_group_id_show(struct device *dev,
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
if (wq->group)
- return sprintf(buf, "%u\n", wq->group->id);
+ return sysfs_emit(buf, "%u\n", wq->group->id);
else
- return sprintf(buf, "-1\n");
+ return sysfs_emit(buf, "-1\n");
}
static ssize_t wq_group_id_store(struct device *dev,
@@ -896,7 +822,7 @@ static ssize_t wq_group_id_store(struct device *dev,
return count;
}
- group = &idxd->groups[id];
+ group = idxd->groups[id];
prevg = wq->group;
if (prevg)
@@ -914,8 +840,7 @@ static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%s\n",
- wq_dedicated(wq) ? "dedicated" : "shared");
+ return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}
static ssize_t wq_mode_store(struct device *dev,
@@ -951,7 +876,7 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->size);
+ return sysfs_emit(buf, "%u\n", wq->size);
}
static int total_claimed_wq_size(struct idxd_device *idxd)
@@ -960,7 +885,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
int wq_size = 0;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
wq_size += wq->size;
}
@@ -1002,7 +927,7 @@ static ssize_t wq_priority_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->priority);
+ return sysfs_emit(buf, "%u\n", wq->priority);
}
static ssize_t wq_priority_store(struct device *dev,
@@ -1039,8 +964,7 @@ static ssize_t wq_block_on_fault_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n",
- test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
+ return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
static ssize_t wq_block_on_fault_store(struct device *dev,
@@ -1079,7 +1003,7 @@ static ssize_t wq_threshold_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->threshold);
+ return sysfs_emit(buf, "%u\n", wq->threshold);
}
static ssize_t wq_threshold_store(struct device *dev,
@@ -1122,15 +1046,12 @@ static ssize_t wq_type_show(struct device *dev,
switch (wq->type) {
case IDXD_WQT_KERNEL:
- return sprintf(buf, "%s\n",
- idxd_wq_type_names[IDXD_WQT_KERNEL]);
+ return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
case IDXD_WQT_USER:
- return sprintf(buf, "%s\n",
- idxd_wq_type_names[IDXD_WQT_USER]);
+ return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
case IDXD_WQT_NONE:
default:
- return sprintf(buf, "%s\n",
- idxd_wq_type_names[IDXD_WQT_NONE]);
+ return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
}
return -EINVAL;
@@ -1171,7 +1092,7 @@ static ssize_t wq_name_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%s\n", wq->name);
+ return sysfs_emit(buf, "%s\n", wq->name);
}
static ssize_t wq_name_store(struct device *dev,
@@ -1206,8 +1127,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ int minor = -1;
- return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+ mutex_lock(&wq->wq_lock);
+ if (wq->idxd_cdev)
+ minor = wq->idxd_cdev->minor;
+ mutex_unlock(&wq->wq_lock);
+
+ if (minor == -1)
+ return -ENXIO;
+ return sysfs_emit(buf, "%d\n", minor);
}
static struct device_attribute dev_attr_wq_cdev_minor =
@@ -1233,7 +1162,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
+ return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
@@ -1267,7 +1196,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->max_batch_size);
+ return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
@@ -1300,7 +1229,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->ats_dis);
+ return sysfs_emit(buf, "%u\n", wq->ats_dis);
}
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
@@ -1356,6 +1285,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
NULL,
};
+static void idxd_conf_wq_release(struct device *dev)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ kfree(wq->wqcfg);
+ kfree(wq);
+}
+
+struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_wq_release,
+ .groups = idxd_wq_attribute_groups,
+};
+
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1363,7 +1306,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%#x\n", idxd->hw.version);
+ return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);
@@ -1374,7 +1317,7 @@ static ssize_t max_work_queues_size_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_wq_size);
+ return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);
@@ -1384,7 +1327,7 @@ static ssize_t max_groups_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_groups);
+ return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);
@@ -1394,7 +1337,7 @@ static ssize_t max_work_queues_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_wqs);
+ return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);
@@ -1404,7 +1347,7 @@ static ssize_t max_engines_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_engines);
+ return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);
@@ -1414,7 +1357,7 @@ static ssize_t numa_node_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+ return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);
@@ -1424,7 +1367,7 @@ static ssize_t max_batch_size_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_batch_size);
+ return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);
@@ -1435,7 +1378,7 @@ static ssize_t max_transfer_size_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+ return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);
@@ -1461,7 +1404,7 @@ static ssize_t gen_cap_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
+ return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);
@@ -1471,8 +1414,7 @@ static ssize_t configurable_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n",
- test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+ return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
@@ -1486,13 +1428,13 @@ static ssize_t clients_show(struct device *dev,
spin_lock_irqsave(&idxd->dev_lock, flags);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
count += wq->client_count;
}
spin_unlock_irqrestore(&idxd->dev_lock, flags);
- return sprintf(buf, "%d\n", count);
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);
@@ -1502,7 +1444,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
+ return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
@@ -1515,14 +1457,14 @@ static ssize_t state_show(struct device *dev,
switch (idxd->state) {
case IDXD_DEV_DISABLED:
case IDXD_DEV_CONF_READY:
- return sprintf(buf, "disabled\n");
+ return sysfs_emit(buf, "disabled\n");
case IDXD_DEV_ENABLED:
- return sprintf(buf, "enabled\n");
+ return sysfs_emit(buf, "enabled\n");
case IDXD_DEV_HALTED:
- return sprintf(buf, "halted\n");
+ return sysfs_emit(buf, "halted\n");
}
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);
@@ -1536,10 +1478,10 @@ static ssize_t errors_show(struct device *dev,
spin_lock_irqsave(&idxd->dev_lock, flags);
for (i = 0; i < 4; i++)
- out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+ out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
out--;
- out += sprintf(buf + out, "\n");
+ out += sysfs_emit_at(buf, out, "\n");
return out;
}
static DEVICE_ATTR_RO(errors);
@@ -1550,7 +1492,7 @@ static ssize_t max_tokens_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_tokens);
+ return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);
@@ -1560,7 +1502,7 @@ static ssize_t token_limit_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->token_limit);
+ return sysfs_emit(buf, "%u\n", idxd->token_limit);
}
static ssize_t token_limit_store(struct device *dev,
@@ -1599,7 +1541,7 @@ static ssize_t cdev_major_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->major);
+ return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);
@@ -1608,7 +1550,7 @@ static ssize_t cmd_status_show(struct device *dev,
{
struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%#x\n", idxd->cmd_status);
+ return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);
@@ -1644,183 +1586,161 @@ static const struct attribute_group *idxd_attribute_groups[] = {
NULL,
};
-static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+static void idxd_conf_device_release(struct device *dev)
{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
+ struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+
+ kfree(idxd->groups);
+ kfree(idxd->wqs);
+ kfree(idxd->engines);
+ kfree(idxd->irq_entries);
+ kfree(idxd->int_handles);
+ ida_free(&idxd_ida, idxd->id);
+ kfree(idxd);
+}
+
+struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+ .groups = idxd_attribute_groups,
+};
+
+struct device_type iax_device_type = {
+ .name = "iax",
+ .release = idxd_conf_device_release,
+ .groups = idxd_attribute_groups,
+};
+
+static int idxd_register_engine_devices(struct idxd_device *idxd)
+{
+ int i, j, rc;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
-
- engine->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&engine->conf_dev, "engine%d.%d",
- idxd->id, engine->id);
- engine->conf_dev.bus = idxd_get_bus_type(idxd);
- engine->conf_dev.groups = idxd_engine_attribute_groups;
- engine->conf_dev.type = &idxd_engine_device_type;
- dev_dbg(dev, "Engine device register: %s\n",
- dev_name(&engine->conf_dev));
- rc = device_register(&engine->conf_dev);
- if (rc < 0) {
- put_device(&engine->conf_dev);
+ struct idxd_engine *engine = idxd->engines[i];
+
+ rc = device_add(&engine->conf_dev);
+ if (rc < 0)
goto cleanup;
- }
}
return 0;
cleanup:
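+	/*
+	 * Devices at index i and above were never added, so dropping the
+	 * initial reference is enough. Devices added before index i need
+	 * a full device_unregister().
+	 */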
- while (i--) {
- struct idxd_engine *engine = &idxd->engines[i];
+	j = i;
+ for (; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
- device_unregister(&engine->conf_dev);
- }
+ while (j--)
+ device_unregister(&idxd->engines[j]->conf_dev);
return rc;
}
-static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+static int idxd_register_group_devices(struct idxd_device *idxd)
{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
+ int i, j, rc;
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
-
- group->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&group->conf_dev, "group%d.%d",
- idxd->id, group->id);
- group->conf_dev.bus = idxd_get_bus_type(idxd);
- group->conf_dev.groups = idxd_group_attribute_groups;
- group->conf_dev.type = &idxd_group_device_type;
- dev_dbg(dev, "Group device register: %s\n",
- dev_name(&group->conf_dev));
- rc = device_register(&group->conf_dev);
- if (rc < 0) {
- put_device(&group->conf_dev);
+ struct idxd_group *group = idxd->groups[i];
+
+ rc = device_add(&group->conf_dev);
+ if (rc < 0)
goto cleanup;
- }
}
return 0;
cleanup:
- while (i--) {
- struct idxd_group *group = &idxd->groups[i];
+	j = i;
+ for (; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
- device_unregister(&group->conf_dev);
- }
+ while (j--)
+ device_unregister(&idxd->groups[j]->conf_dev);
return rc;
}
-static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+static int idxd_register_wq_devices(struct idxd_device *idxd)
{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
+ int i, rc, j;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
-
- wq->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
- wq->conf_dev.bus = idxd_get_bus_type(idxd);
- wq->conf_dev.groups = idxd_wq_attribute_groups;
- wq->conf_dev.type = &idxd_wq_device_type;
- dev_dbg(dev, "WQ device register: %s\n",
- dev_name(&wq->conf_dev));
- rc = device_register(&wq->conf_dev);
- if (rc < 0) {
- put_device(&wq->conf_dev);
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ rc = device_add(&wq->conf_dev);
+ if (rc < 0)
goto cleanup;
- }
}
return 0;
cleanup:
- while (i--) {
- struct idxd_wq *wq = &idxd->wqs[i];
+	j = i;
+ for (; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
- device_unregister(&wq->conf_dev);
- }
+ while (j--)
+ device_unregister(&idxd->wqs[j]->conf_dev);
return rc;
}
-static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+int idxd_register_devices(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int rc;
- char devname[IDXD_NAME_SIZE];
+ int rc, i;
- sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
- idxd->conf_dev.parent = dev;
- dev_set_name(&idxd->conf_dev, "%s", devname);
- idxd->conf_dev.bus = idxd_get_bus_type(idxd);
- idxd->conf_dev.groups = idxd_attribute_groups;
- idxd->conf_dev.type = idxd_get_device_type(idxd);
-
- dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
- rc = device_register(&idxd->conf_dev);
- if (rc < 0) {
- put_device(&idxd->conf_dev);
- return rc;
- }
-
- return 0;
-}
-
-int idxd_setup_sysfs(struct idxd_device *idxd)
-{
- struct device *dev = &idxd->pdev->dev;
- int rc;
-
- rc = idxd_setup_device_sysfs(idxd);
- if (rc < 0) {
- dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+ rc = device_add(&idxd->conf_dev);
+ if (rc < 0)
return rc;
- }
- rc = idxd_setup_wq_sysfs(idxd);
+ rc = idxd_register_wq_devices(idxd);
if (rc < 0) {
- /* unregister conf dev */
- dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
- return rc;
+ dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
+ goto err_wq;
}
- rc = idxd_setup_group_sysfs(idxd);
+ rc = idxd_register_engine_devices(idxd);
if (rc < 0) {
- /* unregister conf dev */
- dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
- return rc;
+ dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
+ goto err_engine;
}
- rc = idxd_setup_engine_sysfs(idxd);
+ rc = idxd_register_group_devices(idxd);
if (rc < 0) {
- /* unregister conf dev */
- dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
- return rc;
+ dev_dbg(dev, "Group device registering failed: %d\n", rc);
+ goto err_group;
}
return 0;
+
+ err_group:
+ for (i = 0; i < idxd->max_engines; i++)
+ device_unregister(&idxd->engines[i]->conf_dev);
+ err_engine:
+ for (i = 0; i < idxd->max_wqs; i++)
+ device_unregister(&idxd->wqs[i]->conf_dev);
+ err_wq:
+ device_del(&idxd->conf_dev);
+ return rc;
}
-void idxd_cleanup_sysfs(struct idxd_device *idxd)
+void idxd_unregister_devices(struct idxd_device *idxd)
{
int i;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
device_unregister(&wq->conf_dev);
}
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
device_unregister(&engine->conf_dev);
}
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
device_unregister(&group->conf_dev);
}
@@ -1830,26 +1750,10 @@ void idxd_cleanup_sysfs(struct idxd_device *idxd)
int idxd_register_bus_type(void)
{
- int i, rc;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++) {
- rc = bus_register(idxd_bus_types[i]);
- if (rc < 0)
- goto bus_err;
- }
-
- return 0;
-
-bus_err:
- while (--i >= 0)
- bus_unregister(idxd_bus_types[i]);
- return rc;
+ return bus_register(&dsa_bus_type);
}
void idxd_unregister_bus_type(void)
{
- int i;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++)
- bus_unregister(idxd_bus_types[i]);
+ bus_unregister(&dsa_bus_type);
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index d0b2e601e3e5..ecdaada95120 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 - 2015 Linaro Ltd.
- * Copyright (c) 2013 Hisilicon Limited.
+ * Copyright (c) 2013 HiSilicon Limited.
*/
#include <linux/sched.h>
#include <linux/device.h>
@@ -1039,6 +1039,6 @@ static struct platform_driver k3_pdma_driver = {
module_platform_driver(k3_pdma_driver);
-MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 57f5ee4235c7..43ac3ab23d4c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2281,6 +2281,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sdm845-gpi-dma" },
+ { .compatible = "qcom,sm8150-gpi-dma" },
{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 6c0f9eb8ecc6..23d64489d25f 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -90,12 +90,6 @@ static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
return container_of(dmach, struct hidma_chan, chan);
}
-static inline
-struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
-{
- return container_of(t, struct hidma_desc, desc);
-}
-
static void hidma_free(struct hidma_dev *dmadev)
{
INIT_LIST_HEAD(&dmadev->ddev.channels);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 3aded7861fef..75c0b8e904e5 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2453,6 +2453,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
return 0;
}
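+/**
+ * xilinx_dma_synchronize - Synchronize callback execution after termination
+ * @dchan: DMA channel pointer
+ *
+ * Kill the completion tasklet so no descriptor callbacks run after
+ * xilinx_dma_terminate_all() has returned.
+ */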
+static void xilinx_dma_synchronize(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
/**
* xilinx_dma_channel_set_config - Configure VDMA channel
* Run-time configuration for Axi VDMA, supports:
@@ -3074,6 +3081,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_free_chan_resources =
xilinx_dma_free_chan_resources;
xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+ xdev->common.device_synchronize = xilinx_dma_synchronize;
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index d51ca0428bb8..f191a1f901ac 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -166,6 +166,7 @@ static int __init dmi_checksum(const u8 *buf, u8 len)
static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
+EXPORT_SYMBOL_GPL(dmi_available);
/*
* Save a DMI string
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 64344e84bd63..f5bd0dcbbb1c 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -23,6 +23,7 @@
#include <asm/cpuidle.h>
#include <asm/cputype.h>
+#include <asm/hypervisor.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
@@ -501,6 +502,7 @@ static int __init psci_probe(void)
psci_init_cpu_suspend();
psci_init_system_suspend();
psci_init_system_reset2();
+ kvm_init_hyp_services();
}
return 0;
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
index 72ab84042832..40d19144a860 100644
--- a/drivers/firmware/smccc/Makefile
+++ b/drivers/firmware/smccc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
-obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
+obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o kvm_guest.o
obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
new file mode 100644
index 000000000000..2d3e866decaa
--- /dev/null
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "smccc: KVM: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/hypervisor.h>
+
+static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
+
+void __init kvm_init_hyp_services(void)
+{
+ struct arm_smccc_res res;
+ u32 val[4];
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
+ res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
+ res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
+ res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ return;
+
+ memset(&res, 0, sizeof(res));
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res);
+
+ val[0] = lower_32_bits(res.a0);
+ val[1] = lower_32_bits(res.a1);
+ val[2] = lower_32_bits(res.a2);
+ val[3] = lower_32_bits(res.a3);
+
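+	/* Fold the four 32-bit feature words into the discovery bitmap. */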
+ bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS);
+
+ pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n",
+ res.a3, res.a2, res.a1, res.a0);
+}
+
+bool kvm_arm_hyp_service_available(u32 func_id)
+{
+ if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS)
+ return false;
+
+ return test_bit(func_id, __kvm_arm_hyp_services);
+}
+EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
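A minimal caller sketch (illustrative, not part of this patch; ARM_SMCCC_KVM_FUNC_PTP is assumed here as an example function ID):

    #include <linux/arm-smccc.h>
    #include <asm/hypervisor.h>

    static bool example_has_ptp_service(void)
    {
    	/* The bitmap is populated once by kvm_init_hyp_services() at boot. */
    	return kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP);
    }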
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index d52bfc5ed5e4..028f81d702cc 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -8,6 +8,7 @@
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
#include <asm/archrandom.h>
static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 83082e2f2e44..15b138326ecc 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -812,6 +812,120 @@ int zynqmp_pm_fpga_get_status(u32 *value)
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
+ * zynqmp_pm_pinctrl_request - Request a pin from firmware
+ * @pin: Pin number to request
+ *
+ * This function requests a pin from firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
+
+/**
+ * zynqmp_pm_pinctrl_release - Inform firmware that pin control is released
+ * @pin: Pin number to release
+ *
+ * This function releases the pin back to firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release);
+
+/**
+ * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
+ * @pin: Pin number
+ * @id: Buffer to store function ID
+ *
+ * This function provides the function currently set for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!id)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
+ 0, 0, ret_payload);
+ *id = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
+
+/**
+ * zynqmp_pm_pinctrl_set_function - Set requested function for the pin
+ * @pin: Pin number
+ * @id: Function ID to set
+ *
+ * This function sets the requested function for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function);
+
+/**
+ * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to get
+ * @value: Buffer to store parameter value
+ *
+ * This function gets the requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
+ 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_config);
+
+/**
+ * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to set
+ * @value: Parameter value to set
+ *
+ * This function sets the requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
+ param, value, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config);
+
+/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
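A usage sketch for the new EEMI wrappers (illustrative only; the PM_PINCTRL_* parameter and value IDs are assumed for the example and are not defined by this patch):

    #include <linux/firmware/xlnx-zynqmp.h>

    static int example_enable_pull_up(u32 pin)
    {
    	int ret;

    	ret = zynqmp_pm_pinctrl_request(pin);
    	if (ret)
    		return ret;

    	/* Assumed parameter/value IDs, for illustration. */
    	ret = zynqmp_pm_pinctrl_set_config(pin, PM_PINCTRL_CONFIG_PULL_CTRL,
    					   PM_PINCTRL_BIAS_PULL_UP);

    	zynqmp_pm_pinctrl_release(pin);
    	return ret;
    }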
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d3b3de514f6e..1dd0ec6727fd 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -321,9 +321,8 @@ config GPIO_HLWD
config GPIO_ICH
tristate "Intel ICH GPIO"
- depends on PCI && X86
- select MFD_CORE
- select LPC_ICH
+ depends on X86
+ depends on LPC_ICH
help
Say yes here to support the GPIO functionality of a number of Intel
ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
@@ -502,6 +501,19 @@ config GPIO_RDA
help
Say Y here to support RDA Micro GPIO controller.
+config GPIO_REALTEK_OTTO
+ tristate "Realtek Otto GPIO support"
+ depends on MACH_REALTEK_RTL
+ default MACH_REALTEK_RTL
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+	  The GPIO controller on the Otto MIPS platform supports up to two
+	  banks of 32 GPIOs, with edge-triggered interrupts. Each bank's 32
+	  GPIOs are grouped into four 8-bit wide ports.
+
+ When built as a module, the module will be called realtek_otto_gpio.
+
config GPIO_REG
bool
help
@@ -847,9 +859,9 @@ config GPIO_IT87
config GPIO_SCH
tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
- depends on (X86 || COMPILE_TEST) && PCI
- select MFD_CORE
- select LPC_SCH
+ depends on (X86 || COMPILE_TEST) && ACPI
+ depends on LPC_SCH
+ select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO interface on Intel Poulsbo SCH,
Intel Tunnel Creek processor, Intel Centerton processor or
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4c12f31db31f..d7c81e1611a4 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -125,6 +125,7 @@ obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
obj-$(CONFIG_GPIO_RDA) += gpio-rda.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+obj-$(CONFIG_GPIO_REALTEK_OTTO) += gpio-realtek-otto.o
obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 7a9021c4fa48..71c0bea34d7b 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -49,15 +49,15 @@ struct dio48e_gpio {
unsigned char out_state[6];
unsigned char control[2];
raw_spinlock_t lock;
- unsigned base;
+ unsigned int base;
unsigned char irq_mask;
};
-static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
if (dio48egpio->io_state[port] & mask)
return GPIO_LINE_DIRECTION_IN;
@@ -65,14 +65,14 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned io_port = offset / 8;
+ const unsigned int io_port = offset / 8;
const unsigned int control_port = io_port / 3;
- const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
+ const unsigned int control_addr = dio48egpio->base + 3 + control_port * 4;
unsigned long flags;
- unsigned control;
+ unsigned int control;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -104,17 +104,17 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned io_port = offset / 8;
+ const unsigned int io_port = offset / 8;
const unsigned int control_port = io_port / 3;
- const unsigned mask = BIT(offset % 8);
- const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
- const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int control_addr = dio48egpio->base + 3 + control_port * 4;
+ const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
unsigned long flags;
- unsigned control;
+ unsigned int control;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -154,14 +154,14 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- const unsigned in_port = (port > 2) ? port + 1 : port;
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int in_port = (port > 2) ? port + 1 : port;
unsigned long flags;
- unsigned port_state;
+ unsigned int port_state;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -202,12 +202,12 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
-static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- const unsigned out_port = (port > 2) ? port + 1 : port;
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int out_port = (port > 2) ? port + 1 : port;
unsigned long flags;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -306,7 +306,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
-static int dio48e_irq_set_type(struct irq_data *data, unsigned flow_type)
+static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
const unsigned long offset = irqd_to_hwirq(data);
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 08171431bb8f..34e35b64dcdc 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -37,31 +37,6 @@ struct gpio_aggregator {
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
-static char *get_arg(char **args)
-{
- char *start, *end;
-
- start = skip_spaces(*args);
- if (!*start)
- return NULL;
-
- if (*start == '"') {
- /* Quoted arg */
- end = strchr(++start, '"');
- if (!end)
- return ERR_PTR(-EINVAL);
- } else {
- /* Unquoted arg */
- for (end = start; *end && !isspace(*end); end++) ;
- }
-
- if (*end)
- *end++ = '\0';
-
- *args = end;
- return start;
-}
-
static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
int hwnum, unsigned int *n)
{
@@ -83,8 +58,8 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
static int aggr_parse(struct gpio_aggregator *aggr)
{
+ char *args = skip_spaces(aggr->args);
char *name, *offsets, *p;
- char *args = aggr->args;
unsigned long *bitmap;
unsigned int i, n = 0;
int error = 0;
@@ -93,13 +68,9 @@ static int aggr_parse(struct gpio_aggregator *aggr)
if (!bitmap)
return -ENOMEM;
- for (name = get_arg(&args), offsets = get_arg(&args); name;
- offsets = get_arg(&args)) {
- if (IS_ERR(name)) {
- pr_err("Cannot get GPIO specifier: %pe\n", name);
- error = PTR_ERR(name);
- goto free_bitmap;
- }
+ args = next_arg(args, &name, &p);
+ while (*args) {
+ args = next_arg(args, &offsets, &p);
p = get_options(offsets, 0, &error);
if (error == 0 || *p) {
@@ -125,7 +96,7 @@ static int aggr_parse(struct gpio_aggregator *aggr)
goto free_bitmap;
}
- name = get_arg(&args);
+ args = next_arg(args, &name, &p);
}
if (!n) {
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index de56c013a658..3b31f5e9bf40 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -5,13 +5,11 @@
* Copyright (C) 2010 Extreme Engineering Solutions.
*/
-
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/ioport.h>
#include <linux/mfd/lpc_ich.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#define DRV_NAME "gpio_ich"
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 8f1be34953ce..f332341fd4c8 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -125,14 +125,6 @@ static inline int superio_inw(int reg)
return val;
}
-static inline void superio_outw(int val, int reg)
-{
- outb(reg++, REG);
- outb(val >> 8, VAL);
- outb(reg, REG);
- outb(val, VAL);
-}
-
static inline void superio_set_mask(int mask, int reg)
{
u8 curr_val = superio_inb(reg);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 28b757d34046..d7e73876a3b9 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -479,15 +479,10 @@ static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
static void gpio_mockup_unregister_pdevs(void)
{
- struct platform_device *pdev;
int i;
- for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
- pdev = gpio_mockup_pdevs[i];
-
- if (pdev)
- platform_device_unregister(pdev);
- }
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++)
+ platform_device_unregister(gpio_mockup_pdevs[i]);
}
static __init char **gpio_mockup_make_line_names(const char *label,
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 6dfca83bcd90..4b9157a69fca 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,6 +9,7 @@
* kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -18,6 +19,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/property.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/gpio/driver.h>
@@ -303,8 +306,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct gpio_chip *gc;
- const struct mpc8xxx_gpio_devtype *devtype =
- of_device_get_match_data(&pdev->dev);
+ const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct fwnode_handle *fwnode;
int ret;
mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -315,14 +318,14 @@ static int mpc8xxx_probe(struct platform_device *pdev)
raw_spin_lock_init(&mpc8xxx_gc->lock);
- mpc8xxx_gc->regs = of_iomap(np, 0);
- if (!mpc8xxx_gc->regs)
- return -ENOMEM;
+ mpc8xxx_gc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mpc8xxx_gc->regs))
+ return PTR_ERR(mpc8xxx_gc->regs);
gc = &mpc8xxx_gc->gc;
gc->parent = &pdev->dev;
- if (of_property_read_bool(np, "little-endian")) {
+ if (device_property_read_bool(&pdev->dev, "little-endian")) {
ret = bgpio_init(gc, &pdev->dev, 4,
mpc8xxx_gc->regs + GPIO_DAT,
NULL, NULL,
@@ -345,6 +348,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
mpc8xxx_gc->direction_output = gc->direction_output;
+ devtype = device_get_match_data(&pdev->dev);
if (!devtype)
devtype = &mpc8xxx_gpio_devtype_default;
@@ -369,24 +373,29 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* associated input enable must be set (GPIOxGPIE[IEn]=1) to propagate
* the port value to the GPIO Data Register.
*/
+ fwnode = dev_fwnode(&pdev->dev);
if (of_device_is_compatible(np, "fsl,qoriq-gpio") ||
of_device_is_compatible(np, "fsl,ls1028a-gpio") ||
- of_device_is_compatible(np, "fsl,ls1088a-gpio"))
+ of_device_is_compatible(np, "fsl,ls1088a-gpio") ||
+ is_acpi_node(fwnode))
gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
ret = gpiochip_add_data(gc, mpc8xxx_gc);
if (ret) {
- pr_err("%pOF: GPIO chip registration failed with status %d\n",
- np, ret);
+ dev_err(&pdev->dev,
+ "GPIO chip registration failed with status %d\n", ret);
goto err;
}
- mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
+	mpc8xxx_gc->irqn = platform_get_irq_optional(pdev, 0);
-	if (!mpc8xxx_gc->irqn)
+	if (mpc8xxx_gc->irqn <= 0)
return 0;
- mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
- &mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
+ mpc8xxx_gc->irq = irq_domain_create_linear(fwnode,
+ MPC8XXX_GPIO_PINS,
+ &mpc8xxx_gpio_irq_ops,
+ mpc8xxx_gc);
+
if (!mpc8xxx_gc->irq)
return 0;
@@ -399,8 +408,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
IRQF_SHARED, "gpio-cascade",
mpc8xxx_gc);
if (ret) {
- dev_err(&pdev->dev, "%s: failed to devm_request_irq(%d), ret = %d\n",
- np->full_name, mpc8xxx_gc->irqn, ret);
+ dev_err(&pdev->dev,
+ "failed to devm_request_irq(%d), ret = %d\n",
+ mpc8xxx_gc->irqn, ret);
goto err;
}
@@ -425,12 +435,21 @@ static int mpc8xxx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id gpio_acpi_ids[] = {
+ {"NXP0031",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, gpio_acpi_ids);
+#endif
+
static struct platform_driver mpc8xxx_plat_driver = {
.probe = mpc8xxx_probe,
.remove = mpc8xxx_remove,
.driver = {
.name = "gpio-mpc8xxx",
.of_match_table = mpc8xxx_gpio_ids,
+ .acpi_match_table = ACPI_PTR(gpio_acpi_ids),
},
};
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index dfc0c1eb1b33..524b668eb1ac 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -60,11 +60,6 @@ static inline int is_imx23_gpio(struct mxs_gpio_port *port)
return port->devid == IMX23_GPIO;
}
-static inline int is_imx28_gpio(struct mxs_gpio_port *port)
-{
- return port->devid == IMX28_GPIO;
-}
-
/* Note: This driver assumes 32 GPIOs are handled in one register */
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 56152263ab38..ca23f72165ca 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1373,15 +1373,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
const struct omap_gpio_platform_data *pdata;
struct gpio_bank *bank;
struct irq_chip *irqc;
int ret;
- match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+ pdata = device_get_match_data(dev);
- pdata = match ? match->data : dev_get_platdata(dev);
+ pdata = pdata ?: dev_get_platdata(dev);
if (!pdata)
return -EINVAL;
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
new file mode 100644
index 000000000000..cb64fb5a51aa
--- /dev/null
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+/*
+ * Total register block size is 0x1C for one bank of four ports (A, B, C, D).
+ * An optional second bank, with ports E, F, G, and H, may be present, starting
+ * at register offset 0x1C.
+ */
+
+/*
+ * Pin select: (0) "normal", (1) "dedicate peripheral"
+ * Not used on RTL8380/RTL8390, peripheral selection is managed by control bits
+ * in the peripheral registers.
+ */
+#define REALTEK_GPIO_REG_CNR 0x00
+/* Clear bit (0) for input, set bit (1) for output */
+#define REALTEK_GPIO_REG_DIR 0x08
+#define REALTEK_GPIO_REG_DATA 0x0C
+/* Read bit for IRQ status, write 1 to clear IRQ */
+#define REALTEK_GPIO_REG_ISR 0x10
+/* Two bits per GPIO in IMR registers */
+#define REALTEK_GPIO_REG_IMR 0x14
+#define REALTEK_GPIO_REG_IMR_AB 0x14
+#define REALTEK_GPIO_REG_IMR_CD 0x18
+#define REALTEK_GPIO_IMR_LINE_MASK GENMASK(1, 0)
+#define REALTEK_GPIO_IRQ_EDGE_FALLING 1
+#define REALTEK_GPIO_IRQ_EDGE_RISING 2
+#define REALTEK_GPIO_IRQ_EDGE_BOTH 3
+
+#define REALTEK_GPIO_MAX 32
+#define REALTEK_GPIO_PORTS_PER_BANK 4
+
+/**
+ * struct realtek_gpio_ctrl - Realtek Otto GPIO driver data
+ *
+ * @gc: Associated gpio_chip instance
+ * @base: Base address of the register block for a GPIO bank
+ * @lock: Lock for accessing the IRQ registers and values
+ * @intr_mask: Mask for interrupt lines
+ * @intr_type: Interrupt type selection
+ *
+ * Because the interrupt mask register (IMR) combines the function of IRQ type
+ * selection and masking, two extra values are stored. @intr_mask is used to
+ * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * the selected interrupt types. The logical AND of these values is written to
+ * IMR on changes.
+ */
+struct realtek_gpio_ctrl {
+ struct gpio_chip gc;
+ void __iomem *base;
+ raw_spinlock_t lock;
+ u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
+ u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
+};
+
+/* Expand with more flags as devices with other quirks are added */
+enum realtek_gpio_flags {
+ /*
+ * Allow disabling interrupts, for cases where the port order is
+ * unknown. This may result in a port mismatch between ISR and IMR.
+ * An interrupt would appear to come from a different line than the
+ * line the IRQ handler was assigned to, causing uncaught interrupts.
+ */
+ GPIO_INTERRUPTS_DISABLED = BIT(0),
+};
+
+static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ return container_of(gc, struct realtek_gpio_ctrl, gc);
+}
+
+/*
+ * Normal port order register access
+ *
+ * Port information is stored with the first port at offset 0, followed by the
+ * second, etc. Most registers store one bit per GPIO and use a u8 value per
+ * port. The two interrupt mask registers store two bits per GPIO, so use u16
+ * values.
+ */
+static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
+ unsigned int port, u16 irq_type, u16 irq_mask)
+{
+ iowrite16(irq_type & irq_mask, ctrl->base + REALTEK_GPIO_REG_IMR + 2 * port);
+}
+
+static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
+ unsigned int port, u8 mask)
+{
+ iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + port);
+}
+
+static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
+{
+ return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + port);
+}
+
+/* Set the rising and falling edge mask bits for a GPIO port pin */
+static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
+{
+ return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
+}
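+
+/*
+ * For illustration: realtek_gpio_imr_bits(5, REALTEK_GPIO_IRQ_EDGE_BOTH)
+ * returns 0x3 << 10 = 0x0c00, i.e. the two IMR bits for port pin 5.
+ */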
+
+static void realtek_gpio_irq_ack(struct irq_data *data)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ irq_hw_number_t line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+
+ realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
+}
+
+static void realtek_gpio_irq_unmask(struct irq_data *data)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+ unsigned long flags;
+ u16 m;
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+ m = ctrl->intr_mask[port];
+ m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+ ctrl->intr_mask[port] = m;
+ realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+static void realtek_gpio_irq_mask(struct irq_data *data)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+ unsigned long flags;
+ u16 m;
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+ m = ctrl->intr_mask[port];
+ m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+ ctrl->intr_mask[port] = m;
+ realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+ unsigned long flags;
+ u16 type, t;
+
+ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ type = REALTEK_GPIO_IRQ_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ type = REALTEK_GPIO_IRQ_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ type = REALTEK_GPIO_IRQ_EDGE_BOTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+ t = ctrl->intr_type[port];
+ t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+ t |= realtek_gpio_imr_bits(port_pin, type);
+ ctrl->intr_type[port] = t;
+ realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ return 0;
+}
+
+static void realtek_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+ unsigned int lines_done;
+ unsigned int port_pin_count;
+ unsigned int irq;
+ unsigned long status;
+ int offset;
+
+ chained_irq_enter(irq_chip, desc);
+
+ for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
+ status = realtek_gpio_read_isr(ctrl, lines_done / 8);
+ port_pin_count = min(gc->ngpio - lines_done, 8U);
+ for_each_set_bit(offset, &status, port_pin_count) {
+ /* hwirq numbers run across all ports, so offset by the lines done */
+ irq = irq_find_mapping(gc->irq.domain, lines_done + offset);
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(irq_chip, desc);
+}
+
+static int realtek_gpio_irq_init(struct gpio_chip *gc)
+{
+ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ unsigned int port;
+
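+ /* Start with all interrupts disabled and any stale status cleared */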
+ for (port = 0; (port * 8) < gc->ngpio; port++) {
+ realtek_gpio_write_imr(ctrl, port, 0, 0);
+ realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
+ }
+
+ return 0;
+}
+
+static struct irq_chip realtek_gpio_irq_chip = {
+ .name = "realtek-otto-gpio",
+ .irq_ack = realtek_gpio_irq_ack,
+ .irq_mask = realtek_gpio_irq_mask,
+ .irq_unmask = realtek_gpio_irq_unmask,
+ .irq_set_type = realtek_gpio_irq_set_type,
+};
+
+static const struct of_device_id realtek_gpio_of_match[] = {
+ {
+ .compatible = "realtek,otto-gpio",
+ .data = (void *)GPIO_INTERRUPTS_DISABLED,
+ },
+ {
+ .compatible = "realtek,rtl8380-gpio",
+ },
+ {
+ .compatible = "realtek,rtl8390-gpio",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
+
+static int realtek_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long dev_flags;
+ struct gpio_irq_chip *girq;
+ struct realtek_gpio_ctrl *ctrl;
+ u32 ngpios;
+ int err, irq;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_flags = (unsigned long)device_get_match_data(dev);
+
+ ngpios = REALTEK_GPIO_MAX;
+ device_property_read_u32(dev, "ngpios", &ngpios);
+
+ if (ngpios > REALTEK_GPIO_MAX) {
+ dev_err(dev, "invalid ngpios (max. %d)\n", REALTEK_GPIO_MAX);
+ return -EINVAL;
+ }
+
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ raw_spin_lock_init(&ctrl->lock);
+
+ err = bgpio_init(&ctrl->gc, dev, 4,
+ ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
+ ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
+ BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (err) {
+ dev_err(dev, "unable to init generic GPIO");
+ return err;
+ }
+
+ ctrl->gc.ngpio = ngpios;
+ ctrl->gc.owner = THIS_MODULE;
+
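+ /*
+ * Interrupt support is optional: it is skipped for compatibles flagged
+ * GPIO_INTERRUPTS_DISABLED, or when no usable parent IRQ is provided.
+ */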
+ irq = platform_get_irq_optional(pdev, 0);
+ if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
+ girq = &ctrl->gc.irq;
+ girq->chip = &realtek_gpio_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parent_handler = realtek_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->init_hw = realtek_gpio_irq_init;
+ }
+
+ return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+}
+
+static struct platform_driver realtek_gpio_driver = {
+ .driver = {
+ .name = "realtek-otto-gpio",
+ .of_match_table = realtek_gpio_of_match,
+ },
+ .probe = realtek_gpio_probe,
+};
+module_platform_driver(realtek_gpio_driver);
+
+MODULE_DESCRIPTION("Realtek Otto GPIO support");
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 5412cb3b0b2a..134cedf151a7 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -254,6 +254,11 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
+#if defined(CONFIG_OF_GPIO)
+ /* gpiolib will use of_node of the parent if chip->of_node is NULL */
+ chip->of_node = to_of_node(config->fwnode);
+#endif /* CONFIG_OF_GPIO */
+
/*
* If our regmap is fast_io we should probably set can_sleep to false.
* Right now, the regmap doesn't save this property, nor is there any
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 3a1b1adb08c6..a6f0421d6e50 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -7,33 +7,55 @@
*/
#include <linux/acpi.h>
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#define GEN 0x00
#define GIO 0x04
#define GLV 0x08
+#define GTPE 0x0c
+#define GTNE 0x10
+#define GGPE 0x14
+#define GSMI 0x18
+#define GTS 0x1c
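+/*
+ * GTPE/GTNE enable rising/falling edge triggers, GGPE gates GPE generation,
+ * and GTS latches the trigger status (write 1 to clear).
+ */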
+
+#define CORE_BANK_OFFSET 0x00
+#define RESUME_BANK_OFFSET 0x20
+
+/*
+ * iLB datasheet describes GPE0BLK registers, in particular GPE0E.GPIO bit.
+ * Document Number: 328195-001
+ */
+#define GPE0E_GPIO 14
struct sch_gpio {
struct gpio_chip chip;
+ struct irq_chip irqchip;
spinlock_t lock;
unsigned short iobase;
unsigned short resume_base;
+
+ /* GPE handling */
+ u32 gpe;
+ acpi_gpe_handler gpe_handler;
};
static unsigned int sch_gpio_offset(struct sch_gpio *sch, unsigned int gpio,
unsigned int reg)
{
- unsigned int base = 0;
+ unsigned int base = CORE_BANK_OFFSET;
if (gpio >= sch->resume_base) {
gpio -= sch->resume_base;
- base += 0x20;
+ base = RESUME_BANK_OFFSET;
}
return base + reg + gpio / 8;
@@ -79,10 +101,11 @@ static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned int gpio, unsigned i
static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned int gpio_num)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
+ unsigned long flags;
- spin_lock(&sch->lock);
+ spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GIO, 1);
- spin_unlock(&sch->lock);
+ spin_unlock_irqrestore(&sch->lock, flags);
return 0;
}
@@ -96,20 +119,22 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num)
static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
+ unsigned long flags;
- spin_lock(&sch->lock);
+ spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GLV, val);
- spin_unlock(&sch->lock);
+ spin_unlock_irqrestore(&sch->lock, flags);
}
static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
+ unsigned long flags;
- spin_lock(&sch->lock);
+ spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GIO, 0);
- spin_unlock(&sch->lock);
+ spin_unlock_irqrestore(&sch->lock, flags);
/*
* according to the datasheet, writing to the level register has no
@@ -144,10 +169,145 @@ static const struct gpio_chip sch_gpio_chip = {
.get_direction = sch_gpio_get_direction,
};
+static int sch_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ unsigned long flags;
+ int rising, falling;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ rising = 1;
+ falling = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ rising = 0;
+ falling = 1;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ rising = 1;
+ falling = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&sch->lock, flags);
+
+ sch_gpio_reg_set(sch, gpio_num, GTPE, rising);
+ sch_gpio_reg_set(sch, gpio_num, GTNE, falling);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ spin_unlock_irqrestore(&sch->lock, flags);
+
+ return 0;
+}
+
+static void sch_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sch->lock, flags);
+ sch_gpio_reg_set(sch, gpio_num, GTS, 1);
+ spin_unlock_irqrestore(&sch->lock, flags);
+}
+
+static void sch_irq_mask_unmask(struct irq_data *d, int val)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sch->lock, flags);
+ sch_gpio_reg_set(sch, gpio_num, GGPE, val);
+ spin_unlock_irqrestore(&sch->lock, flags);
+}
+
+static void sch_irq_mask(struct irq_data *d)
+{
+ sch_irq_mask_unmask(d, 0);
+}
+
+static void sch_irq_unmask(struct irq_data *d)
+{
+ sch_irq_mask_unmask(d, 1);
+}
+
+static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
+{
+ struct sch_gpio *sch = context;
+ struct gpio_chip *gc = &sch->chip;
+ unsigned long core_status, resume_status;
+ unsigned long pending;
+ unsigned long flags;
+ int offset;
+ u32 ret;
+
+ spin_lock_irqsave(&sch->lock, flags);
+
+ core_status = inl(sch->iobase + CORE_BANK_OFFSET + GTS);
+ resume_status = inl(sch->iobase + RESUME_BANK_OFFSET + GTS);
+
+ spin_unlock_irqrestore(&sch->lock, flags);
+
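+ /*
+ * Resume-well lines are numbered after the core-well lines, so shift
+ * the resume status up by resume_base before merging the two banks.
+ */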
+ pending = (resume_status << sch->resume_base) | core_status;
+ for_each_set_bit(offset, &pending, sch->chip.ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+
+ /* Set the return value depending on whether we handled an interrupt */
+ ret = pending ? ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
+
+ /* Acknowledge GPE to ACPICA */
+ ret |= ACPI_REENABLE_GPE;
+
+ return ret;
+}
+
+static void sch_gpio_remove_gpe_handler(void *data)
+{
+ struct sch_gpio *sch = data;
+
+ acpi_disable_gpe(NULL, sch->gpe);
+ acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler);
+}
+
+static int sch_gpio_install_gpe_handler(struct sch_gpio *sch)
+{
+ struct device *dev = sch->chip.parent;
+ acpi_status status;
+
+ status = acpi_install_gpe_handler(NULL, sch->gpe, ACPI_GPE_LEVEL_TRIGGERED,
+ sch->gpe_handler, sch);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to install GPE handler for %u: %s\n",
+ sch->gpe, acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ status = acpi_enable_gpe(NULL, sch->gpe);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to enable GPE handler for %u: %s\n",
+ sch->gpe, acpi_format_exception(status));
+ acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler);
+ return -ENODEV;
+ }
+
+ return devm_add_action_or_reset(dev, sch_gpio_remove_gpe_handler, sch);
+}
+
static int sch_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_irq_chip *girq;
struct sch_gpio *sch;
struct resource *res;
+ int ret;
sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
if (!sch)
@@ -207,6 +367,28 @@ static int sch_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sch);
+ sch->irqchip.name = "sch_gpio";
+ sch->irqchip.irq_ack = sch_irq_ack;
+ sch->irqchip.irq_mask = sch_irq_mask;
+ sch->irqchip.irq_unmask = sch_irq_unmask;
+ sch->irqchip.irq_set_type = sch_irq_type;
+
+ girq = &sch->chip.irq;
+ girq->chip = &sch->irqchip;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ /* GPE setup is optional */
+ sch->gpe = GPE0E_GPIO;
+ sch->gpe_handler = sch_gpio_gpe_handler;
+
+ ret = sch_gpio_install_gpe_handler(sch);
+ if (ret)
+ dev_warn(&pdev->dev, "Can't setup GPE, no IRQ support\n");
+
return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch);
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 1aacd2a5a1fd..3ef22a3c104d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1291,6 +1291,13 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
kfree(acpi_gpio);
}
+void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
+{
+ /* Set default fwnode to parent's one if present */
+ if (gc->parent)
+ ACPI_COMPANION_SET(&gdev->dev, ACPI_COMPANION(gc->parent));
+}
+
static int acpi_gpio_package_count(const union acpi_object *obj)
{
const union acpi_object *element = obj->package.elements;
@@ -1440,6 +1447,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC, uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
* HP X2 10 models with Cherry Trail SoC + TI PMIC use an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FF:01 pin 0, causing spurious wakeups.
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index e2edb632b2cc..e476558d9471 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -36,6 +36,8 @@ struct acpi_gpio_info {
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
+void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
+
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
@@ -58,6 +60,8 @@ int acpi_gpio_count(struct device *dev, const char *con_id);
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
+static inline void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { }
+
static inline void
acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index baf0153b7bca..bbcc7c073f63 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1042,11 +1042,13 @@ void of_gpiochip_remove(struct gpio_chip *chip)
void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
{
+ /* Set default OF node to parent's one if present */
+ if (gc->parent)
+ gdev->dev.of_node = gc->parent->of_node;
+
/* If the gpiochip has an assigned OF node this takes precedence */
if (gc->of_node)
gdev->dev.of_node = gc->of_node;
else
gc->of_node = gdev->dev.of_node;
- if (gdev->dev.of_node)
- gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 6367646dce83..1427c1be749b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -586,14 +586,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (!gdev)
return -ENOMEM;
gdev->dev.bus = &gpio_bus_type;
+ gdev->dev.parent = gc->parent;
gdev->chip = gc;
gc->gpiodev = gdev;
- if (gc->parent) {
- gdev->dev.parent = gc->parent;
- gdev->dev.of_node = gc->parent->of_node;
- }
of_gpio_dev_init(gc, gdev);
+ acpi_gpio_dev_init(gc, gdev);
/*
* Assign fwnode depending on the result of the previous calls,
@@ -1465,9 +1463,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
+ struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev);
struct irq_chip *irqchip = gc->irq.chip;
- const struct irq_domain_ops *ops = NULL;
- struct device_node *np;
unsigned int type;
unsigned int i;
@@ -1479,7 +1476,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
return -EINVAL;
}
- np = gc->gpiodev->dev.of_node;
type = gc->irq.default_type;
/*
@@ -1487,15 +1483,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
* used to configure the interrupts, as you may end up with
* conflicting triggers. Tell the user, and reset to NONE.
*/
- if (WARN(np && type != IRQ_TYPE_NONE,
- "%s: Ignoring %u default trigger\n", np->full_name, type))
- type = IRQ_TYPE_NONE;
-
- if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gc->parent),
- "Ignoring %u default trigger\n", type);
+ if (WARN(fwnode && type != IRQ_TYPE_NONE,
+ "%pfw: Ignoring %u default trigger\n", fwnode, type))
type = IRQ_TYPE_NONE;
- }
if (gc->to_irq)
chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
@@ -1512,15 +1502,11 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
return ret;
} else {
/* Some drivers provide custom irqdomain ops */
- if (gc->irq.domain_ops)
- ops = gc->irq.domain_ops;
-
- if (!ops)
- ops = &gpiochip_domain_ops;
- gc->irq.domain = irq_domain_add_simple(np,
+ gc->irq.domain = irq_domain_create_simple(fwnode,
gc->ngpio,
gc->irq.first,
- ops, gc);
+ gc->irq.domain_ops ?: &gpiochip_domain_ops,
+ gc);
if (!gc->irq.domain)
return -EINVAL;
}
@@ -3684,11 +3670,12 @@ EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index);
*/
int gpiod_count(struct device *dev, const char *con_id)
{
+ const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
int count = -ENOENT;
- if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
+ if (is_of_node(fwnode))
count = of_gpio_get_count(dev, con_id);
- else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev))
+ else if (is_acpi_node(fwnode))
count = acpi_gpio_count(dev, con_id);
if (count < 0)
@@ -3826,18 +3813,17 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
int ret;
/* Maybe we have a device name, maybe not */
const char *devname = dev ? dev_name(dev) : "?";
+ const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
- if (dev) {
- /* Using device tree? */
- if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- dev_dbg(dev, "using device tree for GPIO lookup\n");
- desc = of_find_gpio(dev, con_id, idx, &lookupflags);
- } else if (ACPI_COMPANION(dev)) {
- dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags);
- }
+ /* Using device tree? */
+ if (is_of_node(fwnode)) {
+ dev_dbg(dev, "using device tree for GPIO lookup\n");
+ desc = of_find_gpio(dev, con_id, idx, &lookupflags);
+ } else if (is_acpi_node(fwnode)) {
+ dev_dbg(dev, "using ACPI for GPIO lookup\n");
+ desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags);
}
/*
@@ -3921,9 +3907,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
struct gpio_desc *desc = ERR_PTR(-ENODEV);
int ret;
- if (!fwnode)
- return ERR_PTR(-EINVAL);
-
if (is_of_node(fwnode)) {
desc = gpiod_get_from_of_node(to_of_node(fwnode),
propname, index,
@@ -3939,7 +3922,8 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(&dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
- }
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
/* Currently only ACPI takes this path */
ret = gpiod_request(desc, label);
@@ -4220,11 +4204,13 @@ EXPORT_SYMBOL_GPL(gpiod_put_array);
static int gpio_bus_match(struct device *dev, struct device_driver *drv)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
/*
* Only match if the fwnode doesn't already have a proper struct device
* created for it.
*/
- if (dev->fwnode && dev->fwnode->dev != dev)
+ if (fwnode && fwnode->dev != dev)
return 0;
return 1;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 14f68c028126..5ffb07b02810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -234,14 +234,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
})
/* GPUVM API */
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
- void **vm, void **process_info,
- struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e93850f2f3b1..7d4118c8128a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -445,22 +445,19 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev) {
- mapping_flags |= AMDGPU_VM_MTYPE_RW;
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
if (adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
snoop = true;
- if (adev->gmc.xgmi.connected_to_cpu)
- /* system memory uses NC on A+A */
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- else
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
default:
@@ -1037,41 +1034,6 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
- void **vm, void **process_info,
- struct dma_fence **ef)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *new_vm;
- int ret;
-
- new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
- if (!new_vm)
- return -ENOMEM;
-
- /* Initialize AMDGPU part of the VM */
- ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
- if (ret) {
- pr_err("Failed init vm ret %d\n", ret);
- goto amdgpu_vm_init_fail;
- }
-
- /* Initialize KFD part of the VM and process info */
- ret = init_kfd_vm(new_vm, process_info, ef);
- if (ret)
- goto init_kfd_vm_fail;
-
- *vm = (void *) new_vm;
-
- return 0;
-
-init_kfd_vm_fail:
- amdgpu_vm_fini(adev, new_vm);
-amdgpu_vm_init_fail:
- kfree(new_vm);
- return ret;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
@@ -1138,21 +1100,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
-
- if (WARN_ON(!kgd || !vm))
- return;
-
- pr_debug("Destroying process vm %p\n", vm);
-
- /* Release the VM context */
- amdgpu_vm_fini(adev, avm);
- kfree(vm);
-}
-
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 9a2f811450ed..2e622c1675d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -908,6 +908,19 @@ int amdgpu_display_gem_fb_verify_and_init(
&amdgpu_fb_funcs);
if (ret)
goto err;
+
+ /* Verify that the modifier is supported. */
+ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+
+ drm_dbg_kms(dev,
+ "unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->modifier[0]);
+
+ ret = -EINVAL;
+ goto err;
+ }
ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e0c4f7c7f1b9..baa980a477d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -291,8 +291,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
- dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
+ bo->tbo.base.size, attach->dev, dir, &sgt);
if (r)
return ERR_PTR(r);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index d8f131ed10cb..922938931e1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -185,7 +185,7 @@ uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
.timeout_fatal_disable = false,
- .period = 0x23, /* default to max. timeout = 1 << 0x23 cycles */
+ .period = 0x0, /* default to 0x0 (timeout disabled) */
};
/**
@@ -553,7 +553,7 @@ module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_di
* DOC: timeout_period (uint)
* Modify the watchdog timeout max_cycles as (1 << period)
*/
-MODULE_PARM_DESC(timeout_period, "watchdog timeout period (1 to 0x23(default), timeout maxCycles = (1 << period)");
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period))");
module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 4c5c19820d37..4f10c4529840 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -205,7 +205,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL;
int ret;
- unsigned long tmp;
memset(&mode_cmd, 0, sizeof(mode_cmd));
mode_cmd.width = sizes->surface_width;
@@ -246,8 +245,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->fbops = &amdgpufb_ops;
- tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
- info->fix.smem_start = adev->gmc.aper_base + tmp;
+ info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo);
info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 4d32233cde92..c39ed9eb0987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -487,6 +487,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -497,7 +498,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
"Trusted Memory Zone (TMZ) feature enabled\n");
}
break;
- case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
@@ -661,8 +661,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
u64 vram_addr = adev->vm_manager.vram_base_offset -
adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
u64 vram_end = vram_addr + vram_size;
- u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) +
- adev->vm_manager.vram_base_offset - adev->gmc.vram_start;
+ u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
flags |= AMDGPU_PTE_WRITEABLE;
@@ -685,3 +684,39 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
/* Requires gart_ptb_gpu_pa to be 4K aligned */
amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}
+
+/**
+ * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
+ * address
+ *
+ * @adev: amdgpu_device pointer
+ * @mc_addr: MC address of buffer
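+ *
+ * Returns: @mc_addr translated to a physical address in the GPU's view.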
+ */
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
+{
+ return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+}
+
+/**
+ * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
+ * GPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
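+ *
+ * Returns: the buffer object's physical address from the GPU's view.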
+ */
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
+}
+
+/**
+ * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
+ * from CPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
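+ *
+ * Returns: the buffer object's physical address from the CPU's view,
+ * i.e. an address within the PCI BAR aperture.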
+ */
+uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index cbb7735c6988..9d11c02a3938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -218,15 +218,6 @@ struct amdgpu_gmc {
*/
u64 fb_start;
u64 fb_end;
- /* In the case of use GART table for vmid0 FB access, [fb_start, fb_end]
- * will be squeezed to GART aperture. But we have a PSP FW issue to fix
- * for now. To temporarily workaround the PSP FW issue, added below two
- * variables to remember the original fb_start/end to re-enable FB
- * aperture to workaround the PSP FW issue. Will delete it after we
- * get a proper PSP FW fix.
- */
- u64 fb_start_original;
- u64 fb_end_original;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
@@ -341,4 +332,7 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 148a3b481b12..a2fe2dac32c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -76,6 +76,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
+ /* flush the cache before committing the IB */
+ ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 19c0a3655228..82e9ecf84352 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
GFP_KERNEL);
- if (!pmu_entry->pmu.attr_groups)
+ if (!pmu_entry->pmu.attr_groups) {
+ ret = -ENOMEM;
goto err_attr_group;
+ }
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
adev_to_drm(pmu_entry->adev)->primary->index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9e769cf6095b..a09483beb968 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -328,8 +328,12 @@ psp_cmd_submit_buf(struct psp_context *psp,
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
- uint64_t tmr_mc, uint32_t size)
+ uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t size = amdgpu_bo_size(tmr_bo);
+ uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
+
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
@@ -337,6 +341,9 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
+ cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}
static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -407,16 +414,6 @@ static int psp_tmr_init(struct psp_context *psp)
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
- /* workaround the tmr_mc_addr:
- * PSP requires an address in FB aperture. Right now driver produce
- * tmr_mc_addr in the GART aperture. Convert it back to FB aperture
- * for PSP. Will revert it after we get a fix from PSP FW.
- */
- if (psp->adev->asic_type == CHIP_ALDEBARAN) {
- psp->tmr_mc_addr -= psp->adev->gmc.fb_start;
- psp->tmr_mc_addr += psp->adev->gmc.fb_start_original;
- }
-
return ret;
}
@@ -466,8 +463,7 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
- amdgpu_bo_size(psp->tmr_bo));
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
@@ -561,7 +557,7 @@ static int psp_boot_config_set(struct amdgpu_device *adev)
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
- if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ if (adev->asic_type != CHIP_SIENNA_CICHLID || amdgpu_sriov_vf(adev))
return 0;
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 0541196ae1ed..b0d2fc9454ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -114,7 +114,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_ras_check_bad_page(adev, address)) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has been marked as bad page!\n",
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
address);
return 0;
}
@@ -221,18 +221,17 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 1;
else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
op = 2;
- else if (sscanf(str, "retire_page") == 0)
+ else if (strstr(str, "retire_page") != NULL)
op = 3;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
if (op != -1) {
-
if (op == 3) {
- if (sscanf(str, "%*s %llu", &address) != 1)
- if (sscanf(str, "%*s 0x%llx", &address) != 1)
- return -EINVAL;
+ if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
+ sscanf(str, "%*s %llu", &address) != 1)
+ return -EINVAL;
data->op = op;
data->inject.address = address;
@@ -255,11 +254,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->op = op;
if (op == 2) {
- if (sscanf(str, "%*s %*s %*s %u %llu %llu",
- &sub_block, &address, &value) != 3)
- if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
- &sub_block, &address, &value) != 3)
- return -EINVAL;
+ if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+ &sub_block, &address, &value) != 3 &&
+ sscanf(str, "%*s %*s %*s %u %llu %llu",
+ &sub_block, &address, &value) != 3)
+ return -EINVAL;
data->head.sub_block_index = sub_block;
data->inject.address = address;
data->inject.value = value;
@@ -278,7 +277,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
/**
* DOC: AMDGPU RAS debugfs control interface
*
- * It accepts struct ras_debug_if who has two members.
+ * The control interface accepts struct ras_debug_if which has two members.
*
* First member: ras_debug_if::head or ras_debug_if::inject.
*
@@ -303,32 +302,33 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
*
* How to use the interface?
*
- * Programs
+ * In a program
*
- * Copy the struct ras_debug_if in your codes and initialize it.
- * Write the struct to the control node.
+ * Copy the struct ras_debug_if in your code and initialize it.
+ * Write the struct to the control interface.
*
- * Shells
+ * From a shell
*
* .. code-block:: bash
*
- * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
*
- * Parameters:
+ * where <N> is the index of the card you want to affect.
*
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, sdma, gfx, .........
+ * "disable" requires only the block.
+ * "enable" requires the block and error type.
+ * "inject" requires the block, error type, address, and value.
+ * The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block:
- * sub block index, pass 0 if there is no sub block
+ * The error type is one of: ue, ce, where
+ * ue is multi-uncorrectable
+ * ce is single-correctable
+ * The sub-block is the sub-block index; pass 0 if there is no sub-block.
+ * The address and value are hexadecimal numbers; a leading 0x is optional.
*
- * here are some examples for bash commands:
+ * For instance,
*
* .. code-block:: bash
*
@@ -336,17 +336,17 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
*
- * How to check the result?
+ * How to check the result of the operation?
*
- * For disable/enable, please check ras features at
+ * To check disable/enable, see the "ras" features at:
* /sys/class/drm/card[0/1/2...]/device/ras/features
*
- * For inject, please check corresponding err count at
- * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ * To check inject, see the corresponding error count at:
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
*
* .. note::
* Operations are only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
* to see which blocks support RAS on a particular asic.
*
*/
@@ -367,11 +367,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
if (ret)
return -EINVAL;
- if (data.op == 3)
- {
+ if (data.op == 3) {
ret = amdgpu_reserve_page_direct(adev, data.inject.address);
-
- if (ret)
+ if (!ret)
return size;
else
return ret;
@@ -503,6 +501,12 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
+
+ if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+ if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+ DRM_WARN("Failed to reset error counter and error status");
+ }
+
return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
"ce", info.ce_count);
}
@@ -1269,6 +1273,8 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
&amdgpu_ras_debugfs_ctrl_ops);
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
&amdgpu_ras_debugfs_eeprom_ops);
+ debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
+ &con->bad_page_cnt_threshold);
/*
* After one uncorrectable error happens, usually GPU recovery will
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index dec0db8b0b13..9e38475e0f8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -112,6 +112,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
+ u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 592a2dd16493..bce105e2973e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
+#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
@@ -565,6 +566,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
*
* @adev: amdgpu device pointer
* @mem: TTM memory object
+ * @offset: byte offset from the base of VRAM BO
+ * @length: number of bytes to export in sg_table
* @dev: the other device
* @dir: dma direction
* @sgt: resulting sg table
@@ -573,39 +576,47 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
+ u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt)
{
- struct drm_mm_node *node;
+ struct amdgpu_res_cursor cursor;
struct scatterlist *sg;
int num_entries = 0;
- unsigned int pages;
int i, r;
*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
- for (pages = mem->num_pages, node = mem->mm_node;
- pages; pages -= node->size, ++node)
- ++num_entries;
+ /* Determine the number of DRM_MM nodes to export */
+ amdgpu_res_first(mem, offset, length, &cursor);
+ while (cursor.remaining) {
+ num_entries++;
+ amdgpu_res_next(&cursor, cursor.size);
+ }
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
if (r)
goto error_free;
+ /* Initialize scatterlist nodes of sg_table */
for_each_sgtable_sg((*sgt), sg, i)
sg->length = 0;
- node = mem->mm_node;
+ /*
+ * Walk down DRM_MM nodes to populate scatterlist nodes
+ * @note: Use the iterator API to get the first DRM_MM node
+ * and the number of bytes from it. Access the following
+ * DRM_MM node(s) if more buffer needs to be exported.
+ */
+ amdgpu_res_first(mem, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = (node->start << PAGE_SHIFT) +
- adev->gmc.aper_base;
- size_t size = node->size << PAGE_SHIFT;
+ phys_addr_t phys = cursor.start + adev->gmc.aper_base;
+ size_t size = cursor.size;
dma_addr_t addr;
- ++node;
addr = dma_map_resource(dev, phys, size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
r = dma_mapping_error(dev, addr);
@@ -615,7 +626,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg_set_page(sg, NULL, size, 0);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
+
+ amdgpu_res_next(&cursor, cursor.size);
}
+
return 0;
error_unmap:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 06811a1f4625..a078a38c2cee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1587,6 +1587,9 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = 0;
adev->gfx.mec2_fw = NULL;
}
+ } else {
+ adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
+ adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index 830080ff90d8..b4789dfc2bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -994,7 +994,7 @@ static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
-static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
@@ -1007,15 +1007,21 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_ea_err_status_regs.instance;
j++) {
gfx_v9_4_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
- gfx_v9_4_rdrsp_status_regs));
- if (reg_value)
+ gfx_v9_4_ea_err_status_regs));
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+ /*
+ * SDP read/write error/parity error in FUE_IS_FATAL mode
+ * can cause a system fatal error on Arcturus. Harvest the
+ * error status before GPU reset.
+ */
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 9ca76a3ac38c..a30c7c10cd9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -808,7 +808,7 @@ static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
};
-static const struct soc15_reg_entry gfx_v9_4_2_rdrsp_status_regs =
+static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 };
static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
@@ -997,8 +997,9 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
blk->clear);
/* print the edc count */
- gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
- ded_cnt);
+ if (sec_cnt || ded_cnt)
+ gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
+ ded_cnt);
}
}
@@ -1040,11 +1041,11 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
uint32_t i, j;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -1089,17 +1090,20 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
- gfx_v9_4_2_rdrsp_status_regs));
- if (reg_value)
+ gfx_v9_4_2_ea_err_status_regs));
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
+ }
/* clear after read */
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
}
}
@@ -1112,19 +1116,19 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
uint32_t data;
data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d189507dcef0..1e4678cb98f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -120,8 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -141,21 +140,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* FB aperture and AGP aperture. Disable them.
*/
if (adev->gmc.pdb0_bo) {
- if (adev->asic_type == CHIP_ALDEBARAN) {
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
- } else {
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
- }
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 2aecc6a243e8..14c1c1a297dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -165,8 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 410fd3a1a388..41807817de7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -164,8 +164,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 2bfd620576f2..498b28a35f5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -568,8 +568,7 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
- *addr = adev->vm_manager.vram_base_offset + *addr -
- adev->gmc.vram_start;
+ *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c82d82da2c73..455bb91060d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -574,7 +574,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* be updated to avoid reading an incorrect value due to
* the new fast GRBM interface.
*/
- if (entry->vmid_src == AMDGPU_GFXHUB_0)
+ if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
+ (adev->asic_type < CHIP_ALDEBARAN))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -802,7 +803,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* be cleared to avoid a false ACK due to the new fast
* GRBM interface.
*/
- if (vmhub == AMDGPU_GFXHUB_0)
+ if ((vmhub == AMDGPU_GFXHUB_0) &&
+ (adev->asic_type < CHIP_ALDEBARAN))
RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng);
@@ -1048,8 +1050,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
- *addr = adev->vm_manager.vram_base_offset + *addr -
- adev->gmc.vram_start;
+ *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index aa9be5612c89..a99953833820 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -114,8 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
return;
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 7977a7879b32..0103a5ab28e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -47,8 +47,6 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
adev->gmc.fb_start = base;
adev->gmc.fb_end = top;
- adev->gmc.fb_start_original = base;
- adev->gmc.fb_end_original = top;
return base;
}
@@ -126,17 +124,16 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
if (amdgpu_sriov_vf(adev))
return;
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -1287,7 +1284,7 @@ static void mmhub_v1_7_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-static const struct soc15_reg_entry mmhub_v1_7_err_status_regs[] = {
+static const struct soc15_reg_entry mmhub_v1_7_ea_err_status_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 },
@@ -1304,12 +1301,15 @@ static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
return;
- for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_err_status_regs); i++) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) {
reg_value =
- RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_err_status_regs[i]));
- if (reg_value)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i]));
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index da7edd1ed6b2..ac76081b91d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -210,8 +210,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 1141c37432f0..a9899335d0b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -162,8 +162,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0cffa820ea6e..47c8dd9d1c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -136,8 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
@@ -1646,9 +1645,15 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
- if (reg_value)
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+ /* SDP read/write errors and parity errors in FUE_IS_FATAL mode
+ * can cause a system fatal error on Arcturus. Harvest the error
+ * status before GPU reset. */
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 83ea063388fd..0d2d629e2d6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -31,6 +31,28 @@
#include "vega10_enum.h"
#include <uapi/linux/kfd_ioctl.h>
+#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
+#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123530
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnRCC_BIF_STRAP2 0x10123488
+#define smnRCC_BIF_STRAP3 0x1012348c
+#define smnRCC_BIF_STRAP5 0x10123494
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+
static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -256,6 +278,111 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
+static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
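+ /* program the endpoint's TX LTR control, then unblock and enable LTR messaging */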
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
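+ /* most registers below are read-modify-write and only written back when the value changes */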
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
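+ /* LTR capability: 0x1001 in both halves, i.e. max snoop (15:0) and
+ * max no-snoop (31:16) latency per the PCIe LTR capability layout */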
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v6_1_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
@@ -274,4 +401,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
.remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
+ .program_aspm = nbio_v6_1_program_aspm,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index af44aad78171..cef929746739 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -31,7 +31,26 @@
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
+#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
+#define smnRCC_BIF_STRAP3 0x1012348c
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+#define smnRCC_BIF_STRAP5 0x10123494
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
+#define smnRCC_BIF_STRAP2 0x10123488
+#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
/*
* These are nbio v7_4_1 register masks. Temporarily define these here since
@@ -567,6 +586,111 @@ const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
.ras_fini = amdgpu_nbio_ras_fini,
};
+static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v7_4_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
@@ -587,4 +711,5 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .program_aspm = nbio_v7_4_program_aspm,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 46d4bbabce75..d54af7f8801b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -601,8 +601,7 @@ static void nv_program_aspm(struct amdgpu_device *adev)
if (amdgpu_aspm != 1)
return;
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- !(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
adev->nbio.funcs->program_aspm(adev);
@@ -934,12 +933,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
- /*
- * The ASPM function is not fully enabled and verified on
- * Navi yet. Temporarily skip this until ASPM enabled.
- */
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- !(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->enable_aspm))
adev->nbio.funcs->enable_aspm(adev, !enter);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index dd4d65f7e0f0..96064c343163 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -185,10 +185,19 @@ struct psp_gfx_cmd_setup_tmr
uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */
+ union {
+ struct {
+ uint32_t sriov_enabled:1; /* whether the device runs under SR-IOV */
+ uint32_t virt_phy_addr:1; /* driver passes both virtual and physical addresses to PSP */
+ uint32_t reserved:30;
+ } bitfield;
+ uint32_t tmr_flags;
+ };
+ uint32_t system_phy_addr_lo; /* bits [31:0] of system physical address of TMR buffer (must be 4 KB aligned) */
+ uint32_t system_phy_addr_hi; /* bits [63:32] of system physical address of TMR buffer */
};
-
/* FW types for GFX_CMD_ID_LOAD_IP_FW command. Limit 31. */
enum psp_gfx_fw_type {
GFX_FW_TYPE_NONE = 0, /* */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index 6fcb95c89999..bf95007f0843 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -160,6 +160,7 @@ static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
};
static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
+ uint32_t reg_offset,
uint32_t value,
uint32_t instance,
uint32_t *sec_count)
@@ -169,6 +170,9 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
/* double bit (multiple bits) error detection is not supported */
for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) {
+ if (sdma_v4_4_ras_fields[i].reg_offset != reg_offset)
+ continue;
+
/* the SDMA_EDC_COUNTER register in each sdma instance
 * shares the same sed shift_mask
 */
@@ -197,13 +201,30 @@ static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value = RREG32(reg_offset);
/* double bit error is not supported */
if (reg_value)
- sdma_v4_4_get_ras_error_count(adev, reg_value, instance, &sec_count);
- /* err_data->ce_count should be initialized to 0
- * before calling into this function */
- err_data->ce_count += sec_count;
- /* double bit error is not supported
- * set ue count to 0 */
- err_data->ue_count = 0;
+ sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER, reg_value,
+ instance, &sec_count);
+
+ reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER2);
+ reg_value = RREG32(reg_offset);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER2, reg_value,
+ instance, &sec_count);
+
+ /*
+ * err_data->ue_count should be initialized to 0
+ * before calling into this function
+ *
+ * SDMA RAS supports single bit uncorrectable error detection,
+ * so increment the uncorrectable error count.
+ */
+ err_data->ue_count += sec_count;
+
+ /*
+ * SDMA RAS does not support correctable errors.
+ * Set ce count to 0.
+ */
+ err_data->ce_count = 0;
return 0;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 93f826a7d3f0..b1ad9e52b234 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -370,6 +370,33 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
+ * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * flush the IB by graphics cache rinse.
+ */
+static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ uint32_t gcr_cntl =
+ SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
+ SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
+
+ /* flush the entire L0/L1/L2 caches; this can be tuned based on performance requirements */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
+ SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
+}
+
+/**
* sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
@@ -1663,6 +1690,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
.emit_ib = sdma_v5_2_ring_emit_ib,
+ .emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
.emit_fence = sdma_v5_2_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 5c5eb3aed1b3..d80e12b80c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -816,11 +816,12 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
static void soc15_program_aspm(struct amdgpu_device *adev)
{
-
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if (!(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->program_aspm))
+ adev->nbio.funcs->program_aspm(adev);
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index ea338de5818a..735ebbd1148f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,6 +81,30 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
+#define ixPCIE_L1_PM_SUB_CNTL 0x378
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
+#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
+#define LINK_CAP 0x64
+#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define ixCPM_CONTROL 0x1400118
+#define ixPCIE_LC_CNTL7 0x100100BC
+#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009
+#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
+#define PCIE_L1_PM_SUB_CNTL 0x378
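+/* true on Polaris10..Polaris12 parts with rid >= 0x6E */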
+#define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \
+ (asic_type <= CHIP_POLARIS12) && \
+ (rid >= 0x6E))
/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
@@ -1091,13 +1115,178 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
/* todo */
}
+static void vi_enable_aspm(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+
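+ /* set default L0s/L1 inactivity timers, allow PMI-triggered L1 entry and delay L1 exit */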
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
+ PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
+ PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+}
+
static void vi_program_aspm(struct amdgpu_device *adev)
{
+ u32 data, data1, orig;
+ bool bL1SS = false;
+ bool bClkReqSupport = true;
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if (adev->flags & AMD_IS_APU ||
+ adev->asic_type < CHIP_POLARIS10)
+ return;
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
+ data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
+
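+ /* L1 substates are considered enabled if either the LC override
+ * or the PCIe config space L1 PM substates control enables them */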
+ data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
+ pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
+ if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
+ (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
+ bL1SS = true;
+ } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
+ bL1SS = true;
+ }
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL6, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
+
+ pci_read_config_dword(adev->pdev, LINK_CAP, &data);
+ if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
+ bClkReqSupport = false;
+
+ if (bClkReqSupport) {
+ orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
+ (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixTHM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
+ data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
+ MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
+ (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
+ data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
+ data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+ data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
+
+ orig = data = RREG32_PCIE(ixCPM_CONTROL);
+ data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
+ CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
+ if (orig != data)
+ WREG32_PCIE(ixCPM_CONTROL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
+ data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
+ data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
+
+ orig = data = RREG32(mmBIF_CLK_CTRL);
+ data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
+ if (orig != data)
+ WREG32(mmBIF_CLK_CTRL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL7, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
+ data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_HW_DEBUG, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
+ if (bL1SS)
+ data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL2, data);
+
+ }
+
+ vi_enable_aspm(adev);
+
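+ /* clear the L0s inactivity timer when the N_FTS field reads all-ones
+ * and the link was reverse trained in both directions */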
+ data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
+ data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
+ data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+
+ if ((adev->asic_type == CHIP_POLARIS12 &&
+ !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
+ ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
+ data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
+ }
}
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 1c20458f3962..696944fa0177 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -25,6 +25,70 @@
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
+#include "amdgpu.h"
+
+enum SQ_INTERRUPT_WORD_ENCODING {
+ SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
+ SQ_INTERRUPT_WORD_ENCODING_INST,
+ SQ_INTERRUPT_WORD_ENCODING_ERROR,
+};
+
+enum SQ_INTERRUPT_ERROR_TYPE {
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
+ SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
+ SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
+};
+
+/* SQ_INTERRUPT_WORD_AUTO_CTXID */
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26
+
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000
+
+/* SQ_INTERRUPT_WORD_WAVE_CTXID */
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26
+
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000
+
+#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
+ ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))
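+/* e.g. ctx0 = 0x12345678, ctx1 = 0xab packs to
+ * (0x678 | 0x1000 | 0xab0000) = 0xab1678
+ */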
+
+#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
+#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -108,13 +172,15 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
- uint32_t context_id;
+ uint32_t context_id0, context_id1;
+ uint32_t sq_intr_err, sq_int_data, encoding;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
client_id == SOC15_IH_CLIENTID_SE0SH ||
@@ -122,10 +188,59 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_SE2SH ||
client_id == SOC15_IH_CLIENTID_SE3SH) {
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, context_id, 32);
- else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_event_interrupt(pasid, context_id0, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
+ sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1);
+ encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
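+ /* decode per the SQ_INTERRUPT_WORD_*_CTXID layouts defined above */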
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+ pr_debug(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+ pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ sq_int_data);
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+ pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ sq_intr_err);
+ if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
+ sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
+ kfd_signal_hw_exception_event(pasid);
+ amdgpu_amdkfd_gpu_reset(dev->kgd);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
@@ -136,7 +251,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_SDMA6 ||
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP)
- kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5a1f2433632b..73f2257acc23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -333,10 +333,6 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
return 0;
}
-extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
-
/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
*/
int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d4241d29ea94..d97e330a5022 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -935,9 +935,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->dev->kgd, pdd->vm);
fput(pdd->drm_file);
}
- else if (pdd->vm)
- amdgpu_amdkfd_gpuvm_destroy_process_vm(
- pdd->dev->kgd, pdd->vm);
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
@@ -1375,19 +1372,18 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct kfd_dev *dev;
int ret;
+ if (!drm_file)
+ return -EINVAL;
+
if (pdd->vm)
- return drm_file ? -EBUSY : 0;
+ return -EBUSY;
p = pdd->process;
dev = pdd->dev;
- if (drm_file)
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
- dev->kgd, drm_file, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
- else
- ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
+ dev->kgd, drm_file, p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
@@ -1409,8 +1405,6 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
err_init_cwsr:
err_reserve_ib_mem:
kfd_process_device_free_bos(pdd);
- if (!drm_file)
- amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
pdd->vm = NULL;
return ret;
@@ -1435,6 +1429,9 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+ if (!pdd->vm)
+ return ERR_PTR(-ENODEV);
+
/*
* signal runtime-pm system to auto resume and prevent
* further runtime suspend once device pdd is created until
@@ -1452,10 +1449,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
if (err)
goto out;
- err = kfd_process_device_init_vm(pdd, NULL);
- if (err)
- goto out;
-
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3c3b3441ad2..b34ab76c5f4c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2552,11 +2552,14 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct amdgpu_device *adev = drm_to_adev(dev);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
@@ -2696,6 +2699,10 @@ static void handle_hpd_rx_irq(void *param)
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
@@ -4225,6 +4232,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
const struct drm_format_info *info = drm_format_info(format);
+ int i;
enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
@@ -4232,11 +4240,22 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return false;
/*
- * We always have to allow this modifier, because core DRM still
- * checks LINEAR support if userspace does not provide modifers.
+ * We always have to allow these modifiers:
+ * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
+ * 2. Not passing any modifiers is the same as explicitly passing INVALID.
*/
- if (modifier == DRM_FORMAT_MOD_LINEAR)
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_INVALID) {
return true;
+ }
+
+ /* Check that the modifier is on the list of the plane's supported modifiers. */
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return false;
/*
* For D swizzle the canonical modifier depends on the bpp, so check
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 018943113025..b2f2ccfc20bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -410,6 +410,7 @@ struct amdgpu_display_manager {
*/
struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
bool force_timing_sync;
+ bool disable_hpd_irq;
bool dmcub_trace_event_en;
/**
* @da_list:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a13f47022df..529545045a3e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3077,6 +3077,37 @@ static int force_timing_sync_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
force_timing_sync_set, "%llu\n");
+
+/*
+ * Disables all HPD and HPD RX interrupt handling in the
+ * driver when set to 1. Default is 0.
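+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_disable_hpd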
+ */
+static int disable_hpd_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.disable_hpd_irq = (bool)val;
+
+ return 0;
+}
+
+
+/*
+ * Returns 1 if HPD and HPD RX interrupt handling is disabled,
+ * 0 otherwise.
+ */
+static int disable_hpd_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.disable_hpd_irq;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
+ disable_hpd_set, "%llu\n");
+
/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -3213,4 +3244,8 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
&dcc_en_bits_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
+ &disable_hpd_ops);
+
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 60f91853bd82..616f5b1ea3a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -434,6 +434,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
int link_index = aconnector->dc_link->link_index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+ struct drm_connector_state *conn_state;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -459,8 +460,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 3;
link->adjust.hdcp1.disable = 0;
+ conn_state = aconnector->base.state;
- hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
+ pr_debug("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
+ (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
+ (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);
+
+ hdcp_update_display(hdcp_work, link_index, aconnector, conn_state->hdcp_content_type, false);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 103e29905b57..e8b325a828c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -711,3 +711,28 @@ bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enab
enable ? "en" : "dis", ret);
return ret;
}
+
+void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
+{
+ /* TODO: virtual DPCD */
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ if (link->aux_access_disabled)
+ return;
+
+ if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw,
+ sizeof(old_downspread)))
+ return;
+
+ new_downspread.raw = old_downspread.raw;
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw)
+ dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw,
+ sizeof(new_downspread));
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 73cdb9fe981a..9b221db526dc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -229,6 +229,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
(aconnector->edid->extensions + 1) * EDID_LENGTH,
&init_params);
+ if (!dc_sink) {
+ DRM_ERROR("Unable to add a remote sink\n");
+ return 0;
+ }
+
dc_sink->priv = aconnector;
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
@@ -745,8 +750,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp,
- dsc_policy.max_target_bpp,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 887a54246bde..a06e86853bb9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -128,7 +128,7 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count, i;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -210,6 +210,14 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dppclk_khz,
safe_to_lower);
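+ /* wait for the next vblank on a seamless-boot eDP stream before committing the new dppclk */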
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->signal == SIGNAL_TYPE_EDP &&
+ context->streams[i]->apply_seamless_boot_optimization) {
+ dc_wait_for_vblank(dc, context->streams[i]);
+ break;
+ }
+ }
+
clk_mgr_base->clks.actual_dppclk_khz =
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
@@ -761,6 +769,43 @@ static struct wm_table ddr4_wm_table_rn = {
}
};
+static struct wm_table ddr4_1R_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ }
+};
+
static struct wm_table lpddr4_wm_table_rn = {
.entries = {
{
@@ -945,8 +990,12 @@ void rn_clk_mgr_construct(
} else {
if (is_green_sardine)
rn_bw_params.wm_table = ddr4_wm_table_gs;
- else
- rn_bw_params.wm_table = ddr4_wm_table_rn;
+ else {
+ if (ctx->dc->config.is_single_rank_dimm)
+ rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
+ }
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
@@ -964,6 +1013,9 @@ void rn_clk_mgr_construct(
if (status == PP_SMU_RESULT_OK &&
ctx->dc_bios && ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+ /* treat the memory config as single channel if memory is asymmetric. */
+ if (ctx->dc->config.is_asymmetric_memory)
+ clk_mgr->base.bw_params->num_channels = 1;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 577e7f97045e..652fa89fae5f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -432,7 +432,7 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
}
-static bool dcn3_is_smu_prsent(struct clk_mgr *clk_mgr_base)
+static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return clk_mgr->smu_present;
@@ -500,7 +500,7 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_prsent
+ .is_smu_present = dcn3_is_smu_present
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8f0a13807d05..4713f09bcbf1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -55,6 +55,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
+#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -1322,11 +1323,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_link *link = sink->link;
unsigned int i, enc_inst, tg_inst = 0;
- // Seamless port only support single DP and EDP so far
- if ((sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
- sink->sink_signal != SIGNAL_TYPE_EDP) ||
- sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ /* Support seamless boot on EDP displays only */
+ if (sink->sink_signal != SIGNAL_TYPE_EDP) {
return false;
+ }
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
@@ -1399,6 +1399,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
+ /* block DSC for now, as VBIOS does not currently support DSC timings */
+ if (crtc_timing->flags.DSC)
+ return false;
+
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz;
@@ -1429,6 +1433,11 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
return false;
}
+ if (is_edp_ilr_optimization_required(link, crtc_timing)) {
+ DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
+ return false;
+ }
+
return true;
}
@@ -2678,6 +2687,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
}
}
@@ -2821,7 +2834,8 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
!pipe_ctx->stream || pipe_ctx->stream != stream ||
- !pipe_ctx->plane_state->update_flags.bits.addr_update)
+ !pipe_ctx->plane_state->update_flags.bits.addr_update ||
+ pipe_ctx->plane_state->skip_manual_trigger)
continue;
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
@@ -3205,6 +3219,19 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
}
}
+void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ struct timing_generator *tg =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ break;
+ }
+}
+
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 29bc2874f6a7..f4374d83662a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1679,21 +1679,27 @@ void link_destroy(struct dc_link **link)
static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- union down_spread_ctrl old_downspread;
- union down_spread_ctrl new_downspread;
- core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
- &old_downspread.raw, sizeof(old_downspread));
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw, sizeof(old_downspread));
- new_downspread.raw = old_downspread.raw;
+ new_downspread.raw = old_downspread.raw;
- new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
- (stream->ignore_msa_timing_param) ? 1 : 0;
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw) {
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw, sizeof(new_downspread));
+ }
- if (new_downspread.raw != old_downspread.raw) {
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &new_downspread.raw, sizeof(new_downspread));
+ } else {
+ dm_helpers_mst_enable_stream_features(stream);
}
}
@@ -2813,12 +2819,9 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* skip powering down the single pipe since it blocks the cstate */
- if ((link->ctx->asic_id.chip_family == FAMILY_RV) &&
- ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ if (link->ctx->asic_id.chip_family >= FAMILY_RV)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
/* SMU will perform additional powerdown sequence.
* For unsupported ASICs, set psr_level flag to skip PSR
@@ -3139,50 +3142,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
-enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
-
- // Clear all of MST payload then reallocate
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-
- /* driver enable split pipe for external monitors
- * we have to check pipe_ctx is split pipe or not
- * If it's split pipe, driver using top pipe to
- * reaallocate.
- */
- if (!pipe_ctx || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- deallocate_mst_payload(pipe_ctx);
- }
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* enable/disable PHY will clear connection between BE and FE
- * need to restore it.
- */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
- dc_link_allocate_mst_payload(pipe_ctx);
- }
- }
-
- return DC_OK;
-}
#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
@@ -3296,7 +3255,8 @@ void core_link_enable_stream(
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- apply_edp_fast_boot_optimization) {
+ apply_edp_fast_boot_optimization &&
+ !pipe_ctx->stream->timing.flags.DSC) {
pipe_ctx->stream->dpms_off = false;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
@@ -3358,8 +3318,10 @@ void core_link_enable_stream(
/* Set DPS PPS SDP (AKA "info frames") */
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_on_rx(pipe_ctx, true);
dp_set_dsc_pps_sdp(pipe_ctx, true);
+ }
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -3754,7 +3716,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
- link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP
+ (link->connector_signal == SIGNAL_TYPE_EDP &&
+ !link->dc->debug.force_enable_edp_fec)) // Disable FEC for eDP
is_fec_disable = true;
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
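
Restated as one predicate, FEC gating after this hunk behaves roughly as below; sink_patch_blocks_fec abbreviates the non-MST local-sink EDID-patch clause, and force_enable_edp_fec is the new debug override that lets eDP keep FEC:

    /* sketch of the resulting predicate, not literal driver code */
    bool fec_blocked = sink_patch_blocks_fec ||
                       (link->connector_signal == SIGNAL_TYPE_EDP &&
                        !link->dc->debug.force_enable_edp_fec);
    bool fec_on = dc_link_is_fec_supported(link) &&
                  !link->dc->debug.disable_fec && !fec_blocked;
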
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7d2e433c2275..3ff3d9e90983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1132,11 +1132,6 @@ static inline enum link_training_result perform_link_training_int(
enum link_training_result status)
{
union lane_count_set lane_count_set = { {0} };
- union dpcd_training_pattern dpcd_pattern = { {0} };
-
- /* 3. set training not in progress*/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
- dpcd_set_training_pattern(link, dpcd_pattern);
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1560,6 +1555,7 @@ enum link_training_result dc_link_dp_perform_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
struct link_training_settings lt_settings;
+ union dpcd_training_pattern dpcd_pattern = { { 0 } };
bool fec_enable;
uint8_t repeater_cnt;
@@ -1624,6 +1620,9 @@ enum link_training_result dc_link_dp_perform_link_training(
}
}
+ /* 3. set training not in progress */
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ dpcd_set_training_pattern(link, dpcd_pattern);
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
status = perform_link_training_int(link,
&lt_settings,
@@ -2490,7 +2489,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
return false;
}
-static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting;
struct dc_link_settings current_link_setting;
@@ -3582,6 +3581,8 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
if (is_lttpr_present)
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
if (!is_lttpr_present)
@@ -3892,7 +3893,7 @@ void detect_edp_sink_caps(struct dc_link *link)
memset(supported_link_rates, 0, sizeof(supported_link_rates));
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dc->config.optimize_edp_link_rate ||
+ (link->dc->debug.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -4718,3 +4719,51 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
}
return false;
}
+
+bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing)
+{
+ struct dc_link_settings link_setting;
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t req_bw;
+ union lane_count_set lane_count_set = { {0} };
+
+ ASSERT(link && crtc_timing); // invalid input
+
+ if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ !link->dc->debug.optimize_edp_link_rate)
+ return false;
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
+ return true;
+ }
+
+ // Read DPCD 00115h to find the edp link rate set used
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+
+ decide_edp_link_settings(link, &link_setting, req_bw);
+
+ if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
+ lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
+ return true;
+ }
+
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
+ return false;
+}
+
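
is_edp_ilr_optimization_required() is consumed at boot time (see the dce110 hunk later in this patch) to decide whether the VBIOS-trained eDP link can be kept; the expected call shape is simply:

    /* mirrors the dce110_enable_accelerated_mode() usage below */
    bool fast_boot_ok =
        !is_edp_ilr_optimization_required(edp_stream->link,
                                          &edp_stream->timing);
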
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 48ad1a8d4a74..b426f878fb99 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -431,7 +431,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
DC_LOG_DSC("\tslice_width %d", config->slice_width);
}
-static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -541,7 +541,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
goto out;
if (enable) {
- if (dp_set_dsc_on_rx(pipe_ctx, true)) {
+ {
dp_set_dsc_on_stream(pipe_ctx, true);
result = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ac7a75887f95..8cb937c046aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2506,26 +2506,31 @@ static void set_avi_info_frame(
hdmi_info.bits.ITC = itc_value;
}
+ if (stream->qs_bit == 1) {
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ else
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ } else {
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ }
+
/* TODO : We should handle YCC quantization */
/* but we do not have matrix calculation */
- if (stream->qs_bit == 1 &&
- stream->qy_bit == 1) {
+ if (stream->qy_bit == 1) {
if (color_space == COLOR_SPACE_SRGB ||
- color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ else
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- }
- } else {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- }
+ } else {
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
///VIC
format = stream->timing.timing_3d_format;
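
The restructuring above decouples the RGB (Q0_Q1) and YCC (YQ0_YQ1) decisions; the net mapping is summarized below:

    /*
     * Q0_Q1 (RGB quantization) after this hunk:
     *   qs_bit == 0                          -> DEFAULT_RANGE
     *   qs_bit == 1, sRGB/BT.2020 full       -> FULL_RANGE
     *   qs_bit == 1, sRGB/BT.2020 limited    -> LIMITED_RANGE
     *   qs_bit == 1, any other color space   -> DEFAULT_RANGE
     *
     * YQ0_YQ1 ends up LIMITED_RANGE on every path until YCC quantization
     * is actually computed (see the TODO above).
     */
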
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8108b82bac60..100d434f7a03 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
-#define DC_VER "3.2.130"
+#define DC_VER "3.2.132"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -293,7 +293,6 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
- bool optimize_edp_link_rate;
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
@@ -309,6 +308,8 @@ struct dc_config {
#endif
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
+ bool is_asymmetric_memory;
+ bool is_single_rank_dimm;
};
enum visual_confirm {
@@ -541,6 +542,11 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
+ bool optimize_edp_link_rate; /* eDP ILR */
+ /* force enable edp FEC */
+ bool force_enable_edp_fec;
+ /* FEC/PSR1 sequence enable delay in 100us */
+ uint8_t fec_enable_delay_in100us;
};
struct dc_debug_data {
@@ -713,6 +719,7 @@ void dc_init_callbacks(struct dc *dc,
void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
+void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream);
/*******************************************************************************
* Surface Interfaces
******************************************************************************/
@@ -900,6 +907,8 @@ struct dc_plane_state {
union surface_update_flags update_flags;
bool flip_int_enabled;
+ bool skip_manual_trigger;
+
/* private to DC core */
struct dc_plane_status status;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index b0013e674864..054bab45ee17 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -276,7 +276,6 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
-enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b0297f07f9de..13dae7238a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -238,7 +238,6 @@ struct dc_stream_state {
bool apply_seamless_boot_optimization;
uint32_t stream_id;
- bool is_dsc_enabled;
struct test_pattern test_pattern;
union stream_update_flags update_flags;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 28ff059aa7f3..5e99553fcdd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -284,6 +284,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->debug.u32All = 0;
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
+ copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);
+ copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 873c6f2d2cd9..5ddeee96bf23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -48,6 +48,7 @@
#include "stream_encoder.h"
#include "link_encoder.h"
#include "link_hwss.h"
+#include "dc_link_dp.h"
#include "clock_source.h"
#include "clk_mgr.h"
#include "abm.h"
@@ -1694,6 +1695,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ DC_LOGGER_INIT();
+
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
get_edp_links(dc, edp_links, &edp_num);
@@ -1714,8 +1717,11 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
/* Set optimization flag on eDP stream*/
if (edp_stream_num && edp_link->link_status.link_active) {
edp_stream = edp_streams[0];
- edp_stream->apply_edp_fast_boot_optimization = true;
- can_apply_edp_fast_boot = true;
+ can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
+ edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
+ if (!can_apply_edp_fast_boot)
+ DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
+
break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index bec7059f6d5d..a1318c31bcfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-17 Advanced Micro Devices, Inc.
+ * Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (pipe_dest->htotal != 0) {
+ if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ value = 1;
+ } else
+ value = 0;
+ }
+
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
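
With the guard above, a zero htotal no longer divides by zero and leaves value at its prior default. Condensed into an equivalent sketch (assuming value starts at 0, as the surrounding function appears to arrange):

    uint32_t value = 0;

    if (pipe_dest->htotal) {
        uint32_t vready_line = pipe_dest->vstartup_start -
            (pipe_dest->vready_offset + pipe_dest->vupdate_width +
             pipe_dest->vupdate_offset) / pipe_dest->htotal;

        value = (vready_line <= pipe_dest->vblank_end) ? 1 : 0;
    }
    REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
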
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index f65a6904d09c..527e56c353cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2201,10 +2201,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
- if (true) /* todo */
- pipes[pipe_cnt].dout.output_format = dm_s422;
- else
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC &&
+ !res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple)
pipes[pipe_cnt].dout.output_format = dm_n422;
+ else
+ pipes[pipe_cnt].dout.output_format = dm_s422;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index d3b643089603..8fccee5a3036 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -218,6 +218,8 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst);
cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 0d90523c7cdc..70b053d9ba40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -99,6 +99,8 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index f41db27c44de..7617fab9e1f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -147,6 +147,8 @@ bool dm_helpers_dp_write_dsc_enable(
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
+void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream);
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index b970a32177af..3ae05c96d557 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -52,6 +52,10 @@ bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
+bool decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw);
+
void decide_link_settings(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
@@ -71,6 +75,8 @@ void detect_edp_sink_caps(struct dc_link *link);
bool is_dp_active_dongle(const struct dc_link *link);
+bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing);
+
void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
@@ -86,5 +92,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
+
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 44003836fafd..4195ff10c514 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x7f2db1846
+#define DMUB_FW_VERSION_GIT_HASH 0x23db9b126
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 59
+#define DMUB_FW_VERSION_REVISION 62
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -121,6 +121,16 @@
#define TRACE_BUFFER_ENTRY_OFFSET 16
/**
+ * ABM backlight control version legacy
+ */
+#define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_UNKNOWN 0x0
+
+/**
+ * ABM backlight control version with multi edp support
+ */
+#define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1 0x1
+
+/**
* Physical framebuffer address location, 64-bit.
*/
#ifndef PHYSICAL_ADDRESS_LOC
@@ -1625,6 +1635,23 @@ struct dmub_cmd_abm_set_backlight_data {
* Requested backlight level from user.
*/
uint32_t backlight_user_level;
+
+ /**
+ * Backlight data version.
+ */
+ uint8_t version;
+
+ /**
+ * Panel Control HW instance mask.
+ * Bit 0 is Panel Control HW instance 0.
+ * Bit 1 is Panel Control HW instance 1.
+ */
+ uint8_t panel_mask;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
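
Producers of this command are expected to stamp the new fields explicitly; the dcn21 hunk earlier in this patch does exactly that:

    /* from dcn21_set_backlight_level() in this same series */
    cmd.abm_set_backlight.abm_set_backlight_data.version =
        DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1;
    cmd.abm_set_backlight.abm_set_backlight_data.panel_mask =
        0x01 << panel_cntl->inst; /* one bit per panel-control HW instance */
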
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index eeac14300a2a..2cbd931363bd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -427,8 +427,6 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_is_link_encryption_enabled(hdcp))
- goto out;
if (status == MOD_HDCP_STATUS_SUCCESS)
mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index f164f6a5d4dc..c1331facdcb4 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -564,8 +564,6 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_is_link_encryption_enabled(hdcp))
- goto out;
process_rxstatus(hdcp, event_ctx, input, &status);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 9d7ca316dc3f..26f96c05e0ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
index 4089cfa081f5..849450caca15 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -617,6 +617,22 @@
#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L
#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+
// addressBlock: gc_gfxudec
//GRBM_GFX_INDEX
#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index c77ed38c20fb..f2564ba21c0b 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -3336,6 +3336,47 @@ enum atom_smu11_syspll3_1_clock_id {
SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
};
+enum atom_smu12_syspll_id {
+ SMU12_SYSPLL0_ID = 0,
+ SMU12_SYSPLL1_ID = 1,
+ SMU12_SYSPLL2_ID = 2,
+ SMU12_SYSPLL3_0_ID = 3,
+ SMU12_SYSPLL3_1_ID = 4,
+};
+
+enum atom_smu12_syspll0_clock_id {
+ SMU12_SYSPLL0_SMNCLK_ID = 0, // SMNCLK
+ SMU12_SYSPLL0_SOCCLK_ID = 1, // SOCCLK
+ SMU12_SYSPLL0_MP0CLK_ID = 2, // MP0CLK
+ SMU12_SYSPLL0_MP1CLK_ID = 3, // MP1CLK
+ SMU12_SYSPLL0_MP2CLK_ID = 4, // MP2CLK
+ SMU12_SYSPLL0_VCLK_ID = 5, // VCLK
+ SMU12_SYSPLL0_LCLK_ID = 6, // LCLK
+ SMU12_SYSPLL0_DCLK_ID = 7, // DCLK
+ SMU12_SYSPLL0_ACLK_ID = 8, // ACLK
+ SMU12_SYSPLL0_ISPCLK_ID = 9, // ISPCLK
+ SMU12_SYSPLL0_SHUBCLK_ID = 10, // SHUBCLK
+};
+
+enum atom_smu12_syspll1_clock_id {
+ SMU12_SYSPLL1_DISPCLK_ID = 0, // DISPCLK
+ SMU12_SYSPLL1_DPPCLK_ID = 1, // DPPCLK
+ SMU12_SYSPLL1_DPREFCLK_ID = 2, // DPREFCLK
+ SMU12_SYSPLL1_DCFCLK_ID = 3, // DCFCLK
+};
+
+enum atom_smu12_syspll2_clock_id {
+ SMU12_SYSPLL2_Pre_GFXCLK_ID = 0, // Pre_GFXCLK
+};
+
+enum atom_smu12_syspll3_0_clock_id {
+ SMU12_SYSPLL3_0_FCLK_ID = 0, // FCLK
+};
+
+enum atom_smu12_syspll3_1_clock_id {
+ SMU12_SYSPLL3_1_UMCCLK_ID = 0, // UMCCLK
+};
+
struct atom_get_smu_clock_info_output_parameters_v3_1
{
union {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 204e34549013..8128603ef495 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1844,7 +1844,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (asic_type < CHIP_VEGA10)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+ if (asic_type < CHIP_VEGA10 ||
+ asic_type == CHIP_ARCTURUS ||
+ asic_type == CHIP_ALDEBARAN)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
if (asic_type < CHIP_VEGA20)
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 6e23a3f803a7..8361ebd8d876 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -26,7 +26,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU13_DRIVER_IF_VERSION 2
+#define SMU13_DRIVER_IF_VERSION 3
typedef struct {
int32_t value;
@@ -191,6 +191,44 @@ typedef struct {
uint16_t SocTemperature; //[centi-Celsius]
uint16_t EdgeTemperature;
uint16_t ThrottlerStatus;
+} SmuMetrics_legacy_t;
+
+typedef struct {
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t SocclkFrequency; //[MHz]
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+ uint16_t MemclkFrequency; //[MHz]
+ uint16_t spare;
+
+ uint16_t GfxActivity; //[centi]
+ uint16_t UvdActivity; //[centi]
+ uint16_t C0Residency[4]; //percentage
+
+ uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t CurrentSocketPower; //[mW]
+
+ // 3rd-party tools on Windows need this info for APUs
+ uint16_t CoreFrequency[4]; //[MHz]
+ uint16_t CorePower[4]; //[mW]
+ uint16_t CoreTemperature[4]; //[centi-Celsius]
+ uint16_t L3Frequency[1]; //[MHz]
+ uint16_t L3Temperature[1]; //[centi-Celsius]
+
+ uint16_t GfxTemperature; //[centi-Celsius]
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t EdgeTemperature;
+ uint16_t ThrottlerStatus;
+} SmuMetricsTable_t;
+
+typedef struct {
+ SmuMetricsTable_t Current;
+ SmuMetricsTable_t Average;
+ //uint32_t AccCnt;
+ uint32_t SampleStartTime;
+ uint32_t SampleStopTime;
} SmuMetrics_t;
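
Interface v3 replaces the single flat metrics table with paired Current/Average snapshots plus a sampling window, and consumers choose per field. Illustrative access (field names from this header; the timestamp units are firmware-defined and left unstated here):

    SmuMetrics_t m; /* filled via smu_cmn_get_metrics_table() */

    uint16_t gfxclk_now = m.Current.GfxclkFrequency; /* instantaneous, MHz */
    uint16_t gfxclk_avg = m.Average.GfxclkFrequency; /* windowed average, MHz */
    uint32_t window = m.SampleStopTime - m.SampleStartTime;
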
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index d5182bbaa598..bb55a96f98e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -32,7 +32,7 @@
#define SMU11_DRIVER_IF_VERSION_NV14 0x38
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
-#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
+#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
/* MP Apertures */
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index 02de3b6199e5..1ad2dff71090 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e0eb7ca112e2..c29d8b3131b7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2221,6 +2221,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
+ ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7bcd35840bf2..77f532a49e37 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -194,18 +194,34 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ goto err0_out;
+ }
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+
+ if (if_version < 0x3) {
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
+ } else {
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ }
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -235,13 +251,12 @@ err0_out:
return -ENOMEM;
}
-static int vangogh_get_smu_metrics_data(struct smu_context *smu,
+static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
-
- SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
mutex_lock(&smu->metrics_lock);
@@ -255,7 +270,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
}
switch (member) {
- case METRICS_AVERAGE_GFXCLK:
+ case METRICS_CURR_GFXCLK:
*value = metrics->GfxclkFrequency;
break;
case METRICS_AVERAGE_SOCCLK:
@@ -267,7 +282,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_DCLK:
*value = metrics->DclkFrequency;
break;
- case METRICS_AVERAGE_UCLK:
+ case METRICS_CURR_UCLK:
*value = metrics->MemclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
@@ -311,6 +326,103 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
+static int vangogh_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->Current.GfxclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCCLK:
+ *value = metrics->Current.SocclkFrequency;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->Current.VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->Current.DclkFrequency;
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->Current.MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->Current.GfxActivity;
+ break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = metrics->Current.UvdActivity;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Current.CurrentSocketPower << 8) /
+ 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->Current.GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->Current.SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->Current.ThrottlerStatus;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Current.Voltage[2];
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Current.Voltage[1];
+ break;
+ case METRICS_AVERAGE_CPUCLK:
+ memcpy(value, &metrics->Current.CoreFrequency[0],
+ smu->cpu_core_num * sizeof(uint16_t));
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
+ else
+ ret = vangogh_get_smu_metrics_data(smu, member, value);
+
+ return ret;
+}
+
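
The same probe-and-route shape recurs below for print_clk_levels and get_gpu_metrics; schematically (a sketch only — legacy_fn/current_fn stand in for the two concrete handlers):

    uint32_t if_version;
    int ret = smu_cmn_get_smc_version(smu, &if_version, NULL);

    if (ret)
        return ret; /* probe failure propagates unchanged */
    return (if_version < 0x3) ? legacy_fn(smu) : current_fn(smu);
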
static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -447,11 +559,11 @@ static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_typ
return 0;
}
-static int vangogh_print_clk_levels(struct smu_context *smu,
+static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics;
+ SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
@@ -546,6 +658,126 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return size;
}
+static int vangogh_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+ }
+ break;
+ case SMU_OD_CCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_OD_RANGE:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_SOCCLK:
+ /* socclk levels 3 through 6 share the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.Current.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.Current.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.Current.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.Current.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int vangogh_common_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
+ else
+ ret = vangogh_print_clk_levels(smu, clk_type, buf);
+
+ return ret;
+}
+
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
enum amd_dpm_forced_level level,
uint32_t *vclk_mask,
@@ -860,7 +1092,6 @@ static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
return ret;
break;
case SMU_FCLK:
- case SMU_MCLK:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinFclkByFreq,
min, NULL);
@@ -948,7 +1179,6 @@ static int vangogh_force_clk_levels(struct smu_context *smu,
if (ret)
return ret;
break;
- case SMU_MCLK:
case SMU_FCLK:
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_min_level, &min_freq);
@@ -1035,7 +1265,6 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
SMU_SOCCLK,
SMU_VCLK,
SMU_DCLK,
- SMU_MCLK,
SMU_FCLK,
};
@@ -1064,7 +1293,6 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
enum smu_clk_type clk_type;
uint32_t feature;
} clk_feature_map[] = {
- {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
@@ -1196,7 +1424,6 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
- vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
@@ -1236,7 +1463,6 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
- vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1278,57 +1504,57 @@ static int vangogh_read_sensor(struct smu_context *smu,
mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXACTIVITY,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = vangogh_get_smu_metrics_data(smu,
- METRICS_AVERAGE_UCLK,
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = vangogh_get_smu_metrics_data(smu,
- METRICS_AVERAGE_GFXCLK,
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_CPU_CLK:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_CPUCLK,
(uint32_t *)data);
*size = smu->cpu_core_num * sizeof(uint16_t);
@@ -1402,13 +1628,13 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
+static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v2_1 *gpu_metrics =
(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
- SmuMetrics_t metrics;
+ SmuMetrics_legacy_t metrics;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu, &metrics, true);
@@ -1421,9 +1647,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_soc = metrics.SocTemperature;
memcpy(&gpu_metrics->temperature_core[0],
&metrics.CoreTemperature[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
- gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
gpu_metrics->average_gfx_activity = metrics.GfxActivity;
gpu_metrics->average_mm_activity = metrics.UvdActivity;
@@ -1434,7 +1659,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
@@ -1445,9 +1670,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
- gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
@@ -1458,6 +1682,88 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_1 *gpu_metrics =
+ (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_1);
+}
+
+static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+
+ return ret;
+}
+
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
@@ -1876,9 +2182,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_watermarks_table = vangogh_set_watermarks_table,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.interrupt_work = smu_v11_0_interrupt_work,
- .get_gpu_metrics = vangogh_get_gpu_metrics,
+ .get_gpu_metrics = vangogh_common_get_gpu_metrics,
.od_edit_dpm_table = vangogh_od_edit_dpm_table,
- .print_clk_levels = vangogh_print_clk_levels,
+ .print_clk_levels = vangogh_common_print_clk_levels,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index e3232295f2bf..f43b4c623685 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1332,6 +1332,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.gfx_state_change_set = renoir_gfx_state_change_set,
.set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
.od_edit_dpm_table = renoir_od_edit_dpm_table,
+ .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 6cc4855c8a37..d60b8c5e8715 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -27,6 +27,7 @@
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
@@ -278,3 +279,125 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
return ret;
}
+
+static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
+ uint8_t clk_id,
+ uint8_t syspll_id,
+ uint32_t *clk_freq)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+ int ret, index;
+
+ input.clk_id = clk_id;
+ input.syspll_id = syspll_id;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
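
Note the in/out aliasing: amdgpu_atom_execute_table() writes the result back into the same parameter block, which is then reinterpreted as the output struct, and the frequency is returned in 10 kHz units. Usage, as in smu_v12_0_get_vbios_bootup_values() below:

    uint32_t socclk; /* result in 10 kHz units */

    smu_v12_0_atom_get_smu_clockinfo(smu->adev,
                                     (uint8_t)SMU12_SYSPLL0_SOCCLK_ID,
                                     (uint8_t)SMU12_SYSPLL0_ID,
                                     &socclk);
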
+
+int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_firmware_info_v3_3 *v_3_3;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu12!\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
+ break;
+ case 3:
+ case 4:
+ default:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_SOCCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.socclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL1_DCFCLK_ID,
+ (uint8_t)SMU12_SYSPLL1_ID,
+ &smu->smu_table.boot_values.dcefclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_VCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.vclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_DCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.dclk);
+
+ if ((smu->smu_table.boot_values.format_revision == 3) &&
+ (smu->smu_table.boot_values.content_revision >= 2))
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL3_0_FCLK_ID,
+ (uint8_t)SMU12_SYSPLL3_0_ID,
+ &smu->smu_table.boot_values.fclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_LCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.lclk);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index bca02a9fb489..dcbe3a72da09 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -78,6 +78,8 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+#define CLOCK_VALID (1U << 31)
+
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -405,6 +407,9 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
{
int ret = 0;
+ /* VBIOS pptable is the first choice */
+ smu->smu_table.boot_values.pp_table_id = 0;
+
ret = smu_v13_0_setup_pptable(smu);
if (ret)
return ret;
@@ -670,6 +675,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t display_levels;
uint32_t freq_values[3] = {0};
+ uint32_t min_clk, max_clk;
if (amdgpu_ras_intr_triggered())
return snprintf(buf, PAGE_SIZE, "unavailable\n");
@@ -697,12 +703,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
display_levels = clocks.num_levels;
+ min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ?
+ smu->gfx_actual_hard_min_freq & ~CLOCK_VALID :
+ single_dpm_table->dpm_levels[0].value;
+ max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ?
+ smu->gfx_actual_soft_max_freq & ~CLOCK_VALID :
+ single_dpm_table->dpm_levels[1].value;
+
+ freq_values[0] = min_clk;
+ freq_values[1] = max_clk;
+
/* fine-grained dpm has only 2 levels */
- if (now > single_dpm_table->dpm_levels[0].value &&
- now < single_dpm_table->dpm_levels[1].value) {
+ if (now > min_clk && now < max_clk) {
display_levels = clocks.num_levels + 1;
- freq_values[0] = single_dpm_table->dpm_levels[0].value;
- freq_values[2] = single_dpm_table->dpm_levels[1].value;
+ freq_values[2] = max_clk;
freq_values[1] = now;
}
@@ -712,12 +726,15 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
*/
if (display_levels == clocks.num_levels) {
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (clocks.num_levels == 1) ? "*" :
(aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ freq_values[i], now) ? "*" : ""));
} else {
for (i = 0; i < display_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
@@ -1117,6 +1134,9 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
&& (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+ /* Reset user min/max gfx clock */
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
switch (level) {
@@ -1158,7 +1178,14 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
min_clk = max(min, dpm_context->dpm_tables.gfx_table.min);
max_clk = min(max, dpm_context->dpm_tables.gfx_table.max);
- return smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
+ min_clk, max_clk);
+
+ if (!ret) {
+ smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID;
+ smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID;
+ }
+ return ret;
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
@@ -1178,9 +1205,15 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnableDeterminism,
max, NULL);
- if (ret)
+ if (ret) {
dev_err(adev->dev,
"Failed to enable determinism at GFX clock %d MHz\n", max);
+ } else {
+ smu->gfx_actual_hard_min_freq =
+ min_clk | CLOCK_VALID;
+ smu->gfx_actual_soft_max_freq =
+ max | CLOCK_VALID;
+ }
}
}
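
The aldebaran hunks above cache user-requested min/max GFX clocks with bit 31 (CLOCK_VALID) as a validity tag, falling back to the DPM table when the tag is clear. A minimal userspace sketch of that encode/decode pattern (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define CLOCK_VALID (1u << 31)

/* Tag a user-set clock (MHz) as valid; the low 31 bits hold the value. */
static uint32_t cache_clock(uint32_t mhz)
{
	return mhz | CLOCK_VALID;
}

/* Use the cached value only when tagged, else fall back to the table. */
static uint32_t effective_clock(uint32_t cached, uint32_t fallback)
{
	return (cached & CLOCK_VALID) ? (cached & ~CLOCK_VALID) : fallback;
}

int main(void)
{
	printf("%u\n", effective_clock(cache_clock(1500), 800)); /* 1500 */
	printf("%u\n", effective_clock(0, 800));                 /* 800  */
	return 0;
}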
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 30c9ac635105..0864083e7435 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -276,8 +276,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
void *table;
uint16_t version_major, version_minor;
- /* temporarily hardcode to use vbios pptable */
- smu->smu_table.boot_values.pp_table_id = 0;
if (amdgpu_smu_pptable_id >= 0) {
smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 369695df8b15..69f57ca9c68d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,6 +20,7 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
+ select IO_MAPPING
select SYNC_FILE
select IOSF_MBI
select CRC32
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index d74b263c5f4e..64e9107d70f7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1403,7 +1403,8 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if (IS_DISPLAY_VER(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+ if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
+ color_plane == 0 && fb->width > 3840)
tile_width *= 4;
/*
* The main surface pitch must be padded to a multiple of four
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index cbcfb0c4c370..02a003fd48fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -96,7 +96,7 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(i915) < 10)
+ if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
return false;
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 986bbbe3b12f..957252b695d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -597,7 +597,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
return false;
/* Display WA #1105: skl,bxt,kbl,cfl,glk */
- if (IS_DISPLAY_VER(dev_priv, 9) &&
+ if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e477b6114a60..e5dadde422f7 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -803,8 +803,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
vma = intel_overlay_pin_fb(new_bo);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto out_pin_section;
+ }
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 1d561812fcad..8ada4f829cab 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1519,8 +1519,7 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
u32 psr_status;
mutex_lock(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled ||
- (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
+ if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
mutex_unlock(&intel_dp->psr.lock);
continue;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 21cc40897ca8..ce6b664b10aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
+ if (is_swiotlb_active()) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 2561a2f1e54f..23f6b00e08e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -367,11 +367,10 @@ retry:
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
+ (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start));
if (ret)
goto err_fence;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3e248d3bd869..4f9c8d3021ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -70,6 +70,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
/**
* i915_gem_shrink - Shrink buffer object caches
+ * @ww: i915 gem ww acquire ctx, or NULL
* @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 01c1d1b36acd..ca9c9e27a43d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -948,11 +948,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,
/* below are all lri handlers */
vreg = &vgpu_vreg(s->vgpu, offset);
- if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
- gvt_vgpu_err("%s access to non-render register (%x)\n",
- cmd, offset);
- return -EBADRQC;
- }
if (is_cmd_update_pdps(offset, s) &&
cmd_pdp_mmio_update_handler(s, offset, index))
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 67a26923aa0e..9478c132d7b6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -587,12 +587,6 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
entry, index, false, 0, mm->vgpu);
}
-static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
- struct intel_gvt_gtt_entry *entry, unsigned long index)
-{
- _ppgtt_set_root_entry(mm, entry, index, true);
-}
-
static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index a66e7b2531d4..e7c2babcee8b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -117,7 +117,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
return true;
}
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
@@ -135,7 +135,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
gvt_vgpu_type_groups[i] = group;
}
- return true;
+ return 0;
unwind:
for (j = 0; j < i; j++) {
@@ -143,7 +143,7 @@ unwind:
kfree(group);
}
- return false;
+ return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
@@ -364,7 +364,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret == false) {
+ if (ret) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
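
The gvt conversion above fixes a type mismatch: the init function returned bool while its caller checked `ret == false` and printed the result as an error code. A small sketch of the 0/-errno convention it converts to (illustrative, not kernel code):

#include <errno.h>
#include <stdio.h>

/* 0 on success, negative errno on failure; "if (ret)" catches errors.
 * With a bool return, "ret == false" silently means the opposite once
 * the type changes to int. */
static int init_groups(int fail)
{
	return fail ? -ENOMEM : 0;
}

int main(void)
{
	int ret = init_groups(1);

	if (ret)
		fprintf(stderr, "failed to init groups: %d\n", ret);
	return 0;
}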
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 69e43bf91a15..9ec9277539ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1905,9 +1905,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_mm.c */
-int remap_io_mapping(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size,
- struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 666808cb3a32..4c8cd08c672d 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,89 +28,9 @@
#include "i915_drv.h"
-struct remap_pfn {
- struct mm_struct *mm;
- unsigned long pfn;
- pgprot_t prot;
-
- struct sgt_iter sgt;
- resource_size_t iobase;
-};
-
-static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
-
- /* Special PTE are not associated with any struct page */
- set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
- r->pfn++;
-
- return 0;
-}
-
-#define use_dma(io) ((io) != -1)
-
-static inline unsigned long sgt_pfn(const struct remap_pfn *r)
-{
- if (use_dma(r->iobase))
- return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
- else
- return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
-}
-
-static int remap_sg(pte_t *pte, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
-
- if (GEM_WARN_ON(!r->sgt.sgp))
- return -EINVAL;
-
- /* Special PTE are not associated with any struct page */
- set_pte_at(r->mm, addr, pte,
- pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
- r->pfn++; /* track insertions in case we need to unwind later */
-
- r->sgt.curr += PAGE_SIZE;
- if (r->sgt.curr >= r->sgt.max)
- r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
-
- return 0;
-}
-
-/**
- * remap_io_mapping - remap an IO mapping to userspace
- * @vma: user vma to map to
- * @addr: target user address to start at
- * @pfn: physical address of kernel memory
- * @size: size of map area
- * @iomap: the source io_mapping
- *
- * Note: this is only safe if the mm semaphore is held when called.
- */
-int remap_io_mapping(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size,
- struct io_mapping *iomap)
-{
- struct remap_pfn r;
- int err;
-
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
- GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
-
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
- r.mm = vma->vm_mm;
- r.pfn = pfn;
- r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
- (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
- err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
- if (unlikely(err)) {
- zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
- return err;
- }
-
- return 0;
-}
+#define use_dma(io) ((io) != -1)
/**
* remap_io_sg - remap an IO mapping to userspace
@@ -126,12 +46,7 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
- struct remap_pfn r = {
- .mm = vma->vm_mm,
- .prot = vma->vm_page_prot,
- .sgt = __sgt_iter(sgl, use_dma(iobase)),
- .iobase = iobase,
- };
+ unsigned long pfn, len, remapped = 0;
int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -140,11 +55,25 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
- err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
- if (unlikely(err)) {
- zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
- return err;
- }
-
- return 0;
+ do {
+ if (use_dma(iobase)) {
+ if (!sg_dma_len(sgl))
+ break;
+ pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
+ len = sg_dma_len(sgl);
+ } else {
+ pfn = page_to_pfn(sg_page(sgl));
+ len = sgl->length;
+ }
+
+ err = remap_pfn_range(vma, addr + remapped, pfn, len,
+ vma->vm_page_prot);
+ if (err)
+ break;
+ remapped += len;
+ } while ((sgl = __sg_next(sgl)));
+
+ if (err)
+ zap_vma_ptes(vma, addr, remapped);
+ return err;
}
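
remap_io_sg() now walks the scatterlist with remap_pfn_range() directly, tracking how many bytes were remapped so a failure can be unwound with zap_vma_ptes(). A userspace analogue of that remap-and-unwind pattern (all names are illustrative):

#include <stddef.h>
#include <stdio.h>

struct segment { size_t len; };

static int map_one(const struct segment *seg, size_t idx)
{
	(void)seg;
	return idx == 2 ? -1 : 0;          /* simulate failure on segment 3 */
}

static void unmap_range(size_t bytes)
{
	printf("unwinding %zu bytes\n", bytes);
}

int main(void)
{
	struct segment segs[] = { {4096}, {8192}, {4096} };
	size_t i, remapped = 0;
	int err = 0;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
		err = map_one(&segs[i], i);
		if (err)
			break;
		remapped += segs[i].len;   /* track progress for the unwind */
	}

	if (err)
		unmap_range(remapped);     /* undo only the mapped prefix */
	return 0;
}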
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9165971c3c47..bec9c3652188 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -350,6 +350,8 @@ static void __rq_arm_watchdog(struct i915_request *rq)
if (!ce->watchdog.timeout_us)
return;
+ i915_request_get(rq);
+
hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
wdg->timer.function = __rq_watchdog_expired;
hrtimer_start_range_ns(&wdg->timer,
@@ -357,7 +359,6 @@ static void __rq_arm_watchdog(struct i915_request *rq)
NSEC_PER_USEC),
NSEC_PER_MSEC,
HRTIMER_MODE_REL);
- i915_request_get(rq);
}
static void __rq_cancel_watchdog(struct i915_request *rq)
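
The i915_request hunk moves i915_request_get() ahead of hrtimer_start_range_ns(): once the timer is armed, the expiry callback may run immediately on another CPU and drop a reference that had not yet been taken. A userspace model of the take-reference-before-publish rule (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct request { atomic_int refcount; };

static void get_req(struct request *rq) { atomic_fetch_add(&rq->refcount, 1); }
static void put_req(struct request *rq) { atomic_fetch_sub(&rq->refcount, 1); }

/* Expiry callback: drops the reference taken when the timer was armed. */
static void watchdog_expired(struct request *rq)
{
	put_req(rq);
}

static void arm_watchdog(struct request *rq)
{
	get_req(rq);  /* must precede arming: the callback may fire at once */
	/* start_timer(rq, watchdog_expired); -- from here on, another CPU
	 * may already be running watchdog_expired(). */
}

int main(void)
{
	struct request rq = { .refcount = 1 };

	arm_watchdog(&rq);
	watchdog_expired(&rq);               /* simulate the timer firing */
	printf("refcount=%d\n", atomic_load(&rq.refcount)); /* back to 1 */
	return 0;
}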
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6a35a30dd281..cf897297656f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -188,10 +188,7 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
void adreno_set_llc_attributes(struct iommu_domain *iommu)
{
- struct io_pgtable_domain_attr pgtbl_cfg;
-
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
}
struct msm_gem_address_space *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index b81ae90b8449..e8b506a6685b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = !!swiotlb_nr_tbl();
+ need_swiotlb = is_swiotlb_active();
#endif
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1864467f1063..6754f578fed2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -1,4 +1,3 @@
-/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
/* qxl_drv.c -- QXL driver -*- linux-c -*-
*
* Copyright 2011 Red Hat, Inc.
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 8b7a4f7b7576..42a8afa839cb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7948,8 +7948,6 @@ restart_ih:
DRM_ERROR("Illegal register access in command stream\n");
/* XXX check the bitfield order! */
me_id = (ring_id & 0x60) >> 5;
- pipe_id = (ring_id & 0x18) >> 3;
- queue_id = (ring_id & 0x7) >> 0;
switch (me_id) {
case 0:
/* This results in a full GPU reset, but all we need to do is soft
@@ -7971,8 +7969,6 @@ restart_ih:
DRM_ERROR("Illegal instruction in command stream\n");
/* XXX check the bitfield order! */
me_id = (ring_id & 0x60) >> 5;
- pipe_id = (ring_id & 0x18) >> 3;
- queue_id = (ring_id & 0x7) >> 0;
switch (me_id) {
case 0:
/* This results in a full GPU reset, but all we need to do is soft
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 88731b79c8f5..d0e94b10e4c0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4511,7 +4511,7 @@ static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
+ if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad DST register\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c9385cfd0fc1..f9120dc24682 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -832,10 +832,14 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
return &plane->base;
}
-static const u32 tegra_cursor_plane_formats[] = {
+static const u32 tegra_legacy_cursor_plane_formats[] = {
DRM_FORMAT_RGBA8888,
};
+static const u32 tegra_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
static int tegra_cursor_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -875,12 +879,24 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
plane);
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
- u32 value = CURSOR_CLIP_DISPLAY;
+ struct tegra_drm *tegra = plane->dev->dev_private;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u64 dma_mask = *dc->dev->dma_mask;
+#endif
+ unsigned int x, y;
+ u32 value = 0;
/* rien ne va plus ("no more bets") */
if (!new_state->crtc || !new_state->fb)
return;
+ /*
+ * Legacy display supports hardware clipping of the cursor, but
+ * nvdisplay relies on software to clip the cursor to the screen.
+ */
+ if (!dc->soc->has_nvdisplay)
+ value |= CURSOR_CLIP_DISPLAY;
+
switch (new_state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
@@ -908,7 +924,7 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (tegra_plane_state->iova[0] >> 32) & 0x3;
+ value = (tegra_plane_state->iova[0] >> 32) & (dma_mask >> 32);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -920,15 +936,39 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
value &= ~CURSOR_DST_BLEND_MASK;
value &= ~CURSOR_SRC_BLEND_MASK;
- value |= CURSOR_MODE_NORMAL;
+
+ if (dc->soc->has_nvdisplay)
+ value &= ~CURSOR_COMPOSITION_MODE_XOR;
+ else
+ value |= CURSOR_MODE_NORMAL;
+
value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
value |= CURSOR_ALPHA;
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+ /* nvdisplay relies on software for clipping */
+ if (dc->soc->has_nvdisplay) {
+ struct drm_rect src;
+
+ x = new_state->dst.x1;
+ y = new_state->dst.y1;
+
+ drm_rect_fp_to_int(&src, &new_state->src);
+
+ value = (src.y1 & tegra->vmask) << 16 | (src.x1 & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR);
+
+ value = (drm_rect_height(&src) & tegra->vmask) << 16 |
+ (drm_rect_width(&src) & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR);
+ } else {
+ x = new_state->crtc_x;
+ y = new_state->crtc_y;
+ }
+
/* position the cursor */
- value = (new_state->crtc_y & 0x3fff) << 16 |
- (new_state->crtc_x & 0x3fff);
+ value = ((y & tegra->vmask) << 16) | (x & tegra->hmask);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
@@ -982,8 +1022,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
plane->index = 6;
plane->dc = dc;
- num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
- formats = tegra_cursor_plane_formats;
+ if (!dc->soc->has_nvdisplay) {
+ num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
+ formats = tegra_legacy_cursor_plane_formats;
+ } else {
+ num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+ formats = tegra_cursor_plane_formats;
+ }
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
@@ -2035,6 +2080,16 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
return false;
}
+static int tegra_dc_early_init(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->num_crtcs++;
+
+ return 0;
+}
+
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
@@ -2046,6 +2101,12 @@ static int tegra_dc_init(struct host1x_client *client)
int err;
/*
+ * DC has been reset by now, so the VBLANK syncpoint can be released
+ * for general use.
+ */
+ host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe);
+
+ /*
* XXX do not register DCs with no window groups because we cannot
* assign a primary plane to them, which in turn will cause KMS to
* crash.
@@ -2111,6 +2172,12 @@ static int tegra_dc_init(struct host1x_client *client)
if (dc->soc->pitch_align > tegra->pitch_align)
tegra->pitch_align = dc->soc->pitch_align;
+ /* track maximum resolution */
+ if (dc->soc->has_nvdisplay)
+ drm->mode_config.max_width = drm->mode_config.max_height = 16384;
+ else
+ drm->mode_config.max_width = drm->mode_config.max_height = 4096;
+
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -2141,7 +2208,7 @@ cleanup:
drm_plane_cleanup(primary);
host1x_client_iommu_detach(client);
- host1x_syncpt_free(dc->syncpt);
+ host1x_syncpt_put(dc->syncpt);
return err;
}
@@ -2166,7 +2233,17 @@ static int tegra_dc_exit(struct host1x_client *client)
}
host1x_client_iommu_detach(client);
- host1x_syncpt_free(dc->syncpt);
+ host1x_syncpt_put(dc->syncpt);
+
+ return 0;
+}
+
+static int tegra_dc_late_exit(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->num_crtcs--;
return 0;
}
@@ -2235,8 +2312,10 @@ put_rpm:
}
static const struct host1x_client_ops dc_client_ops = {
+ .early_init = tegra_dc_early_init,
.init = tegra_dc_init,
.exit = tegra_dc_exit,
+ .late_exit = tegra_dc_late_exit,
.suspend = tegra_dc_runtime_suspend,
.resume = tegra_dc_runtime_resume,
};
@@ -2246,6 +2325,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 8,
.has_powergate = false,
@@ -2265,6 +2345,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 8,
.has_powergate = false,
@@ -2284,6 +2365,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 64,
.has_powergate = true,
@@ -2303,6 +2385,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = true,
@@ -2322,6 +2405,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = true,
@@ -2375,6 +2459,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = false,
@@ -2423,6 +2508,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = true,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = false,
@@ -2532,9 +2618,16 @@ static int tegra_dc_couple(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
+ u64 dma_mask = dma_get_mask(pdev->dev.parent);
struct tegra_dc *dc;
int err;
+ err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
if (!dc)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 051d03dcb9b0..29f19c3c6149 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -52,6 +52,7 @@ struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
+ bool supports_sector_layout;
bool has_legacy_blending;
unsigned int pitch_align;
bool has_powergate;
@@ -511,6 +512,8 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_DISP_CURSOR_START_ADDR_HI 0x4ec
#define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1
+#define CURSOR_COMPOSITION_MODE_BLEND (0 << 25)
+#define CURSOR_COMPOSITION_MODE_XOR (1 << 25)
#define CURSOR_MODE_LEGACY (0 << 24)
#define CURSOR_MODE_NORMAL (1 << 24)
#define CURSOR_DST_BLEND_ZERO (0 << 16)
@@ -705,6 +708,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define PROTOCOL_MASK (0xf << 8)
#define PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR 0x442
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR 0x446
+
#define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702
#define OWNER_MASK (0xf << 0)
#define OWNER(x) (((x) & 0xf) << 0)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 90709c38c993..0c350b0daab4 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -174,7 +174,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_gem_object **refs;
- struct host1x_syncpt *sp;
+ struct host1x_syncpt *sp = NULL;
struct host1x_job *job;
unsigned int num_refs;
int err;
@@ -301,8 +301,8 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
}
- /* check whether syncpoint ID is valid */
- sp = host1x_syncpt_get(host1x, syncpt.id);
+ /* Syncpoint ref will be dropped on job release. */
+ sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
if (!sp) {
err = -ENOENT;
goto fail;
@@ -311,7 +311,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
job->is_addr_reg = context->client->ops->is_addr_reg;
job->is_valid_class = context->client->ops->is_valid_class;
job->syncpt_incrs = syncpt.incrs;
- job->syncpt_id = syncpt.id;
+ job->syncpt = sp;
job->timeout = 10000;
if (args->timeout && args->timeout < 10000)
@@ -383,7 +383,7 @@ static int tegra_syncpt_read(struct drm_device *drm, void *data,
struct drm_tegra_syncpt_read *args = data;
struct host1x_syncpt *sp;
- sp = host1x_syncpt_get(host, args->id);
+ sp = host1x_syncpt_get_by_id_noref(host, args->id);
if (!sp)
return -EINVAL;
@@ -398,7 +398,7 @@ static int tegra_syncpt_incr(struct drm_device *drm, void *data,
struct drm_tegra_syncpt_incr *args = data;
struct host1x_syncpt *sp;
- sp = host1x_syncpt_get(host1x, args->id);
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
@@ -412,7 +412,7 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
struct drm_tegra_syncpt_wait *args = data;
struct host1x_syncpt *sp;
- sp = host1x_syncpt_get(host1x, args->id);
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
@@ -1121,9 +1121,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
+ drm->mode_config.max_width = 0;
+ drm->mode_config.max_height = 0;
drm->mode_config.allow_fb_modifiers = true;
@@ -1142,6 +1141,14 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (err < 0)
goto fbdev;
+ /*
+ * Now that all display controllers have been initialized, the maximum
+ * supported resolution is known and the bitmask for horizontal and
+ * vertical bitfields can be computed.
+ */
+ tegra->hmask = drm->mode_config.max_width - 1;
+ tegra->vmask = drm->mode_config.max_height - 1;
+
if (tegra->use_explicit_iommu) {
u64 carveout_start, carveout_end, gem_start, gem_end;
u64 dma_mask = dma_get_mask(&dev->dev);
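
hmask/vmask are derived from the maximum resolution once all heads are registered; this assumes max_width/max_height are powers of two (4096 or 16384 here), so max - 1 yields the register field mask. A trivial sketch:

#include <stdio.h>

int main(void)
{
	unsigned int max_width = 16384;     /* nvdisplay; 4096 on older DCs */
	unsigned int hmask = max_width - 1; /* 0x3fff: valid iff power of two */

	printf("hmask=%#x, x=%u -> field=%#x\n",
	       hmask, 20000u, 20000u & hmask);
	return 0;
}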
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f38de08e0c95..87df251c1fcf 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -24,6 +24,9 @@
#include "hub.h"
#include "trace.h"
+/* XXX move to include/uapi/drm/drm_fourcc.h? */
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+
struct reset_control;
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -54,7 +57,9 @@ struct tegra_drm {
struct tegra_fbdev *fbdev;
#endif
+ unsigned int hmask, vmask;
unsigned int pitch_align;
+ unsigned int num_crtcs;
struct tegra_display_hub *hub;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 01939c57fc74..cae8b8cbe9dd 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -44,6 +44,15 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
+ if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
+ tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
+ else
+ tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_GPU;
+
+ modifier &= ~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT;
+ }
+
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
@@ -86,6 +95,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
break;
default:
+ DRM_DEBUG_KMS("unknown format modifier: %llx\n", modifier);
return -EINVAL;
}
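
tegra_fb_get_tiling() peels the NVIDIA sector-layout flag off the modifier before the switch, since the top byte of a DRM modifier carries the vendor and bit 22 is vendor-specific. A userspace sketch of that decode (constants mirror the diff; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT (1ull << 22)

static int is_gpu_sector_layout(uint64_t modifier)
{
	if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_NVIDIA)
		return 0;   /* the flag bit is only defined for this vendor */
	return !!(modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT);
}

int main(void)
{
	uint64_t mod = ((uint64_t)DRM_FORMAT_MOD_VENDOR_NVIDIA << 56) |
		       DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT | 1;

	printf("gpu: %d\n", is_gpu_sector_layout(mod));            /* 1 */
	printf("tegra: %d\n", is_gpu_sector_layout(mod &
		~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT));            /* 0 */
	return 0;
}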
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index fafb5724499b..c15fd99d6cb2 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -21,9 +21,15 @@ enum tegra_bo_tiling_mode {
TEGRA_BO_TILING_MODE_BLOCK,
};
+enum tegra_bo_sector_layout {
+ TEGRA_BO_SECTOR_LAYOUT_TEGRA,
+ TEGRA_BO_SECTOR_LAYOUT_GPU,
+};
+
struct tegra_bo_tiling {
enum tegra_bo_tiling_mode mode;
unsigned long value;
+ enum tegra_bo_sector_layout sector_layout;
};
struct tegra_bo {
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index adbe2ddcda19..de288cba3905 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -67,7 +67,7 @@ static int gr2d_init(struct host1x_client *client)
detach:
host1x_client_iommu_detach(client);
free:
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
put:
host1x_channel_put(gr2d->channel);
return err;
@@ -86,7 +86,7 @@ static int gr2d_exit(struct host1x_client *client)
return err;
host1x_client_iommu_detach(client);
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
return 0;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index b0b8154e8104..24442ade0da3 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -76,7 +76,7 @@ static int gr3d_init(struct host1x_client *client)
detach:
host1x_client_iommu_detach(client);
free:
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
put:
host1x_channel_put(gr3d->channel);
return err;
@@ -94,7 +94,7 @@ static int gr3d_exit(struct host1x_client *client)
return err;
host1x_client_iommu_detach(client);
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
return 0;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8e6d329d062b..79bff8b48271 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -55,6 +55,18 @@ static const u64 tegra_shared_plane_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ /*
+ * The GPU sector layout is only supported on Tegra194, but these will
+ * be filtered out later on by ->format_mod_supported() on SoCs where
+ * it isn't supported.
+ */
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ /* sentinel */
DRM_FORMAT_MOD_INVALID
};
@@ -366,6 +378,12 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
+ !dc->soc->supports_sector_layout) {
+ DRM_ERROR("hardware doesn't support GPU sector layout\n");
+ return -EINVAL;
+ }
+
/*
* Tegra doesn't support different strides for U and V planes so we
* error out if the user tries to display a framebuffer with such a
@@ -485,6 +503,16 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
base = tegra_plane_state->iova[0] + fb->offsets[0];
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /*
+ * Physical address bit 39 in Tegra194 is used as a switch for special
+ * logic that swizzles the memory using either the legacy Tegra or the
+ * dGPU sector layout.
+ */
+ if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
+ base |= BIT(39);
+#endif
+
tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -562,9 +590,8 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
- /* planes can be assigned to arbitrary CRTCs */
- unsigned int possible_crtcs = 0x7;
struct tegra_shared_plane *plane;
+ unsigned int possible_crtcs;
unsigned int num_formats;
const u64 *modifiers;
struct drm_plane *p;
@@ -583,6 +610,9 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
p = &plane->base.base;
+ /* planes can be assigned to arbitrary CRTCs */
+ possible_crtcs = BIT(tegra->num_crtcs) - 1;
+
num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
formats = tegra_shared_plane_formats;
modifiers = tegra_shared_plane_modifiers;
@@ -848,12 +878,19 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
static int tegra_display_hub_probe(struct platform_device *pdev)
{
+ u64 dma_mask = dma_get_mask(pdev->dev.parent);
struct device_node *child = NULL;
struct tegra_display_hub *hub;
struct clk *clk;
unsigned int i;
int err;
+ err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 19e8847a164b..2e11b4b1f702 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -83,6 +83,22 @@ static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
kfree(state);
}
+static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
+{
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, plane->dev) {
+ if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!dc->soc->supports_sector_layout)
+ return false;
+ }
+ }
+
+ return true;
+}
+
static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -92,6 +108,14 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
+ /* check for the sector layout bit */
+ if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
+ if (!tegra_plane_supports_sector_layout(plane))
+ return false;
+ }
+ }
+
if (info->num_planes == 1)
return true;
@@ -119,6 +143,14 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
+ /*
+ * If we're not attached to a domain, we already stored the
+ * physical address when the buffer was allocated. If we're
+ * part of a group that's shared between all display
+ * controllers, we've also already mapped the framebuffer
+ * through the SMMU. In both cases we can short-circuit the
+ * code below and retrieve the stored IOV address.
+ */
if (!domain || dc->client.group)
phys = &phys_addr;
else
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 77e128832920..72aea1cc0cfa 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -214,7 +214,7 @@ static int vic_init(struct host1x_client *client)
return 0;
free_syncpt:
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
detach:
@@ -238,7 +238,7 @@ static int vic_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 9b787b3caeb5..510e3e001dab 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -112,7 +112,7 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
struct ttm_global *glob = &ttm_glob;
struct ttm_device *bdev;
- int ret = -EBUSY;
+ int ret = 0;
mutex_lock(&ttm_global_mutex);
list_for_each_entry(bdev, &glob->device_list, device_list) {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index eecc930e97ab..a1a25410ec74 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -329,6 +329,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
ttm_dma32_pages_limit) {
ret = ttm_global_swapout(ctx, GFP_KERNEL);
+ if (ret == 0)
+ break;
if (ret < 0)
goto error;
}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
index 104b95a8c7a2..aeb0a22a2c34 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
@@ -280,7 +280,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
spin_unlock(&glob->lock);
ret = ttm_global_swapout(ctx, GFP_KERNEL);
spin_lock(&glob->lock);
- if (unlikely(ret < 0))
+ if (unlikely(ret <= 0))
break;
}
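
The three swapout hunks converge on a tri-state return from ttm_global_swapout(): positive means progress was made, zero means nothing left to swap (stop, but not an error), negative is a real error. A minimal sketch of a caller loop under that convention (illustrative):

#include <stdio.h>

/* >0: units freed; 0: nothing left to swap; <0: error. */
static int swapout_one(int *budget)
{
	if (*budget <= 0)
		return 0;
	(*budget)--;
	return 1;
}

int main(void)
{
	int budget = 3, ret;

	while ((ret = swapout_one(&budget)) > 0)
		printf("freed, budget now %d\n", budget);
	if (ret < 0)
		fprintf(stderr, "swapout error: %d\n", ret);
	return 0;
}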
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 68a766ff0e9d..46f69c532b6b 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -197,6 +197,17 @@ int host1x_device_init(struct host1x_device *device)
mutex_lock(&device->clients_lock);
list_for_each_entry(client, &device->clients, list) {
+ if (client->ops && client->ops->early_init) {
+ err = client->ops->early_init(client);
+ if (err < 0) {
+ dev_err(&device->dev, "failed to early initialize %s: %d\n",
+ dev_name(client->dev), err);
+ goto teardown_late;
+ }
+ }
+ }
+
+ list_for_each_entry(client, &device->clients, list) {
if (client->ops && client->ops->init) {
err = client->ops->init(client);
if (err < 0) {
@@ -217,6 +228,14 @@ teardown:
if (client->ops->exit)
client->ops->exit(client);
+ /* reset client to end of list for late teardown */
+ client = list_entry(&device->clients, struct host1x_client, list);
+
+teardown_late:
+ list_for_each_entry_continue_reverse(client, &device->clients, list)
+ if (client->ops->late_exit)
+ client->ops->late_exit(client);
+
mutex_unlock(&device->clients_lock);
return err;
}
@@ -251,6 +270,18 @@ int host1x_device_exit(struct host1x_device *device)
}
}
+ list_for_each_entry_reverse(client, &device->clients, list) {
+ if (client->ops && client->ops->late_exit) {
+ err = client->ops->late_exit(client);
+ if (err < 0) {
+ dev_err(&device->dev, "failed to late cleanup %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&device->clients_lock);
+ return err;
+ }
+ }
+ }
+
mutex_unlock(&device->clients_lock);
return 0;
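
host1x_device_init() now runs early_init for every client before any init, and failure unwinds in reverse through exit and then late_exit, using the "reset client to end of list" trick so every client that passed early_init gets its late_exit. A userspace model of that two-phase ordering (illustrative):

#include <stdio.h>

#define N 3

static int early_init(int i) { printf("early_init %d\n", i); return 0; }
static int init(int i)       { printf("init %d\n", i); return i == 2 ? -1 : 0; }
static void exit_hook(int i) { printf("exit %d\n", i); }
static void late_exit(int i) { printf("late_exit %d\n", i); }

int main(void)
{
	int i, j;

	for (i = 0; i < N; i++)
		if (early_init(i))
			goto teardown_late;
	for (i = 0; i < N; i++)
		if (init(i))
			goto teardown;
	return 0;

teardown:
	for (j = i - 1; j >= 0; j--)
		exit_hook(j);
	i = N;  /* every client passed early_init, so late_exit them all */
teardown_late:
	for (j = i - 1; j >= 0; j--)
		late_exit(j);
	return 1;
}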
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index e8d3fda91d8a..6e6ca774f68d 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -273,15 +273,13 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
static void cdma_start_timer_locked(struct host1x_cdma *cdma,
struct host1x_job *job)
{
- struct host1x *host = cdma_to_host1x(cdma);
-
if (cdma->timeout.client) {
/* timer already started */
return;
}
cdma->timeout.client = job->client;
- cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+ cdma->timeout.syncpt = job->syncpt;
cdma->timeout.syncpt_val = job->syncpt_end;
cdma->timeout.start_ktime = ktime_get();
@@ -312,7 +310,6 @@ static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
static void update_cdma_locked(struct host1x_cdma *cdma)
{
bool signal = false;
- struct host1x *host1x = cdma_to_host1x(cdma);
struct host1x_job *job, *n;
/* If CDMA is stopped, queue is cleared and we can return */
@@ -324,8 +321,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
* to consume as many sync queue entries as possible without blocking
*/
list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
- struct host1x_syncpt *sp =
- host1x_syncpt_get(host1x, job->syncpt_id);
+ struct host1x_syncpt *sp = job->syncpt;
/* Check whether this syncpt has completed, and bail if not */
if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
@@ -499,8 +495,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
if (!cdma->timeout.initialized) {
int err;
- err = host1x_hw_cdma_timeout_init(host1x, cdma,
- job->syncpt_id);
+ err = host1x_hw_cdma_timeout_init(host1x, cdma);
if (err) {
mutex_unlock(&cdma->lock);
return err;
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 1b4997bda1c7..8a14880c61bb 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -69,6 +69,7 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
static void show_syncpts(struct host1x *m, struct output *o)
{
+ struct list_head *pos;
unsigned int i;
host1x_debug_output(o, "---- syncpts ----\n");
@@ -76,12 +77,19 @@ static void show_syncpts(struct host1x *m, struct output *o)
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
+ unsigned int waiters = 0;
- if (!min && !max)
+ spin_lock(&m->syncpt[i].intr.lock);
+ list_for_each(pos, &m->syncpt[i].intr.wait_head)
+ waiters++;
+ spin_unlock(&m->syncpt[i].intr.lock);
+
+ if (!min && !max && !waiters)
continue;
- host1x_debug_output(o, "id %u (%s) min %d max %d\n",
- i, m->syncpt[i].name, min, max);
+ host1x_debug_output(o,
+ "id %u (%s) min %d max %d (%d waiters)\n",
+ i, m->syncpt[i].name, min, max, waiters);
}
for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index d0ebb70e2fdd..fbb6447b8659 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -77,6 +77,7 @@ static const struct host1x_info host1x01_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x02_info = {
@@ -91,6 +92,7 @@ static const struct host1x_info host1x02_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x04_info = {
@@ -105,6 +107,7 @@ static const struct host1x_info host1x04_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = false,
};
static const struct host1x_info host1x05_info = {
@@ -119,6 +122,7 @@ static const struct host1x_info host1x05_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -142,6 +146,7 @@ static const struct host1x_info host1x06_info = {
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
+ .reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -165,6 +170,7 @@ static const struct host1x_info host1x07_info = {
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
+ .reserve_vblank_syncpts = false,
};
static const struct of_device_id host1x_of_match[] = {
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index f781a9b0f39d..fa6d4bc46e98 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -37,7 +37,7 @@ struct host1x_cdma_ops {
void (*start)(struct host1x_cdma *cdma);
void (*stop)(struct host1x_cdma *cdma);
void (*flush)(struct host1x_cdma *cdma);
- int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
+ int (*timeout_init)(struct host1x_cdma *cdma);
void (*timeout_destroy)(struct host1x_cdma *cdma);
void (*freeze)(struct host1x_cdma *cdma);
void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -101,6 +101,12 @@ struct host1x_info {
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
+ /*
+ * On T20-T148, the boot chain may set up DC to increment syncpoints
+ * 26/27 on VBLANK. As such we cannot use these syncpoints until
+ * the display driver disables VBLANK increments.
+ */
+ bool reserve_vblank_syncpts;
};
struct host1x {
@@ -261,10 +267,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
}
static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
- struct host1x_cdma *cdma,
- unsigned int syncpt)
+ struct host1x_cdma *cdma)
{
- return host->cdma_op->timeout_init(cdma, syncpt);
+ return host->cdma_op->timeout_init(cdma);
}
static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2f3bf94cf365..e49cd5b8f735 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -295,7 +295,7 @@ static void cdma_timeout_handler(struct work_struct *work)
/*
* Init timeout resources
*/
-static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
+static int cdma_timeout_init(struct host1x_cdma *cdma)
{
INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
cdma->timeout.initialized = true;
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 5eaa29d171c9..d4c28faf27d1 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -86,8 +86,7 @@ static void submit_gathers(struct host1x_job *job)
static inline void synchronize_syncpt_base(struct host1x_job *job)
{
- struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
- struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+ struct host1x_syncpt *sp = job->syncpt;
unsigned int id;
u32 value;
@@ -118,7 +117,7 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel)
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
- struct host1x_syncpt *sp;
+ struct host1x_syncpt *sp = job->syncpt;
u32 user_syncpt_incrs = job->syncpt_incrs;
u32 prev_max = 0;
u32 syncval;
@@ -126,10 +125,9 @@ static int channel_submit(struct host1x_job *job)
struct host1x_waitlist *completed_waiter = NULL;
struct host1x *host = dev_get_drvdata(ch->dev->parent);
- sp = host->syncpt + job->syncpt_id;
trace_host1x_channel_submit(dev_name(ch->dev),
job->num_gathers, job->num_relocs,
- job->syncpt_id, job->syncpt_incrs);
+ job->syncpt->id, job->syncpt_incrs);
/* before error checks, return current max */
prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
@@ -163,7 +161,7 @@ static int channel_submit(struct host1x_job *job)
host1x_cdma_push(&ch->cdma,
host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
host1x_uclass_wait_syncpt_r(), 1),
- host1x_class_host_wait_syncpt(job->syncpt_id,
+ host1x_class_host_wait_syncpt(job->syncpt->id,
host1x_syncpt_read_max(sp)));
}
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index f31bcfa1b837..ceb48229d14b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -204,7 +204,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
unsigned int i;
host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
- job, job->syncpt_id, job->syncpt_end,
+ job, job->syncpt->id, job->syncpt_end,
job->first_get, job->timeout,
job->num_slots, job->num_unpins);
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
index 3058b3c9a91d..b766851d5b83 100644
--- a/drivers/gpu/host1x/hw/hw_host1x07_vm.h
+++ b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
@@ -29,6 +29,6 @@
#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4 * (x))
#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4 * (x))
#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4 * (x))
-#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x9980 + 4 * (x))
#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0xa604 + 4 * (x))
#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 9245add23b5d..6d1f3c0fdbe7 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -235,25 +235,37 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
}
- spin_unlock(&syncpt->intr.lock);
-
if (ref)
*ref = waiter;
+
+ spin_unlock(&syncpt->intr.lock);
+
return 0;
}
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+ bool flush)
{
struct host1x_waitlist *waiter = ref;
struct host1x_syncpt *syncpt;
- while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
- WLS_REMOVED)
- schedule();
+ atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);
syncpt = host->syncpt + id;
- (void)process_wait_list(host, syncpt,
- host1x_syncpt_load(host->syncpt + id));
+
+ spin_lock(&syncpt->intr.lock);
+ if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
+ WLS_CANCELLED) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ spin_unlock(&syncpt->intr.lock);
+
+ if (flush) {
+ /* Wait until any concurrently executing handler has finished. */
+ while (atomic_read(&waiter->state) != WLS_HANDLED)
+ schedule();
+ }
kref_put(&waiter->refcount, waiter_release);
}
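
host1x_intr_put_ref() no longer spins waiting for WLS_REMOVED; it uses cmpxchg transitions so that whichever side wins CANCELLED -> HANDLED unlinks the waiter, and it only busy-waits when the caller asks for a flush. A userspace model of that cmpxchg hand-off (states and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum { WLS_PENDING, WLS_CANCELLED, WLS_HANDLED, WLS_REMOVED };

int main(void)
{
	atomic_int state = WLS_PENDING;
	int expected = WLS_PENDING;

	/* Canceller: flip PENDING -> CANCELLED; if the IRQ handler already
	 * moved the waiter to REMOVED, leave that state alone. */
	atomic_compare_exchange_strong(&state, &expected, WLS_CANCELLED);

	/* Whoever wins CANCELLED -> HANDLED unlinks the waiter and drops
	 * its list reference; the loser does nothing. */
	expected = WLS_CANCELLED;
	if (atomic_compare_exchange_strong(&state, &expected, WLS_HANDLED))
		printf("canceller unlinks the waiter\n");
	else
		printf("handler already processed the waiter\n");
	return 0;
}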
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index aac38194398f..6ea55e615e3a 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -74,8 +74,10 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
* Unreference an action submitted to host1x_intr_add_action().
* You must call this if you passed non-NULL as ref.
* @ref the ref returned from host1x_intr_add_action()
+ * @flush wait until any pending handlers have completed before returning.
*/
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+ bool flush);
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 82d0a60ba3f7..adbdc225de8d 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -79,6 +79,9 @@ static void job_free(struct kref *ref)
{
struct host1x_job *job = container_of(ref, struct host1x_job, ref);
+ if (job->syncpt)
+ host1x_syncpt_put(job->syncpt);
+
kfree(job);
}
@@ -674,7 +677,7 @@ EXPORT_SYMBOL(host1x_job_unpin);
*/
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
- dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id);
+ dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt->id);
dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index fce7892d5137..e648ebbb2027 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -42,17 +42,32 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
base->requested = false;
}
-static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
- struct host1x_client *client,
- unsigned long flags)
+/**
+ * host1x_syncpt_alloc() - allocate a syncpoint
+ * @host: host1x device data
+ * @flags: bitfield of HOST1X_SYNCPT_* flags
+ * @name: name for the syncpoint for use in debug prints
+ *
+ * Allocates a hardware syncpoint for the caller's use. The caller then has
+ * the sole authority to mutate the syncpoint's value until it is freed again.
+ *
+ * If no free syncpoints are available, or a NULL name was specified, returns
+ * NULL.
+ */
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ unsigned long flags,
+ const char *name)
{
struct host1x_syncpt *sp = host->syncpt;
+ char *full_name;
unsigned int i;
- char *name;
+
+ if (!name)
+ return NULL;
mutex_lock(&host->syncpt_mutex);
- for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+ for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
;
if (i >= host->info->nb_pts)
@@ -64,19 +79,19 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
goto unlock;
}
- name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
- client ? dev_name(client->dev) : NULL);
- if (!name)
+ full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
+ if (!full_name)
goto free_base;
- sp->client = client;
- sp->name = name;
+ sp->name = full_name;
if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
sp->client_managed = true;
else
sp->client_managed = false;
+ kref_init(&sp->ref);
+
mutex_unlock(&host->syncpt_mutex);
return sp;
@@ -87,6 +102,7 @@ unlock:
mutex_unlock(&host->syncpt_mutex);
return NULL;
}
+EXPORT_SYMBOL(host1x_syncpt_alloc);
/**
* host1x_syncpt_id() - retrieve syncpoint ID
@@ -294,7 +310,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
}
}
- host1x_intr_put_ref(sp->host, sp->id, ref);
+ host1x_intr_put_ref(sp->host, sp->id, ref, true);
done:
return err;
@@ -307,59 +323,12 @@ EXPORT_SYMBOL(host1x_syncpt_wait);
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
u32 current_val;
- u32 future_val;
smp_rmb();
current_val = (u32)atomic_read(&sp->min_val);
- future_val = (u32)atomic_read(&sp->max_val);
-
- /* Note the use of unsigned arithmetic here (mod 1<<32).
- *
- * c = current_val = min_val = the current value of the syncpoint.
- * t = thresh = the value we are checking
- * f = future_val = max_val = the value c will reach when all
- * outstanding increments have completed.
- *
- * Note that c always chases f until it reaches f.
- *
- * Dtf = (f - t)
- * Dtc = (c - t)
- *
- * Consider all cases:
- *
- * A) .....c..t..f..... Dtf < Dtc need to wait
- * B) .....c.....f..t.. Dtf > Dtc expired
- * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
- *
- * Any case where f==c: always expired (for any t). Dtf == Dcf
- * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
- * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
- * Dtc!=0)
- *
- * Other cases:
- *
- * A) .....t..f..c..... Dtf < Dtc need to wait
- * A) .....f..c..t..... Dtf < Dtc need to wait
- * A) .....f..t..c..... Dtf > Dtc expired
- *
- * So:
- * Dtf >= Dtc implies EXPIRED (return true)
- * Dtf < Dtc implies WAIT (return false)
- *
- * Note: If t is expired then we *cannot* wait on it. We would wait
- * forever (hang the system).
- *
- * Note: do NOT get clever and remove the -thresh from both sides. It
- * is NOT the same.
- *
- * If future valueis zero, we have a client managed sync point. In that
- * case we do a direct comparison.
- */
- if (!host1x_syncpt_client_managed(sp))
- return future_val - thresh >= current_val - thresh;
- else
- return (s32)(current_val - thresh) >= 0;
+
+ return ((current_val - thresh) & 0x80000000U) == 0U;
}
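
[Editor's note] With max_val gone, expiry is decided purely by whether the counter has reached or passed the threshold modulo 1<<32, i.e. whether the 32-bit difference reads as non-negative. A self-contained sketch of the same test (illustrative names, not driver code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Expired once "current" has reached or passed "thresh", mod 1 << 32. */
    static bool syncpt_expired(uint32_t current, uint32_t thresh)
    {
            return ((current - thresh) & 0x80000000u) == 0u;
    }

    int main(void)
    {
            assert(syncpt_expired(5, 5));            /* equal: expired */
            assert(syncpt_expired(6, 5));            /* already passed */
            assert(!syncpt_expired(5, 6));           /* not yet reached */
            assert(syncpt_expired(2, 0xfffffffeu));  /* counter wrapped past thresh */
            return 0;
    }
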
int host1x_syncpt_init(struct host1x *host)
@@ -401,10 +370,15 @@ int host1x_syncpt_init(struct host1x *host)
host1x_hw_syncpt_enable_protection(host);
/* Allocate sync point to use for clearing waits for expired fences */
- host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
+ host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
if (!host->nop_sp)
return -ENOMEM;
+ if (host->info->reserve_vblank_syncpts) {
+ kref_init(&host->syncpt[26].ref);
+ kref_init(&host->syncpt[27].ref);
+ }
+
return 0;
}
@@ -416,44 +390,50 @@ int host1x_syncpt_init(struct host1x *host)
* host1x client drivers can use this function to allocate a syncpoint for
* subsequent use. A syncpoint returned by this function will be reserved for
* use by the client exclusively. When no longer using a syncpoint, a host1x
- * client driver needs to release it using host1x_syncpt_free().
+ * client driver needs to release it using host1x_syncpt_put().
*/
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
struct host1x *host = dev_get_drvdata(client->host->parent);
- return host1x_syncpt_alloc(host, client, flags);
+ return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
}
EXPORT_SYMBOL(host1x_syncpt_request);
-/**
- * host1x_syncpt_free() - free a requested syncpoint
- * @sp: host1x syncpoint
- *
- * Release a syncpoint previously allocated using host1x_syncpt_request(). A
- * host1x client driver should call this when the syncpoint is no longer in
- * use. Note that client drivers must ensure that the syncpoint doesn't remain
- * under the control of hardware after calling this function, otherwise two
- * clients may end up trying to access the same syncpoint concurrently.
- */
-void host1x_syncpt_free(struct host1x_syncpt *sp)
+static void syncpt_release(struct kref *ref)
{
- if (!sp)
- return;
+ struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);
+
+ atomic_set(&sp->max_val, host1x_syncpt_read(sp));
mutex_lock(&sp->host->syncpt_mutex);
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
- sp->client = NULL;
sp->name = NULL;
sp->client_managed = false;
mutex_unlock(&sp->host->syncpt_mutex);
}
-EXPORT_SYMBOL(host1x_syncpt_free);
+
+/**
+ * host1x_syncpt_put() - free a requested syncpoint
+ * @sp: host1x syncpoint
+ *
+ * Release a syncpoint previously allocated using host1x_syncpt_request(). A
+ * host1x client driver should call this when the syncpoint is no longer in
+ * use.
+ */
+void host1x_syncpt_put(struct host1x_syncpt *sp)
+{
+ if (!sp)
+ return;
+
+ kref_put(&sp->ref, syncpt_release);
+}
+EXPORT_SYMBOL(host1x_syncpt_put);
void host1x_syncpt_deinit(struct host1x *host)
{
@@ -520,16 +500,48 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
}
/**
- * host1x_syncpt_get() - obtain a syncpoint by ID
+ * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
* @host: host1x controller
* @id: syncpoint ID
*/
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
+ unsigned int id)
{
if (id >= host->info->nb_pts)
return NULL;
- return host->syncpt + id;
+ if (kref_get_unless_zero(&host->syncpt[id].ref))
+ return &host->syncpt[id];
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id);
+
+/**
+ * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
+ * increase the refcount.
+ * @host: host1x controller
+ * @id: syncpoint ID
+ */
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
+ unsigned int id)
+{
+ if (id >= host->info->nb_pts)
+ return NULL;
+
+ return &host->syncpt[id];
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);
+
+/**
+ * host1x_syncpt_get() - increment syncpoint refcount
+ * @sp: syncpoint
+ */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
+{
+ kref_get(&sp->ref);
+
+ return sp;
}
EXPORT_SYMBOL(host1x_syncpt_get);
@@ -552,3 +564,31 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);
+
+static void do_nothing(struct kref *ref)
+{
+}
+
+/**
+ * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
+ * available for allocation
+ *
+ * @client: host1x bus client
+ * @syncpt_id: syncpoint ID to make available
+ *
+ * Makes VBLANK<i> syncpoint available for allocation if it was
+ * reserved at initialization time. This should be called by the display
+ * driver after it has ensured that any VBLANK increment programming configured
+ * by the boot chain has been disabled.
+ */
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+ u32 syncpt_id)
+{
+ struct host1x *host = dev_get_drvdata(client->host->parent);
+
+ if (!host->info->reserve_vblank_syncpts)
+ return;
+
+ kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
+}
+EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
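
[Editor's note] Taken together, the new API makes syncpoint lifetime explicit: host1x_syncpt_alloc() and host1x_syncpt_get_by_id() take a reference, host1x_syncpt_put() drops it, and the backing resources are released only on the final put. A hedged sketch of the expected client pattern (the host pointer and ID are placeholders):

    /* Hypothetical client code; assumes a valid struct host1x *host. */
    struct host1x_syncpt *sp;

    sp = host1x_syncpt_get_by_id(host, id);  /* takes a reference, or NULL */
    if (!sp)
            return -ENODEV;

    /* ... program the engine to increment sp, wait on thresholds ... */

    host1x_syncpt_put(sp);                   /* freed for reuse on last put */
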
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 8e1d04dacaa0..a6766f8d55ee 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -11,6 +11,7 @@
#include <linux/atomic.h>
#include <linux/host1x.h>
#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/sched.h>
#include "intr.h"
@@ -26,6 +27,8 @@ struct host1x_syncpt_base {
};
struct host1x_syncpt {
+ struct kref ref;
+
unsigned int id;
atomic_t min_val;
atomic_t max_val;
@@ -33,7 +36,6 @@ struct host1x_syncpt {
const char *name;
bool client_managed;
struct host1x *host;
- struct host1x_client *client;
struct host1x_syncpt_base *base;
/* interrupt data */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 786b71ef7738..4bf263c2d61a 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -351,6 +351,17 @@ config HID_EZKEY
help
Support for Ezkey BTC 8193 keyboard.
+config HID_FT260
+ tristate "FTDI FT260 USB HID to I2C host support"
+ depends on USB_HID && HIDRAW && I2C
+ help
+	  Provides I2C host adapter functionality over USB-HID through the
+	  FT260 device. The customizable USB descriptor fields are exposed
+	  as sysfs attributes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-ft260.
+
config HID_GEMBIRD
tristate "Gembird Joypad"
depends on HID
@@ -1042,10 +1053,11 @@ config HID_THINGM
config HID_THRUSTMASTER
tristate "ThrustMaster devices support"
- depends on HID
+ depends on USB_HID
help
- Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
- a THRUSTMASTER Ferrari GT Rumble Wheel.
+	  Say Y here if you have a THRUSTMASTER FireStorm Dual Power 2,
+	  a THRUSTMASTER Ferrari GT Rumble Wheel or a Thrustmaster FFB
+	  Wheel (T150RS, T300RS, T300 Ferrari Alcantara Edition, T500RS).
config THRUSTMASTER_FF
bool "ThrustMaster devices force feedback support"
@@ -1206,4 +1218,6 @@ source "drivers/hid/intel-ish-hid/Kconfig"
source "drivers/hid/amd-sfh-hid/Kconfig"
+source "drivers/hid/surface-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index c4f6d5c613dc..193431ec4db8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_HID_ELAN) += hid-elan.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_ELO) += hid-elo.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
+obj-$(CONFIG_HID_FT260) += hid-ft260.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
@@ -112,7 +113,8 @@ obj-$(CONFIG_HID_STEAM) += hid-steam.o
obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
-obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
+obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o hid-thrustmaster.o
+obj-$(CONFIG_HID_TMINIT) += hid-tminit.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
@@ -145,3 +147,5 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
+
+obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 6b665931147d..2b986d0dbde4 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -74,7 +74,7 @@ enum dev_num {
UNKNOWN,
};
/**
- * struct u1_data
+ * struct alps_dev
*
* @input: pointer to the kernel input device
* @input2: pointer to the kernel input2 device
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 097cb1ee3126..0ae9f6df59d1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2129,7 +2129,7 @@ struct hid_dynid {
};
/**
- * store_new_id - add a new HID device ID to this driver and re-probe devices
+ * new_id_store - add a new HID device ID to this driver and re-probe devices
* @drv: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index d7eaf9100370..59f8d716d78f 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -417,6 +417,7 @@ static const struct hid_usage_entry hid_usage_table[] = {
{ 0x85, 0x44, "Charging" },
{ 0x85, 0x45, "Discharging" },
{ 0x85, 0x4b, "NeedReplacement" },
+ { 0x85, 0x65, "AbsoluteStateOfCharge" },
{ 0x85, 0x66, "RemainingCapacity" },
{ 0x85, 0x68, "RunTimeToEmpty" },
{ 0x85, 0x6a, "AverageTimeToFull" },
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index dae193749d44..021049805bb7 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -410,15 +410,6 @@ static int elan_start_multitouch(struct hid_device *hdev)
return 0;
}
-static enum led_brightness elan_mute_led_get_brigtness(struct led_classdev *led_cdev)
-{
- struct device *dev = led_cdev->dev->parent;
- struct hid_device *hdev = to_hid_device(dev);
- struct elan_drvdata *drvdata = hid_get_drvdata(hdev);
-
- return drvdata->mute_led_state;
-}
-
static int elan_mute_led_set_brigtness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -445,8 +436,9 @@ static int elan_mute_led_set_brigtness(struct led_classdev *led_cdev,
kfree(dmabuf);
if (ret != ELAN_LED_REPORT_SIZE) {
- hid_err(hdev, "Failed to set mute led brightness: %d\n", ret);
- return ret;
+ if (ret != -ENODEV)
+ hid_err(hdev, "Failed to set mute led brightness: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
}
drvdata->mute_led_state = led_state;
@@ -459,9 +451,10 @@ static int elan_init_mute_led(struct hid_device *hdev)
struct led_classdev *mute_led = &drvdata->mute_led;
mute_led->name = "elan:red:mute";
- mute_led->brightness_get = elan_mute_led_get_brigtness;
+ mute_led->default_trigger = "audio-mute";
mute_led->brightness_set_blocking = elan_mute_led_set_brigtness;
mute_led->max_brightness = LED_ON;
+ mute_led->flags = LED_HW_PLUGGABLE;
mute_led->dev = &hdev->dev;
return devm_led_classdev_register(&hdev->dev, mute_led);
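
[Editor's note] This change (and the matching hid-lenovo conversion further down) follows the recipe for LEDs on hot-unpluggable hardware: drop the brightness_get callback, report failures from a blocking set callback while staying silent on -ENODEV, and flag the classdev LED_HW_PLUGGABLE. Condensed, the registration pattern looks roughly like this (demo names are illustrative):

    /* Sketch of an LED on hardware that can disappear at runtime. */
    static int demo_led_set_blocking(struct led_classdev *cdev,
                                     enum led_brightness value)
    {
            int ret = demo_hw_write(cdev, value);  /* placeholder I/O helper */

            if (ret < 0 && ret != -ENODEV)
                    dev_err(cdev->dev, "failed to set LED: %d\n", ret);
            return ret;
    }

    ...
    led->brightness_set_blocking = demo_led_set_blocking;
    led->max_brightness = LED_ON;
    led->flags = LED_HW_PLUGGABLE;
    return devm_led_classdev_register(dev, led);
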
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
new file mode 100644
index 000000000000..a5751607ce24
--- /dev/null
+++ b/drivers/hid/hid-ft260.c
@@ -0,0 +1,1054 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * hid-ft260.c - FTDI FT260 USB HID to I2C host bridge
+ *
+ * Copyright (c) 2021, Michael Zaidman <michaelz@xsightlabs.com>
+ *
+ * Data Sheet:
+ * https://www.ftdichip.com/Support/Documents/DataSheets/ICs/DS_FT260.pdf
+ */
+
+#include "hid-ids.h"
+#include <linux/hidraw.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#ifdef DEBUG
+static int ft260_debug = 1;
+#else
+static int ft260_debug;
+#endif
+module_param_named(debug, ft260_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Toggle FT260 debugging messages");
+
+#define ft260_dbg(format, arg...) \
+ do { \
+ if (ft260_debug) \
+ pr_info("%s: " format, __func__, ##arg); \
+ } while (0)
+
+#define FT260_REPORT_MAX_LENGTH (64)
+#define FT260_I2C_DATA_REPORT_ID(len) (FT260_I2C_REPORT_MIN + (len - 1) / 4)
+/*
+ * The input report format assigns 62 bytes for the data payload, but ft260
+ * returns 60 and 2 in two separate transactions. To minimize transfer time
+ * when reading in chunks, set the maximum read payload length to 60 bytes.
+ */
+#define FT260_RD_DATA_MAX (60)
+#define FT260_WR_DATA_MAX (60)
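
[Editor's note] FT260_I2C_DATA_REPORT_ID() selects one of the 0xD0..0xDE output report IDs, one per 4-byte payload bucket. A quick standalone check of the arithmetic (constants copied from above; the macro argument is parenthesized here for standalone use):

    #include <assert.h>

    #define FT260_I2C_REPORT_MIN 0xD0
    #define FT260_I2C_DATA_REPORT_ID(len) (FT260_I2C_REPORT_MIN + ((len) - 1) / 4)

    int main(void)
    {
            assert(FT260_I2C_DATA_REPORT_ID(1)  == 0xD0); /* 1..4 byte payload */
            assert(FT260_I2C_DATA_REPORT_ID(4)  == 0xD0);
            assert(FT260_I2C_DATA_REPORT_ID(5)  == 0xD1); /* 5..8 bytes */
            assert(FT260_I2C_DATA_REPORT_ID(60) == 0xDE); /* FT260_WR_DATA_MAX */
            return 0;
    }
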
+
+/*
+ * Device interface configuration.
+ * The FT260 has 2 interfaces that are controlled by DCNF0 and DCNF1 pins.
+ * The first implements the USB HID to I2C bridge function and the
+ * second the USB HID to UART bridge function.
+ */
+enum {
+ FT260_MODE_ALL = 0x00,
+ FT260_MODE_I2C = 0x01,
+ FT260_MODE_UART = 0x02,
+ FT260_MODE_BOTH = 0x03,
+};
+
+/* Control pipe */
+enum {
+ FT260_GET_RQST_TYPE = 0xA1,
+ FT260_GET_REPORT = 0x01,
+ FT260_SET_RQST_TYPE = 0x21,
+ FT260_SET_REPORT = 0x09,
+ FT260_FEATURE = 0x03,
+};
+
+/* Report IDs / Feature In */
+enum {
+ FT260_CHIP_VERSION = 0xA0,
+ FT260_SYSTEM_SETTINGS = 0xA1,
+ FT260_I2C_STATUS = 0xC0,
+ FT260_I2C_READ_REQ = 0xC2,
+ FT260_I2C_REPORT_MIN = 0xD0,
+ FT260_I2C_REPORT_MAX = 0xDE,
+ FT260_GPIO = 0xB0,
+ FT260_UART_INTERRUPT_STATUS = 0xB1,
+ FT260_UART_STATUS = 0xE0,
+ FT260_UART_RI_DCD_STATUS = 0xE1,
+ FT260_UART_REPORT = 0xF0,
+};
+
+/* Feature Out */
+enum {
+ FT260_SET_CLOCK = 0x01,
+ FT260_SET_I2C_MODE = 0x02,
+ FT260_SET_UART_MODE = 0x03,
+ FT260_ENABLE_INTERRUPT = 0x05,
+ FT260_SELECT_GPIO2_FUNC = 0x06,
+ FT260_ENABLE_UART_DCD_RI = 0x07,
+ FT260_SELECT_GPIOA_FUNC = 0x08,
+ FT260_SELECT_GPIOG_FUNC = 0x09,
+ FT260_SET_INTERRUPT_TRIGGER = 0x0A,
+ FT260_SET_SUSPEND_OUT_POLAR = 0x0B,
+ FT260_ENABLE_UART_RI_WAKEUP = 0x0C,
+ FT260_SET_UART_RI_WAKEUP_CFG = 0x0D,
+ FT260_SET_I2C_RESET = 0x20,
+ FT260_SET_I2C_CLOCK_SPEED = 0x22,
+ FT260_SET_UART_RESET = 0x40,
+ FT260_SET_UART_CONFIG = 0x41,
+ FT260_SET_UART_BAUD_RATE = 0x42,
+ FT260_SET_UART_DATA_BIT = 0x43,
+ FT260_SET_UART_PARITY = 0x44,
+ FT260_SET_UART_STOP_BIT = 0x45,
+ FT260_SET_UART_BREAKING = 0x46,
+ FT260_SET_UART_XON_XOFF = 0x49,
+};
+
+/* Response codes in I2C status report */
+enum {
+ FT260_I2C_STATUS_SUCCESS = 0x00,
+ FT260_I2C_STATUS_CTRL_BUSY = 0x01,
+ FT260_I2C_STATUS_ERROR = 0x02,
+ FT260_I2C_STATUS_ADDR_NO_ACK = 0x04,
+ FT260_I2C_STATUS_DATA_NO_ACK = 0x08,
+ FT260_I2C_STATUS_ARBITR_LOST = 0x10,
+ FT260_I2C_STATUS_CTRL_IDLE = 0x20,
+ FT260_I2C_STATUS_BUS_BUSY = 0x40,
+};
+
+/* I2C Conditions flags */
+enum {
+ FT260_FLAG_NONE = 0x00,
+ FT260_FLAG_START = 0x02,
+ FT260_FLAG_START_REPEATED = 0x03,
+ FT260_FLAG_STOP = 0x04,
+ FT260_FLAG_START_STOP = 0x06,
+ FT260_FLAG_START_STOP_REPEATED = 0x07,
+};
+
+#define FT260_SET_REQUEST_VALUE(report_id) ((FT260_FEATURE << 8) | report_id)
+
+/* Feature In reports */
+
+struct ft260_get_chip_version_report {
+ u8 report; /* FT260_CHIP_VERSION */
+ u8 chip_code[4]; /* FTDI chip identification code */
+ u8 reserved[8];
+} __packed;
+
+struct ft260_get_system_status_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 chip_mode; /* DCNF0 and DCNF1 status, bits 0-1 */
+ u8 clock_ctl; /* 0 - 12MHz, 1 - 24MHz, 2 - 48MHz */
+ u8 suspend_status; /* 0 - not suspended, 1 - suspended */
+ u8 pwren_status; /* 0 - FT260 is not ready, 1 - ready */
+ u8 i2c_enable; /* 0 - disabled, 1 - enabled */
+ u8 uart_mode; /* 0 - OFF; 1 - RTS_CTS, 2 - DTR_DSR, */
+ /* 3 - XON_XOFF, 4 - No flow control */
+ u8 hid_over_i2c_en; /* 0 - disabled, 1 - enabled */
+ u8 gpio2_function; /* 0 - GPIO, 1 - SUSPOUT, */
+ /* 2 - PWREN, 4 - TX_LED */
+ u8 gpioA_function; /* 0 - GPIO, 3 - TX_ACTIVE, 4 - TX_LED */
+ u8 gpioG_function; /* 0 - GPIO, 2 - PWREN, */
+ /* 5 - RX_LED, 6 - BCD_DET */
+ u8 suspend_out_pol; /* 0 - active-high, 1 - active-low */
+ u8 enable_wakeup_int; /* 0 - disabled, 1 - enabled */
+ u8 intr_cond; /* Interrupt trigger conditions */
+ u8 power_saving_en; /* 0 - disabled, 1 - enabled */
+ u8 reserved[10];
+} __packed;
+
+struct ft260_get_i2c_status_report {
+ u8 report; /* FT260_I2C_STATUS */
+ u8 bus_status; /* I2C bus status */
+ __le16 clock; /* I2C bus clock in range 60-3400 KHz */
+ u8 reserved;
+} __packed;
+
+/* Feature Out reports */
+
+struct ft260_set_system_clock_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_CLOCK */
+ u8 clock_ctl; /* 0 - 12MHz, 1 - 24MHz, 2 - 48MHz */
+} __packed;
+
+struct ft260_set_i2c_mode_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_I2C_MODE */
+ u8 i2c_enable; /* 0 - disabled, 1 - enabled */
+} __packed;
+
+struct ft260_set_uart_mode_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_UART_MODE */
+ u8 uart_mode; /* 0 - OFF; 1 - RTS_CTS, 2 - DTR_DSR, */
+ /* 3 - XON_XOFF, 4 - No flow control */
+} __packed;
+
+struct ft260_set_i2c_reset_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_I2C_RESET */
+} __packed;
+
+struct ft260_set_i2c_speed_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_I2C_CLOCK_SPEED */
+ __le16 clock; /* I2C bus clock in range 60-3400 KHz */
+} __packed;
+
+/* Data transfer reports */
+
+struct ft260_i2c_write_request_report {
+ u8 report; /* FT260_I2C_REPORT */
+ u8 address; /* 7-bit I2C address */
+ u8 flag; /* I2C transaction condition */
+ u8 length; /* data payload length */
+ u8 data[60]; /* data payload */
+} __packed;
+
+struct ft260_i2c_read_request_report {
+ u8 report; /* FT260_I2C_READ_REQ */
+ u8 address; /* 7-bit I2C address */
+ u8 flag; /* I2C transaction condition */
+ __le16 length; /* data payload length */
+} __packed;
+
+struct ft260_i2c_input_report {
+ u8 report; /* FT260_I2C_REPORT */
+ u8 length; /* data payload length */
+ u8 data[2]; /* data payload */
+} __packed;
+
+static const struct hid_device_id ft260_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY,
+ USB_DEVICE_ID_FT260) },
+ { /* END OF LIST */ }
+};
+MODULE_DEVICE_TABLE(hid, ft260_devices);
+
+struct ft260_device {
+ struct i2c_adapter adap;
+ struct hid_device *hdev;
+ struct completion wait;
+ struct mutex lock;
+ u8 write_buf[FT260_REPORT_MAX_LENGTH];
+ u8 *read_buf;
+ u16 read_idx;
+ u16 read_len;
+ u16 clock;
+};
+
+static int ft260_hid_feature_report_get(struct hid_device *hdev,
+ unsigned char report_id, u8 *data,
+ size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ memcpy(data, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ft260_hid_feature_report_set(struct hid_device *hdev, u8 *data,
+ size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = FT260_SYSTEM_SETTINGS;
+
+ ret = hid_hw_raw_request(hdev, buf[0], buf, len, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+
+ kfree(buf);
+ return ret;
+}
+
+static int ft260_i2c_reset(struct hid_device *hdev)
+{
+ struct ft260_set_i2c_reset_report report;
+ int ret;
+
+ report.request = FT260_SET_I2C_RESET;
+
+ ret = ft260_hid_feature_report_set(hdev, (u8 *)&report, sizeof(report));
+ if (ret < 0) {
+ hid_err(hdev, "failed to reset I2C controller: %d\n", ret);
+ return ret;
+ }
+
+ ft260_dbg("done\n");
+ return ret;
+}
+
+static int ft260_xfer_status(struct ft260_device *dev)
+{
+ struct hid_device *hdev = dev->hdev;
+ struct ft260_get_i2c_status_report report;
+ int ret;
+
+ ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
+ (u8 *)&report, sizeof(report));
+ if (ret < 0) {
+ hid_err(hdev, "failed to retrieve status: %d\n", ret);
+ return ret;
+ }
+
+ dev->clock = le16_to_cpu(report.clock);
+ ft260_dbg("bus_status %#02x, clock %u\n", report.bus_status,
+ dev->clock);
+
+ if (report.bus_status & FT260_I2C_STATUS_CTRL_BUSY)
+ return -EAGAIN;
+
+ if (report.bus_status & FT260_I2C_STATUS_BUS_BUSY)
+ return -EBUSY;
+
+ if (report.bus_status & FT260_I2C_STATUS_ERROR)
+ return -EIO;
+
+ ret = -EIO;
+
+ if (report.bus_status & FT260_I2C_STATUS_ADDR_NO_ACK)
+ ft260_dbg("unacknowledged address\n");
+
+ if (report.bus_status & FT260_I2C_STATUS_DATA_NO_ACK)
+ ft260_dbg("unacknowledged data\n");
+
+ if (report.bus_status & FT260_I2C_STATUS_ARBITR_LOST)
+ ft260_dbg("arbitration loss\n");
+
+ if (report.bus_status & FT260_I2C_STATUS_CTRL_IDLE)
+ ret = 0;
+
+ return ret;
+}
+
+static int ft260_hid_output_report(struct hid_device *hdev, u8 *data,
+ size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_output_report(hdev, buf, len);
+
+ kfree(buf);
+ return ret;
+}
+
+static int ft260_hid_output_report_check_status(struct ft260_device *dev,
+ u8 *data, int len)
+{
+ int ret, usec, try = 3;
+ struct hid_device *hdev = dev->hdev;
+
+ ret = ft260_hid_output_report(hdev, data, len);
+ if (ret < 0) {
+ hid_err(hdev, "%s: failed to start transfer, ret %d\n",
+ __func__, ret);
+ ft260_i2c_reset(hdev);
+ return ret;
+ }
+
+ /* transfer time = 1 / clock(KHz) * 10 bits * bytes */
+ usec = 10000 / dev->clock * len;
+ usleep_range(usec, usec + 100);
+ ft260_dbg("wait %d usec, len %d\n", usec, len);
+ do {
+ ret = ft260_xfer_status(dev);
+ if (ret != -EAGAIN)
+ break;
+ } while (--try);
+
+ if (ret == 0 || ret == -EBUSY)
+ return 0;
+
+ ft260_i2c_reset(hdev);
+ return -EIO;
+}
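
[Editor's note] The usleep_range() above estimates the on-wire time as roughly 10 bit-times per byte (8 data bits plus ACK and clock overhead) at the bus clock reported in kHz. For example, a 64-byte report on a 100 kHz bus waits 10000 / 100 * 64 = 6400 usec before the first status poll. Restated as a standalone helper (the function name is illustrative):

    /* Estimated I2C transfer time in usec: ~10 bit-times per byte. */
    static int ft260_est_xfer_usec(int clock_khz, int len)
    {
            return 10000 / clock_khz * len; /* 100 kHz, 64 bytes -> 6400 usec */
    }
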
+
+static int ft260_i2c_write(struct ft260_device *dev, u8 addr, u8 *data,
+ int data_len, u8 flag)
+{
+ int len, ret, idx = 0;
+ struct hid_device *hdev = dev->hdev;
+ struct ft260_i2c_write_request_report *rep =
+ (struct ft260_i2c_write_request_report *)dev->write_buf;
+
+ do {
+ if (data_len <= FT260_WR_DATA_MAX)
+ len = data_len;
+ else
+ len = FT260_WR_DATA_MAX;
+
+ rep->report = FT260_I2C_DATA_REPORT_ID(len);
+ rep->address = addr;
+ rep->length = len;
+ rep->flag = flag;
+
+ memcpy(rep->data, &data[idx], len);
+
+ ft260_dbg("rep %#02x addr %#02x off %d len %d d[0] %#02x\n",
+ rep->report, addr, idx, len, data[0]);
+
+ ret = ft260_hid_output_report_check_status(dev, (u8 *)rep,
+ len + 4);
+ if (ret < 0) {
+ hid_err(hdev, "%s: failed to start transfer, ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ data_len -= len;
+ idx += len;
+
+ } while (data_len > 0);
+
+ return 0;
+}
+
+static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
+ u8 *data, u8 data_len, u8 flag)
+{
+ int ret = 0;
+ int len = 4;
+
+ struct ft260_i2c_write_request_report *rep =
+ (struct ft260_i2c_write_request_report *)dev->write_buf;
+
+ rep->address = addr;
+ rep->data[0] = cmd;
+ rep->length = data_len + 1;
+ rep->flag = flag;
+ len += rep->length;
+
+ rep->report = FT260_I2C_DATA_REPORT_ID(len);
+
+ if (data_len > 0)
+ memcpy(&rep->data[1], data, data_len);
+
+ ft260_dbg("rep %#02x addr %#02x cmd %#02x datlen %d replen %d\n",
+ rep->report, addr, cmd, rep->length, len);
+
+ ret = ft260_hid_output_report_check_status(dev, (u8 *)rep, len);
+
+ return ret;
+}
+
+static int ft260_i2c_read(struct ft260_device *dev, u8 addr, u8 *data,
+ u16 len, u8 flag)
+{
+ struct ft260_i2c_read_request_report rep;
+ struct hid_device *hdev = dev->hdev;
+ int timeout;
+ int ret;
+
+ if (len > FT260_RD_DATA_MAX) {
+ hid_err(hdev, "%s: unsupported rd len: %d\n", __func__, len);
+ return -EINVAL;
+ }
+
+ dev->read_idx = 0;
+ dev->read_buf = data;
+ dev->read_len = len;
+
+ rep.report = FT260_I2C_READ_REQ;
+ rep.length = cpu_to_le16(len);
+ rep.address = addr;
+ rep.flag = flag;
+
+ ft260_dbg("rep %#02x addr %#02x len %d\n", rep.report, rep.address,
+ rep.length);
+
+ reinit_completion(&dev->wait);
+
+ ret = ft260_hid_output_report(hdev, (u8 *)&rep, sizeof(rep));
+ if (ret < 0) {
+ hid_err(hdev, "%s: failed to start transaction, ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(5000);
+ if (!wait_for_completion_timeout(&dev->wait, timeout)) {
+ ft260_i2c_reset(hdev);
+ return -ETIMEDOUT;
+ }
+
+ ret = ft260_xfer_status(dev);
+ if (ret == 0)
+ return 0;
+
+ ft260_i2c_reset(hdev);
+ return -EIO;
+}
+
+/*
+ * A random read operation is implemented as a dummy write operation, followed
+ * by a current address read operation. The dummy write operation is used to
+ * load the target byte address into the current byte address counter, from
+ * which the subsequent current address read operation then reads.
+ */
+static int ft260_i2c_write_read(struct ft260_device *dev, struct i2c_msg *msgs)
+{
+ int len, ret;
+ u16 left_len = msgs[1].len;
+ u8 *read_buf = msgs[1].buf;
+ u8 addr = msgs[0].addr;
+ u16 read_off = 0;
+ struct hid_device *hdev = dev->hdev;
+
+ if (msgs[0].len > 2) {
+ hid_err(hdev, "%s: unsupported wr len: %d\n", __func__,
+ msgs[0].len);
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(&read_off, msgs[0].buf, msgs[0].len);
+
+ do {
+ if (left_len <= FT260_RD_DATA_MAX)
+ len = left_len;
+ else
+ len = FT260_RD_DATA_MAX;
+
+ ft260_dbg("read_off %#x left_len %d len %d\n", read_off,
+ left_len, len);
+
+ ret = ft260_i2c_write(dev, addr, (u8 *)&read_off, msgs[0].len,
+ FT260_FLAG_START);
+ if (ret < 0)
+ return ret;
+
+ ret = ft260_i2c_read(dev, addr, read_buf, len,
+ FT260_FLAG_START_STOP);
+ if (ret < 0)
+ return ret;
+
+ left_len -= len;
+ read_buf += len;
+ read_off += len;
+
+ } while (left_len > 0);
+
+ return 0;
+}
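
[Editor's note] This is the classic register-read shape: a short write loads the target byte address, then a repeated-start read fetches the data, chunked to FT260_RD_DATA_MAX. Because of the adapter quirks declared further down, the I2C core only hands this path message pairs whose first (write) message is at most 2 bytes. A hedged sketch of a caller going through the core (adapter, address and register are placeholders):

    /* Hypothetical client transfer; assumes a valid struct i2c_adapter *adap. */
    u8 reg = 0x10;                 /* byte address inside the target device */
    u8 buf[8];
    struct i2c_msg msgs[] = {
            { .addr = 0x50, .flags = 0,        .len = 1,           .buf = &reg },
            { .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
    };
    int ret = i2c_transfer(adap, msgs, 2); /* ends up in ft260_i2c_xfer() */

    if (ret != 2)
            /* ret < 0 is an errno, anything else is a short transfer */;
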
+
+static int ft260_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+ struct ft260_device *dev = i2c_get_adapdata(adapter);
+ struct hid_device *hdev = dev->hdev;
+
+ mutex_lock(&dev->lock);
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "failed to enter FULLON power mode: %d\n", ret);
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ if (num == 1) {
+ if (msgs->flags & I2C_M_RD)
+ ret = ft260_i2c_read(dev, msgs->addr, msgs->buf,
+ msgs->len, FT260_FLAG_START_STOP);
+ else
+ ret = ft260_i2c_write(dev, msgs->addr, msgs->buf,
+ msgs->len, FT260_FLAG_START_STOP);
+ if (ret < 0)
+ goto i2c_exit;
+
+ } else {
+ /* Combined write then read message */
+ ret = ft260_i2c_write_read(dev, msgs);
+ if (ret < 0)
+ goto i2c_exit;
+ }
+
+ ret = num;
+i2c_exit:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static int ft260_smbus_xfer(struct i2c_adapter *adapter, u16 addr, u16 flags,
+ char read_write, u8 cmd, int size,
+ union i2c_smbus_data *data)
+{
+ int ret;
+ struct ft260_device *dev = i2c_get_adapdata(adapter);
+ struct hid_device *hdev = dev->hdev;
+
+ ft260_dbg("smbus size %d\n", size);
+
+ mutex_lock(&dev->lock);
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "power management error: %d\n", ret);
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ if (read_write == I2C_SMBUS_READ)
+ ret = ft260_i2c_read(dev, addr, &data->byte, 0,
+ FT260_FLAG_START_STOP);
+ else
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START_STOP);
+ break;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_READ)
+ ret = ft260_i2c_read(dev, addr, &data->byte, 1,
+ FT260_FLAG_START_STOP);
+ else
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START_STOP);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, &data->byte, 1,
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd, &data->byte, 1,
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, (u8 *)&data->word, 2,
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd,
+ (u8 *)&data->word, 2,
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, data->block,
+ data->block[0] + 1,
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd, data->block,
+ data->block[0] + 1,
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, data->block + 1,
+ data->block[0],
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd, data->block + 1,
+ data->block[0],
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ default:
+ hid_err(hdev, "unsupported smbus transaction size %d\n", size);
+ ret = -EOPNOTSUPP;
+ }
+
+smbus_exit:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static u32 ft260_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_QUICK |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK;
+}
+
+static const struct i2c_adapter_quirks ft260_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_comb_1st_msg_len = 2,
+};
+
+static const struct i2c_algorithm ft260_i2c_algo = {
+ .master_xfer = ft260_i2c_xfer,
+ .smbus_xfer = ft260_smbus_xfer,
+ .functionality = ft260_functionality,
+};
+
+static int ft260_get_system_config(struct hid_device *hdev,
+ struct ft260_get_system_status_report *cfg)
+{
+ int ret;
+ int len = sizeof(struct ft260_get_system_status_report);
+
+ ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
+ (u8 *)cfg, len);
+ if (ret != len) {
+ hid_err(hdev, "failed to retrieve system status\n");
+ if (ret >= 0)
+ return -EIO;
+ }
+ return 0;
+}
+
+static int ft260_is_interface_enabled(struct hid_device *hdev)
+{
+ struct ft260_get_system_status_report cfg;
+ struct usb_interface *usbif = to_usb_interface(hdev->dev.parent);
+ int interface = usbif->cur_altsetting->desc.bInterfaceNumber;
+ int ret;
+
+ ret = ft260_get_system_config(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ ft260_dbg("interface: 0x%02x\n", interface);
+ ft260_dbg("chip mode: 0x%02x\n", cfg.chip_mode);
+ ft260_dbg("clock_ctl: 0x%02x\n", cfg.clock_ctl);
+ ft260_dbg("i2c_enable: 0x%02x\n", cfg.i2c_enable);
+ ft260_dbg("uart_mode: 0x%02x\n", cfg.uart_mode);
+
+ switch (cfg.chip_mode) {
+ case FT260_MODE_ALL:
+ case FT260_MODE_BOTH:
+ if (interface == 1) {
+ hid_info(hdev, "uart interface is not supported\n");
+ return 0;
+ }
+ ret = 1;
+ break;
+ case FT260_MODE_UART:
+ if (interface == 0) {
+ hid_info(hdev, "uart is unsupported on interface 0\n");
+ ret = 0;
+ }
+ break;
+ case FT260_MODE_I2C:
+ if (interface == 1) {
+ hid_info(hdev, "i2c is unsupported on interface 1\n");
+ ret = 0;
+ }
+ break;
+ }
+ return ret;
+}
+
+static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
+ u8 *field, u8 *buf)
+{
+ int ret;
+
+ ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
+ if (ret != len && ret >= 0)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
+}
+
+static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
+ u16 *field, u8 *buf)
+{
+ int ret;
+
+ ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
+ if (ret != len && ret >= 0)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
+}
+
+#define FT260_ATTR_SHOW(name, reptype, id, type, func) \
+ static ssize_t name##_show(struct device *kdev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct reptype rep; \
+ struct hid_device *hdev = to_hid_device(kdev); \
+ type *field = &rep.name; \
+ int len = sizeof(rep); \
+ \
+ return func(hdev, id, (u8 *)&rep, len, field, buf); \
+ }
+
+#define FT260_SSTAT_ATTR_SHOW(name) \
+ FT260_ATTR_SHOW(name, ft260_get_system_status_report, \
+ FT260_SYSTEM_SETTINGS, u8, ft260_byte_show)
+
+#define FT260_I2CST_ATTR_SHOW(name) \
+ FT260_ATTR_SHOW(name, ft260_get_i2c_status_report, \
+ FT260_I2C_STATUS, u16, ft260_word_show)
+
+#define FT260_ATTR_STORE(name, reptype, id, req, type, func) \
+ static ssize_t name##_store(struct device *kdev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct reptype rep; \
+ struct hid_device *hdev = to_hid_device(kdev); \
+ type name; \
+ int ret; \
+ \
+ if (!func(buf, 10, &name)) { \
+ rep.name = name; \
+ rep.report = id; \
+ rep.request = req; \
+ ret = ft260_hid_feature_report_set(hdev, (u8 *)&rep, \
+ sizeof(rep)); \
+ if (!ret) \
+ ret = count; \
+ } else { \
+ ret = -EINVAL; \
+ } \
+ return ret; \
+ }
+
+#define FT260_BYTE_ATTR_STORE(name, reptype, req) \
+ FT260_ATTR_STORE(name, reptype, FT260_SYSTEM_SETTINGS, req, \
+ u8, kstrtou8)
+
+#define FT260_WORD_ATTR_STORE(name, reptype, req) \
+ FT260_ATTR_STORE(name, reptype, FT260_SYSTEM_SETTINGS, req, \
+ u16, kstrtou16)
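
[Editor's note] For reference, the show-side macros expand to ordinary sysfs callbacks. FT260_SSTAT_ATTR_SHOW(clock_ctl), for instance, generates roughly the following (hand-expanded here, not part of the driver):

    static ssize_t clock_ctl_show(struct device *kdev,
                                  struct device_attribute *attr, char *buf)
    {
            struct ft260_get_system_status_report rep;
            struct hid_device *hdev = to_hid_device(kdev);
            u8 *field = &rep.clock_ctl;
            int len = sizeof(rep);

            return ft260_byte_show(hdev, FT260_SYSTEM_SETTINGS, (u8 *)&rep,
                                   len, field, buf);
    }
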
+
+FT260_SSTAT_ATTR_SHOW(chip_mode);
+static DEVICE_ATTR_RO(chip_mode);
+
+FT260_SSTAT_ATTR_SHOW(pwren_status);
+static DEVICE_ATTR_RO(pwren_status);
+
+FT260_SSTAT_ATTR_SHOW(suspend_status);
+static DEVICE_ATTR_RO(suspend_status);
+
+FT260_SSTAT_ATTR_SHOW(hid_over_i2c_en);
+static DEVICE_ATTR_RO(hid_over_i2c_en);
+
+FT260_SSTAT_ATTR_SHOW(power_saving_en);
+static DEVICE_ATTR_RO(power_saving_en);
+
+FT260_SSTAT_ATTR_SHOW(i2c_enable);
+FT260_BYTE_ATTR_STORE(i2c_enable, ft260_set_i2c_mode_report,
+ FT260_SET_I2C_MODE);
+static DEVICE_ATTR_RW(i2c_enable);
+
+FT260_SSTAT_ATTR_SHOW(uart_mode);
+FT260_BYTE_ATTR_STORE(uart_mode, ft260_set_uart_mode_report,
+ FT260_SET_UART_MODE);
+static DEVICE_ATTR_RW(uart_mode);
+
+FT260_SSTAT_ATTR_SHOW(clock_ctl);
+FT260_BYTE_ATTR_STORE(clock_ctl, ft260_set_system_clock_report,
+ FT260_SET_CLOCK);
+static DEVICE_ATTR_RW(clock_ctl);
+
+FT260_I2CST_ATTR_SHOW(clock);
+FT260_WORD_ATTR_STORE(clock, ft260_set_i2c_speed_report,
+ FT260_SET_I2C_CLOCK_SPEED);
+static DEVICE_ATTR_RW(clock);
+
+static ssize_t i2c_reset_store(struct device *kdev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = to_hid_device(kdev);
+ int ret = ft260_i2c_reset(hdev);
+
+ if (ret)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR_WO(i2c_reset);
+
+static const struct attribute_group ft260_attr_group = {
+ .attrs = (struct attribute *[]) {
+ &dev_attr_chip_mode.attr,
+ &dev_attr_pwren_status.attr,
+ &dev_attr_suspend_status.attr,
+ &dev_attr_hid_over_i2c_en.attr,
+ &dev_attr_power_saving_en.attr,
+ &dev_attr_i2c_enable.attr,
+ &dev_attr_uart_mode.attr,
+ &dev_attr_clock_ctl.attr,
+ &dev_attr_i2c_reset.attr,
+ &dev_attr_clock.attr,
+ NULL
+ }
+};
+
+static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct ft260_device *dev;
+ struct ft260_get_chip_version_report version;
+ int ret;
+
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "failed to parse HID\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "failed to start HID HW\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "failed to open HID HW\n");
+ goto err_hid_stop;
+ }
+
+ ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
+ (u8 *)&version, sizeof(version));
+ if (ret != sizeof(version)) {
+ hid_err(hdev, "failed to retrieve chip version\n");
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_hid_close;
+ }
+
+ hid_info(hdev, "chip code: %02x%02x %02x%02x\n",
+ version.chip_code[0], version.chip_code[1],
+ version.chip_code[2], version.chip_code[3]);
+
+ ret = ft260_is_interface_enabled(hdev);
+ if (ret <= 0)
+ goto err_hid_close;
+
+ hid_set_drvdata(hdev, dev);
+ dev->hdev = hdev;
+ dev->adap.owner = THIS_MODULE;
+ dev->adap.class = I2C_CLASS_HWMON;
+ dev->adap.algo = &ft260_i2c_algo;
+ dev->adap.quirks = &ft260_i2c_quirks;
+ dev->adap.dev.parent = &hdev->dev;
+ snprintf(dev->adap.name, sizeof(dev->adap.name),
+ "FT260 usb-i2c bridge on hidraw%d",
+ ((struct hidraw *)hdev->hidraw)->minor);
+
+ mutex_init(&dev->lock);
+ init_completion(&dev->wait);
+
+ ret = i2c_add_adapter(&dev->adap);
+ if (ret) {
+ hid_err(hdev, "failed to add i2c adapter\n");
+ goto err_hid_close;
+ }
+
+ i2c_set_adapdata(&dev->adap, dev);
+
+ ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group);
+ if (ret < 0) {
+ hid_err(hdev, "failed to create sysfs attrs\n");
+ goto err_i2c_free;
+ }
+
+ ret = ft260_xfer_status(dev);
+ if (ret)
+ ft260_i2c_reset(hdev);
+
+ return 0;
+
+err_i2c_free:
+ i2c_del_adapter(&dev->adap);
+err_hid_close:
+ hid_hw_close(hdev);
+err_hid_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void ft260_remove(struct hid_device *hdev)
+{
+ int ret;
+ struct ft260_device *dev = hid_get_drvdata(hdev);
+
+ ret = ft260_is_interface_enabled(hdev);
+ if (ret <= 0)
+ return;
+
+ sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group);
+ i2c_del_adapter(&dev->adap);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static int ft260_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct ft260_device *dev = hid_get_drvdata(hdev);
+ struct ft260_i2c_input_report *xfer = (void *)data;
+
+ if (xfer->report >= FT260_I2C_REPORT_MIN &&
+ xfer->report <= FT260_I2C_REPORT_MAX) {
+ ft260_dbg("i2c resp: rep %#02x len %d\n", xfer->report,
+ xfer->length);
+
+ memcpy(&dev->read_buf[dev->read_idx], &xfer->data,
+ xfer->length);
+ dev->read_idx += xfer->length;
+
+ if (dev->read_idx == dev->read_len)
+ complete(&dev->wait);
+
+ } else {
+ hid_err(hdev, "unknown report: %#02x\n", xfer->report);
+ return 0;
+ }
+ return 1;
+}
+
+static struct hid_driver ft260_driver = {
+ .name = "ft260",
+ .id_table = ft260_devices,
+ .probe = ft260_probe,
+ .remove = ft260_remove,
+ .raw_event = ft260_raw_event,
+};
+
+module_hid_driver(ft260_driver);
+MODULE_DESCRIPTION("FTDI FT260 USB HID to I2C host bridge");
+MODULE_AUTHOR("Michael Zaidman <michael.zaidman@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 67fd8a2f5aba..84b8da3e7d09 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -93,6 +93,7 @@
#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
@@ -431,6 +432,7 @@
#define USB_VENDOR_ID_FUTURE_TECHNOLOGY 0x0403
#define USB_DEVICE_ID_RETRODE2 0x97c1
+#define USB_DEVICE_ID_FT260 0x6030
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
@@ -808,6 +810,7 @@
#define USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER 0xc51b
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
+#define USB_DEVICE_ID_LOGITECH_G700_RECEIVER 0xc531
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
@@ -816,8 +819,14 @@
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
-#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
-#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
+#define USB_DEVICE_ID_MX5000_RECEIVER_MOUSE_DEV 0xc70a
+#define USB_DEVICE_ID_MX5000_RECEIVER_KBD_DEV 0xc70e
+#define USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_KBD_DEV 0xc713
+#define USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_MOUSE_DEV 0xc714
+#define USB_DEVICE_ID_MX5500_RECEIVER_KBD_DEV 0xc71b
+#define USB_DEVICE_ID_MX5500_RECEIVER_MOUSE_DEV 0xc71c
+#define USB_DEVICE_ID_DINOVO_MINI_RECEIVER_KBD_DEV 0xc71e
+#define USB_DEVICE_ID_DINOVO_MINI_RECEIVER_MOUSE_DEV 0xc71f
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
@@ -946,6 +955,7 @@
#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 236bccd37760..18f5e28d475c 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -435,7 +435,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
return ret;
}
-static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ struct hid_field *field, bool is_percentage)
{
struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg = { .drv_data = dev, };
@@ -475,7 +476,7 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
min = field->logical_minimum;
max = field->logical_maximum;
- if (quirks & HID_BATTERY_QUIRK_PERCENT) {
+ if (is_percentage || (quirks & HID_BATTERY_QUIRK_PERCENT)) {
min = 0;
max = 100;
}
@@ -552,7 +553,7 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
- struct hid_field *field)
+ struct hid_field *field, bool is_percentage)
{
return 0;
}
@@ -806,7 +807,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
case 0x3b: /* Battery Strength */
- hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field, false);
usage->type = EV_PWR;
return;
@@ -1068,7 +1069,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_UP_GENDEVCTRLS:
switch (usage->hid) {
case HID_DC_BATTERYSTRENGTH:
- hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field, false);
+ usage->type = EV_PWR;
+ return;
+ }
+ goto unknown;
+
+ case HID_UP_BATTERY:
+ switch (usage->hid) {
+ case HID_BAT_ABSOLUTESTATEOFCHARGE:
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field, true);
usage->type = EV_PWR;
return;
}
@@ -1672,7 +1682,7 @@ static void report_features(struct hid_device *hid)
/* Verify if Battery Strength feature is available */
if (usage->hid == HID_DC_BATTERYSTRENGTH)
hidinput_setup_battery(hid, HID_FEATURE_REPORT,
- rep->field[i]);
+ rep->field[i], false);
if (drv->feature_mapping)
drv->feature_mapping(hid, rep->field[i], usage);
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index c8b40c07eca6..f46616390a98 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -655,7 +655,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
/**
- * Enable fully-functional tablet mode by setting a special feature report.
+ * kye_tablet_enable() - Enable fully-functional tablet mode by setting a special feature report.
*
* @hdev: HID device
*
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index c6c8e20f3e8d..93b1f935e526 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -33,6 +33,9 @@
#include "hid-ids.h"
+/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
+#define LENOVO_KEY_MICMUTE KEY_F20
+
struct lenovo_drvdata {
u8 led_report[3]; /* Must be first for proper alignment */
int led_state;
@@ -62,8 +65,8 @@ struct lenovo_drvdata {
#define TP10UBKBD_LED_OFF 1
#define TP10UBKBD_LED_ON 2
-static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
- enum led_brightness value)
+static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+ enum led_brightness value)
{
struct lenovo_drvdata *data = hid_get_drvdata(hdev);
int ret;
@@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
- if (ret)
- hid_err(hdev, "Set LED output report error: %d\n", ret);
+ if (ret != 3) {
+ if (ret != -ENODEV)
+ hid_err(hdev, "Set LED output report error: %d\n", ret);
+
+ ret = ret < 0 ? ret : -EIO;
+ } else {
+ ret = 0;
+ }
mutex_unlock(&data->led_report_mutex);
+
+ return ret;
}
static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
@@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
/* This sub-device contains trackpoint, mark it */
hid_set_drvdata(hdev, (void *)1);
- map_key_clear(KEY_MICMUTE);
+ map_key_clear(LENOVO_KEY_MICMUTE);
return 1;
}
return 0;
@@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
(usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
switch (usage->hid & HID_USAGE) {
case 0x00f1: /* Fn-F4: Mic mute */
- map_key_clear(KEY_MICMUTE);
+ map_key_clear(LENOVO_KEY_MICMUTE);
return 1;
case 0x00f2: /* Fn-F5: Brightness down */
map_key_clear(KEY_BRIGHTNESSDOWN);
@@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
map_key_clear(KEY_FN_ESC);
return 1;
case 9: /* Fn-F4: Mic mute */
- map_key_clear(KEY_MICMUTE);
+ map_key_clear(LENOVO_KEY_MICMUTE);
return 1;
case 10: /* Fn-F7: Control panel */
map_key_clear(KEY_CONFIG);
@@ -255,6 +266,54 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
return 0;
}
+static int lenovo_input_mapping_x1_tab_kbd(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ /*
+ * The ThinkPad X1 Tablet Thin Keyboard uses 0x000c0001 usage for
+ * a bunch of keys which have no standard consumer page code.
+ */
+ if (usage->hid == 0x000c0001) {
+ switch (usage->usage_index) {
+ case 0: /* Fn-F10: Enable/disable bluetooth */
+ map_key_clear(KEY_BLUETOOTH);
+ return 1;
+ case 1: /* Fn-F11: Keyboard settings */
+ map_key_clear(KEY_KEYBOARD);
+ return 1;
+ case 2: /* Fn-F12: User function / Cortana */
+ map_key_clear(KEY_MACRO1);
+ return 1;
+ case 3: /* Fn-PrtSc: Snipping tool */
+ map_key_clear(KEY_SELECTIVE_SCREENSHOT);
+ return 1;
+ case 8: /* Fn-Esc: Fn-lock toggle */
+ map_key_clear(KEY_FN_ESC);
+ return 1;
+ case 9: /* Fn-F4: Mute/unmute microphone */
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ case 10: /* Fn-F9: Settings */
+ map_key_clear(KEY_CONFIG);
+ return 1;
+ case 13: /* Fn-F7: Manage external displays */
+ map_key_clear(KEY_SWITCHVIDEOMODE);
+ return 1;
+ case 14: /* Fn-F8: Enable/disable wifi */
+ map_key_clear(KEY_WLAN);
+ return 1;
+ }
+ }
+
+ if (usage->hid == (HID_UP_KEYBOARD | 0x009a)) {
+ map_key_clear(KEY_SYSRQ);
+ return 1;
+ }
+
+ return 0;
+}
+
static int lenovo_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
@@ -278,6 +337,8 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
+ return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
default:
return 0;
}
@@ -349,7 +410,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
{
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data = hid_get_drvdata(hdev);
- int value;
+ int value, ret;
if (kstrtoint(buf, 10, &value))
return -EINVAL;
@@ -364,7 +425,10 @@ static ssize_t attr_fn_lock_store(struct device *dev,
lenovo_features_set_cptkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
- lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
+ ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+ if (ret)
+ return ret;
break;
}
@@ -498,11 +562,15 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
+ if (!hid_get_drvdata(hdev))
+ return 0;
+
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
case USB_DEVICE_ID_LENOVO_CBTKBD:
return lenovo_event_cptkbd(hdev, field, usage, value);
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
return lenovo_event_tp10ubkbd(hdev, field, usage, value);
default:
return 0;
@@ -761,23 +829,7 @@ static void lenovo_led_set_tpkbd(struct hid_device *hdev)
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}
-static enum led_brightness lenovo_led_brightness_get(
- struct led_classdev *led_cdev)
-{
- struct device *dev = led_cdev->dev->parent;
- struct hid_device *hdev = to_hid_device(dev);
- struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- int led_nr = 0;
-
- if (led_cdev == &data_pointer->led_micmute)
- led_nr = 1;
-
- return data_pointer->led_state & (1 << led_nr)
- ? LED_FULL
- : LED_OFF;
-}
-
-static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
+static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct device *dev = led_cdev->dev->parent;
@@ -785,6 +837,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
int led_nr = 0;
+ int ret = 0;
if (led_cdev == &data_pointer->led_micmute)
led_nr = 1;
@@ -799,9 +852,12 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
lenovo_led_set_tpkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
- lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
+ ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
break;
}
+
+ return ret;
}
static int lenovo_register_leds(struct hid_device *hdev)
@@ -821,16 +877,20 @@ static int lenovo_register_leds(struct hid_device *hdev)
snprintf(name_micm, name_sz, "%s:amber:micmute", dev_name(&hdev->dev));
data->led_mute.name = name_mute;
- data->led_mute.brightness_get = lenovo_led_brightness_get;
- data->led_mute.brightness_set = lenovo_led_brightness_set;
+ data->led_mute.default_trigger = "audio-mute";
+ data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
+ data->led_mute.max_brightness = 1;
+ data->led_mute.flags = LED_HW_PLUGGABLE;
data->led_mute.dev = &hdev->dev;
ret = led_classdev_register(&hdev->dev, &data->led_mute);
if (ret < 0)
return ret;
data->led_micmute.name = name_micm;
- data->led_micmute.brightness_get = lenovo_led_brightness_get;
- data->led_micmute.brightness_set = lenovo_led_brightness_set;
+ data->led_micmute.default_trigger = "audio-micmute";
+ data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
+ data->led_micmute.max_brightness = 1;
+ data->led_micmute.flags = LED_HW_PLUGGABLE;
data->led_micmute.dev = &hdev->dev;
ret = led_classdev_register(&hdev->dev, &data->led_micmute);
if (ret < 0) {
@@ -952,11 +1012,24 @@ static const struct attribute_group lenovo_attr_group_tp10ubkbd = {
static int lenovo_probe_tp10ubkbd(struct hid_device *hdev)
{
+ struct hid_report_enum *rep_enum;
struct lenovo_drvdata *data;
+ struct hid_report *rep;
+ bool found;
int ret;
- /* All the custom action happens on the USBMOUSE device for USB */
- if (hdev->type != HID_TYPE_USBMOUSE)
+ /*
+ * The LEDs and the Fn-lock functionality use output report 9,
+	 * with an application of 0xffa00001; add the LEDs on the interface
+	 * that has this output report.
+ */
+ found = false;
+ rep_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xffa00001)
+ found = true;
+ }
+ if (!found)
return 0;
data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL);
@@ -1018,6 +1091,7 @@ static int lenovo_probe(struct hid_device *hdev,
ret = lenovo_probe_cptkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
ret = lenovo_probe_tp10ubkbd(hdev);
break;
default:
@@ -1083,6 +1157,7 @@ static void lenovo_remove(struct hid_device *hdev)
lenovo_remove_cptkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
lenovo_remove_tp10ubkbd(hdev);
break;
}
@@ -1122,6 +1197,12 @@ static const struct hid_device_id lenovo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TP10UBKBD) },
+ /*
+	 * Note: bind to the HID_GROUP_GENERIC group so that we only bind to the keyboard
+ * part, while letting hid-multitouch.c handle the touchpad and trackpoint.
+ */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ }
};
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 0dc7cdfc56f7..d40af911df63 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -568,22 +568,6 @@ static int lg_ultrax_remote_mapping(struct hid_input *hi,
return 1;
}
-static int lg_dinovo_mapping(struct hid_input *hi, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
- return 0;
-
- switch (usage->hid & HID_USAGE) {
-
- case 0x00d: lg_map_key_clear(KEY_MEDIA); break;
- default:
- return 0;
-
- }
- return 1;
-}
-
static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
unsigned long **bit, int *max)
{
@@ -668,10 +652,6 @@ static int lg_input_mapping(struct hid_device *hdev, struct hid_input *hi,
lg_ultrax_remote_mapping(hi, usage, bit, max))
return 1;
- if (hdev->product == USB_DEVICE_ID_DINOVO_MINI &&
- lg_dinovo_mapping(hi, usage, bit, max))
- return 1;
-
if ((drv_data->quirks & LG_WIRELESS) && lg_wireless_mapping(hi, usage, bit, max))
return 1;
@@ -879,10 +859,6 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP),
.driver_data = LG_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE),
- .driver_data = LG_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI),
- .driver_data = LG_DUPLICATE_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD),
.driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 271bd8d24339..fa835d565982 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -84,6 +84,7 @@
#define STD_MOUSE BIT(2)
#define MULTIMEDIA BIT(3)
#define POWER_KEYS BIT(4)
+#define KBD_MOUSE BIT(5)
#define MEDIA_CENTER BIT(8)
#define KBD_LEDS BIT(14)
/* Fake (bitnr > NUMBER_OF_HID_REPORTS) bit to track HID++ capability */
@@ -117,6 +118,7 @@ enum recvr_type {
recvr_type_mouse_only,
recvr_type_27mhz,
recvr_type_bluetooth,
+ recvr_type_dinovo,
};
struct dj_report {
@@ -333,6 +335,47 @@ static const char mse_bluetooth_descriptor[] = {
0xC0, /* END_COLLECTION */
};
+/* Mouse descriptor (5) for Bluetooth receiver, normal-res hwheel, 8 buttons */
+static const char mse5_bluetooth_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* Usage (Mouse) */
+ 0xa1, 0x01, /* Collection (Application) */
+ 0x85, 0x05, /* Report ID (5) */
+ 0x09, 0x01, /* Usage (Pointer) */
+ 0xa1, 0x00, /* Collection (Physical) */
+ 0x05, 0x09, /* Usage Page (Button) */
+ 0x19, 0x01, /* Usage Minimum (1) */
+ 0x29, 0x08, /* Usage Maximum (8) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop) */
+ 0x16, 0x01, 0xf8, /* Logical Minimum (-2047) */
+ 0x26, 0xff, 0x07, /* Logical Maximum (2047) */
+ 0x75, 0x0c, /* Report Size (12) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0x15, 0x81, /* Logical Minimum (-127) */
+ 0x25, 0x7f, /* Logical Maximum (127) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x09, 0x38, /* Usage (Wheel) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0x05, 0x0c, /* Usage Page (Consumer Devices) */
+ 0x0a, 0x38, 0x02, /* Usage (AC Pan) */
+ 0x15, 0x81, /* Logical Minimum (-127) */
+ 0x25, 0x7f, /* Logical Maximum (127) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0xc0, /* End Collection */
+ 0xc0, /* End Collection */
+};
+
/* Gaming Mouse descriptor (2) */
static const char mse_high_res_descriptor[] = {
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
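For reference, a minimal sketch (not part of the patch) of how the two 12-bit deltas described by mse5_bluetooth_descriptor above would be unpacked from an INPUT(5) report; the layout (report ID, one button byte, then X and Y packed LSB-first across three bytes) is inferred from the descriptor, and both helpers are hypothetical:

static inline int sign_extend_12(int v)
{
	return (v & 0x800) ? v - 0x1000 : v;	/* 12-bit two's complement */
}

static void mse5_unpack_xy(const u8 *data, int *x, int *y)
{
	/* data[0] = report ID (5), data[1] = buttons 1-8 */
	*x = sign_extend_12(data[2] | ((data[3] & 0x0f) << 8));
	*y = sign_extend_12((data[3] >> 4) | (data[4] << 4));
}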
@@ -480,6 +523,7 @@ static const char hidpp_descriptor[] = {
#define MAX_RDESC_SIZE \
(sizeof(kbd_descriptor) + \
sizeof(mse_bluetooth_descriptor) + \
+ sizeof(mse5_bluetooth_descriptor) + \
sizeof(consumer_descriptor) + \
sizeof(syscontrol_descriptor) + \
sizeof(media_descriptor) + \
@@ -517,6 +561,11 @@ static void delayedwork_callback(struct work_struct *work);
static LIST_HEAD(dj_hdev_list);
static DEFINE_MUTEX(dj_hdev_list_lock);
+static bool recvr_type_is_bluetooth(enum recvr_type type)
+{
+ return type == recvr_type_bluetooth || type == recvr_type_dinovo;
+}
+
/*
* dj/HID++ receivers are really a single logical entity, but for BIOS/Windows
* compatibility they have multiple USB interfaces. On HID++ receivers we need
@@ -534,7 +583,7 @@ static struct dj_receiver_dev *dj_find_receiver_dev(struct hid_device *hdev,
* The bluetooth receiver contains a built-in hub and has separate
* USB-devices for the keyboard and mouse interfaces.
*/
- sep = (type == recvr_type_bluetooth) ? '.' : '/';
+ sep = recvr_type_is_bluetooth(type) ? '.' : '/';
/* Try to find an already-probed interface from the same device */
list_for_each_entry(djrcv_dev, &dj_hdev_list, list) {
@@ -872,6 +921,14 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
* touchpad to work we must also forward mouse input reports to the dj_hiddev
* created for the keyboard (instead of forwarding them to a second paired
* device with a device_type of REPORT_TYPE_MOUSE as we normally would).
+ *
+ * On Dinovo receivers the keyboard's touchpad and an optional paired actual
+ * mouse send separate input reports, INPUT(2) aka STD_MOUSE for the mouse
+ * and INPUT(5) aka KBD_MOUSE for the keyboard's touchpad.
+ *
+ * On MX5x00 receivers (which can also be paired with a Dinovo keyboard)
+ * INPUT(2) is used for both an optional paired actual mouse and for the
+ * keyboard's touchpad.
*/
static const u16 kbd_builtin_touchpad_ids[] = {
0xb309, /* Dinovo Edge */
@@ -898,7 +955,10 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
id = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb;
for (i = 0; i < ARRAY_SIZE(kbd_builtin_touchpad_ids); i++) {
if (id == kbd_builtin_touchpad_ids[i]) {
- workitem->reports_supported |= STD_MOUSE;
+ if (djrcv_dev->type == recvr_type_dinovo)
+ workitem->reports_supported |= KBD_MOUSE;
+ else
+ workitem->reports_supported |= STD_MOUSE;
break;
}
}
@@ -1367,7 +1427,7 @@ static int logi_dj_ll_parse(struct hid_device *hid)
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
rdcat(rdesc, &rsize, mse_27mhz_descriptor,
sizeof(mse_27mhz_descriptor));
- else if (djdev->dj_receiver_dev->type == recvr_type_bluetooth)
+ else if (recvr_type_is_bluetooth(djdev->dj_receiver_dev->type))
rdcat(rdesc, &rsize, mse_bluetooth_descriptor,
sizeof(mse_bluetooth_descriptor));
else
@@ -1375,6 +1435,13 @@ static int logi_dj_ll_parse(struct hid_device *hid)
sizeof(mse_descriptor));
}
+ if (djdev->reports_supported & KBD_MOUSE) {
+ dbg_hid("%s: sending a kbd-mouse descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
+ rdcat(rdesc, &rsize, mse5_bluetooth_descriptor,
+ sizeof(mse5_bluetooth_descriptor));
+ }
+
if (djdev->reports_supported & MULTIMEDIA) {
dbg_hid("%s: sending a multimedia report descriptor: %llx\n",
__func__, djdev->reports_supported);
@@ -1692,6 +1759,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_mouse_only: no_dj_interfaces = 2; break;
case recvr_type_27mhz: no_dj_interfaces = 2; break;
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
+ case recvr_type_dinovo: no_dj_interfaces = 2; break;
}
if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
intf = to_usb_interface(hdev->dev.parent);
@@ -1857,23 +1925,27 @@ static void logi_dj_remove(struct hid_device *hdev)
}
static const struct hid_device_id logi_dj_receivers[] = {
- {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ { /* Logitech unifying receiver (0xc52b) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER),
.driver_data = recvr_type_dj},
- {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ { /* Logitech unifying receiver (0xc532) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
.driver_data = recvr_type_dj},
- { /* Logitech Nano mouse only receiver */
+
+ { /* Logitech Nano mouse only receiver (0xc52f) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
.driver_data = recvr_type_mouse_only},
- { /* Logitech Nano (non DJ) receiver */
+ { /* Logitech Nano (non DJ) receiver (0xc534) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
.driver_data = recvr_type_hidpp},
+
{ /* Logitech G700(s) receiver (0xc531) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc531),
+ USB_DEVICE_ID_LOGITECH_G700_RECEIVER),
.driver_data = recvr_type_gaming_hidpp},
{ /* Logitech G602 receiver (0xc537) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
@@ -1883,17 +1955,18 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech powerplay receiver (0xc53a) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc53f) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
+
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = recvr_type_27mhz},
- { /* Logitech powerplay receiver (0xc53a) */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
- .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_S510_RECEIVER_2),
@@ -1902,22 +1975,40 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER),
.driver_data = recvr_type_27mhz},
- { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. */
+
+ { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. (0xc70e) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc70e),
+ USB_DEVICE_ID_MX5000_RECEIVER_KBD_DEV),
.driver_data = recvr_type_bluetooth},
- { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. */
+ { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. (0xc70a) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc70a),
+ USB_DEVICE_ID_MX5000_RECEIVER_MOUSE_DEV),
.driver_data = recvr_type_bluetooth},
- { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. */
+ { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. (0xc71b) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc71b),
+ USB_DEVICE_ID_MX5500_RECEIVER_KBD_DEV),
.driver_data = recvr_type_bluetooth},
- { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. */
+ { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. (0xc71c) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc71c),
+ USB_DEVICE_ID_MX5500_RECEIVER_MOUSE_DEV),
.driver_data = recvr_type_bluetooth},
+
+ { /* Logitech Dinovo Edge HID++ / bluetooth receiver keyboard intf. (0xc713) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_KBD_DEV),
+ .driver_data = recvr_type_dinovo},
+ { /* Logitech Dinovo Edge HID++ / bluetooth receiver mouse intf. (0xc714) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_MOUSE_DEV),
+ .driver_data = recvr_type_dinovo},
+ { /* Logitech DiNovo Mini HID++ / bluetooth receiver keyboard intf. (0xc71e) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_MINI_RECEIVER_KBD_DEV),
+ .driver_data = recvr_type_dinovo},
+ { /* Logitech DiNovo Mini HID++ / bluetooth receiver mouse intf. (0xc71f) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_MINI_RECEIVER_MOUSE_DEV),
+ .driver_data = recvr_type_dinovo},
{}
};
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d459e2dbe647..d598094dadd0 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -261,7 +261,7 @@ static int __hidpp_send_report(struct hid_device *hdev,
return ret == fields_count ? 0 : -1;
}
-/**
+/*
* hidpp_send_message_sync() returns 0 in case of success, and something else
* in case of a failure.
* - If 'something else' is positive, it means that an error has been raised
@@ -423,7 +423,7 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
(report->rap.sub_id == 0x41));
}
-/**
+/*
* hidpp_prefix_name() prefixes the current given name with "Logitech ".
*/
static void hidpp_prefix_name(char **name, int name_length)
@@ -454,6 +454,7 @@ static void hidpp_prefix_name(char **name, int name_length)
* hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
* events given a high-resolution wheel
* movement.
+ * @input_dev: Pointer to the input device
* @counter: a hid_scroll_counter struct describing the wheel.
* @hi_res_value: the movement of the wheel, in the mouse's high-resolution
* units.
@@ -1884,7 +1885,7 @@ struct hidpp_touchpad_fw_items {
uint8_t persistent;
};
-/**
+/*
* send a set state command to the device by reading the current items->state
* field. items is then filled with the current state.
*/
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index abd86903875f..2bb473d8c424 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -16,6 +16,7 @@
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "hid-ids.h"
@@ -54,6 +55,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD2_USB_REPORT_ID 0x02
#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
+#define MOUSE2_REPORT_ID 0x12
#define DOUBLE_REPORT_ID 0xf7
/* These definitions are not precise, but they're close enough. (Bits
* 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
@@ -127,6 +129,9 @@ struct magicmouse_sc {
u8 size;
} touches[16];
int tracking_ids[16];
+
+ struct hid_device *hdev;
+ struct delayed_work work;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -195,7 +200,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
int id, x, y, size, orientation, touch_major, touch_minor, state, down;
int pressure = 0;
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
@@ -296,7 +302,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_PRESSURE, pressure);
if (report_undeciphered) {
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
@@ -380,6 +387,34 @@ static int magicmouse_raw_event(struct hid_device *hdev,
* ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
*/
break;
+ case MOUSE2_REPORT_ID:
+ /* Size is either 8 or (14 + 8 * N) */
+ if (size != 8 && (size < 14 || (size - 14) % 8 != 0))
+ return 0;
+ npoints = (size - 14) / 8;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for MOUSE2_REPORT_ID\n",
+ size);
+ return 0;
+ }
+ msc->ntouches = 0;
+ for (ii = 0; ii < npoints; ii++)
+ magicmouse_emit_touch(msc, ii, data + ii * 8 + 14);
+
+ /* When emulating three-button mode, it is important
+ * to have the current touch information before
+ * generating a click event.
+ */
+ x = (int)((data[3] << 24) | (data[2] << 16)) >> 16;
+ y = (int)((data[5] << 24) | (data[4] << 16)) >> 16;
+ clicks = data[1];
+
+ /* The following bits provide a device specific timestamp. They
+ * are unused here.
+ *
+ * ts = data[11] >> 6 | data[12] << 2 | data[13] << 10;
+ */
+ break;
case DOUBLE_REPORT_ID:
/* Sometimes the trackpad sends two touch reports in one
* packet.
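For reference, the cast-and-shift pairs above are sign extensions of little-endian 16-bit fields (data[2]/data[4] hold the low byte, data[3]/data[5] the high byte). A hypothetical equivalent, with a worked value:

/* E.g. lo = 0xfe, hi = 0xff gives 0xfffe, i.e. -2, matching
 * (int)((0xff << 24) | (0xfe << 16)) >> 16 in the hunk above.
 */
static inline int mouse2_delta(u8 lo, u8 hi)
{
	return (s16)((hi << 8) | lo);
}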
@@ -392,7 +427,8 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 0;
}
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -408,6 +444,23 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 1;
}
+static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+ if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ field->report->id == MOUSE2_REPORT_ID) {
+ /*
* magicmouse_raw_event() has done all the work. Skip hidinput.
+ *
+ * Specifically, hidinput may modify BTN_LEFT and BTN_RIGHT,
+ * breaking emulate_3button.
+ */
+ return 1;
+ }
+ return 0;
+}
+
static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
int error;
@@ -415,7 +468,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_KEY, input->evbit);
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
if (emulate_3button)
@@ -480,7 +534,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* the origin at the same position, and just uses the additive
* inverse of the reported Y.
*/
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
@@ -580,19 +635,60 @@ static int magicmouse_input_configured(struct hid_device *hdev,
return 0;
}
-
-static int magicmouse_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
+static int magicmouse_enable_multitouch(struct hid_device *hdev)
{
const u8 *feature;
const u8 feature_mt[] = { 0xD7, 0x01 };
+ const u8 feature_mt_mouse2[] = { 0xF1, 0x02, 0x01 };
const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
u8 *buf;
+ int ret;
+ int feature_size;
+
+ if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_bt);
+ feature = feature_mt_trackpad2_bt;
+ } else { /* USB_VENDOR_ID_APPLE */
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
+ }
+ } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ feature_size = sizeof(feature_mt_mouse2);
+ feature = feature_mt_mouse2;
+ } else {
+ feature_size = sizeof(feature_mt);
+ feature = feature_mt;
+ }
+
+ buf = kmemdup(feature, feature_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
+ return ret;
+}
+
+static void magicmouse_enable_mt_work(struct work_struct *work)
+{
+ struct magicmouse_sc *msc =
+ container_of(work, struct magicmouse_sc, work.work);
+ int ret;
+
+ ret = magicmouse_enable_multitouch(msc->hdev);
+ if (ret < 0)
+ hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
+}
+
+static int magicmouse_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
- int feature_size;
if (id->vendor == USB_VENDOR_ID_APPLE &&
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
@@ -606,6 +702,8 @@ static int magicmouse_probe(struct hid_device *hdev,
}
msc->scroll_accel = SCROLL_ACCEL_DEFAULT;
+ msc->hdev = hdev;
+ INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);
msc->quirks = id->driver_data;
hid_set_drvdata(hdev, msc);
@@ -631,6 +729,9 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID, 0);
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ MOUSE2_REPORT_ID, 0);
else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
if (id->vendor == BT_VENDOR_ID_APPLE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -652,25 +753,6 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
- if (id->vendor == BT_VENDOR_ID_APPLE) {
- feature_size = sizeof(feature_mt_trackpad2_bt);
- feature = feature_mt_trackpad2_bt;
- } else { /* USB_VENDOR_ID_APPLE */
- feature_size = sizeof(feature_mt_trackpad2_usb);
- feature = feature_mt_trackpad2_usb;
- }
- } else {
- feature_size = sizeof(feature_mt);
- feature = feature_mt;
- }
-
- buf = kmemdup(feature, feature_size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err_stop_hw;
- }
-
/*
* Some devices respond with 'invalid report id' when the feature
* report switching them into multitouch mode is sent to them.
@@ -679,13 +761,14 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- kfree(buf);
- if (ret != -EIO && ret != feature_size) {
+ ret = magicmouse_enable_multitouch(hdev);
+ if (ret != -EIO && ret < 0) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
+ if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
+ }
return 0;
err_stop_hw:
@@ -693,9 +776,18 @@ err_stop_hw:
return ret;
}
+static void magicmouse_remove(struct hid_device *hdev)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+ cancel_delayed_work_sync(&msc->work);
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id magic_mice[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
@@ -710,7 +802,9 @@ static struct hid_driver magicmouse_driver = {
.name = "magicmouse",
.id_table = magic_mice,
.probe = magicmouse_probe,
+ .remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
+ .event = magicmouse_event,
.input_mapping = magicmouse_input_mapping,
.input_configured = magicmouse_input_configured,
};
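Taken together, the hunks above implement a simple retry: if the first multitouch-enable request fails with -EIO, a deferrable work item repeats it 500 ms later, and remove() cancels it before stopping the hardware. A minimal sketch of that lifecycle, using the same kernel work-queue APIs as the patch:

/* In probe: arm the deferred retry only for the device that needs it. */
INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);
ret = magicmouse_enable_multitouch(hdev);
if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
	schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
/* ... and on the way out, in magicmouse_remove(): */
cancel_delayed_work_sync(&msc->work);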
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 1b5c63241af0..bbda231a7ce3 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -329,7 +329,6 @@ static int picolcd_raw_event(struct hid_device *hdev,
{
struct picolcd_data *data = hid_get_drvdata(hdev);
unsigned long flags;
- int ret = 0;
if (!data)
return 1;
@@ -342,9 +341,9 @@ static int picolcd_raw_event(struct hid_device *hdev,
if (report->id == REPORT_KEY_STATE) {
if (data->input_keys)
- ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+ picolcd_raw_keypad(data, report, raw_data+1, size-1);
} else if (report->id == REPORT_IR_DATA) {
- ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+ picolcd_raw_cir(data, report, raw_data+1, size-1);
} else {
spin_lock_irqsave(&data->lock, flags);
/*
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 85b685efc12f..e81b7cec2d12 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -13,6 +13,7 @@
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/jiffies.h>
#define PLT_HID_1_0_PAGE 0xffa00000
#define PLT_HID_2_0_PAGE 0xffa20000
@@ -36,6 +37,16 @@
#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+
+#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+
+struct plt_drv_data {
+ unsigned long device_type;
+ unsigned long last_volume_key_ts;
+ u32 quirks;
+};
+
static int plantronics_input_mapping(struct hid_device *hdev,
struct hid_input *hi,
struct hid_field *field,
@@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned long **bit, int *max)
{
unsigned short mapped_key;
- unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
+ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+ unsigned long plt_type = drv_data->device_type;
/* special case for PTT products */
if (field->application == HID_GD_JOYSTICK)
@@ -105,6 +117,30 @@ mapped:
return 1;
}
+static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
+ unsigned long prev_ts, cur_ts;
+
+ /* Usages are filtered in plantronics_usages. */
+
+ if (!value) /* Handle key presses only. */
+ return 0;
+
+ prev_ts = drv_data->last_volume_key_ts;
+ cur_ts = jiffies;
+ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
+ return 1; /* Ignore the repeated key. */
+
+ drv_data->last_volume_key_ts = cur_ts;
+ }
+
+ return 0;
+}
+
static unsigned long plantronics_device_type(struct hid_device *hdev)
{
unsigned i, col_page;
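The quirk handling above, in isolation: a volume-key press is dropped when it arrives within PLT_DOUBLE_KEY_TIMEOUT ms of the previously accepted one. A sketch with a hypothetical helper; the jiffies subtraction is wraparound-safe because only the difference is converted to milliseconds:

static bool plt_is_duplicate_press(unsigned long *last_ts)
{
	unsigned long now = jiffies;

	if (jiffies_to_msecs(now - *last_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
		return true;	/* second report of the same press */
	*last_ts = now;
	return false;
}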
@@ -133,15 +169,24 @@ exit:
static int plantronics_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
+ struct plt_drv_data *drv_data;
int ret;
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err;
}
- hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
+ drv_data->device_type = plantronics_device_type(hdev);
+ drv_data->quirks = id->driver_data;
+ drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+
+ hid_set_drvdata(hdev, drv_data);
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
@@ -153,15 +198,26 @@ err:
}
static const struct hid_device_id plantronics_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, plantronics_devices);
+static const struct hid_usage_id plantronics_usages[] = {
+ { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
+ { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
+ { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
+};
+
static struct hid_driver plantronics_driver = {
.name = "plantronics",
.id_table = plantronics_devices,
+ .usage_table = plantronics_usages,
.input_mapping = plantronics_input_mapping,
+ .event = plantronics_event,
.probe = plantronics_probe,
};
module_hid_driver(plantronics_driver);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 1a9daf03dbfa..3dd6f15f2a67 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -445,8 +445,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
@@ -661,6 +659,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
#endif
+#if IS_ENABLED(CONFIG_HID_TMINIT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65d) },
+#endif
#if IS_ENABLED(CONFIG_HID_TIVO)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 2628bc53ed80..2e6662173a79 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -397,15 +397,14 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
if (!strncmp(name, "value", strlen("value"))) {
u32 report_id;
- int ret;
if (kstrtoint(buf, 0, &value) != 0)
return -EINVAL;
report_id = sensor_inst->fields[field_index].attribute.
report_id;
- ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
- index, sizeof(value), &value);
+ sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+ index, sizeof(value), &value);
} else
return -EINVAL;
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 3dd7d3246737..95cf88f3bafb 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -18,7 +18,6 @@
/**
* struct sensor_hub_data - Holds instance data for a HID hub device
- * @hsdev: Stored hid instance for current hub device.
* @mutex: Mutex to serialize synchronous request.
* @lock: Spin lock to protect pending request structure.
* @dyn_callback_list: Holds callback function
@@ -34,7 +33,6 @@ struct sensor_hub_data {
spinlock_t dyn_callback_lock;
struct mfd_cell *hid_sensor_hub_client_devs;
int hid_sensor_client_cnt;
- unsigned long quirks;
int ref_cnt;
};
@@ -42,6 +40,7 @@ struct sensor_hub_data {
* struct hid_sensor_hub_callbacks_list - Stores callback list
* @list: list head.
* @usage_id: usage id for a physical device.
+ * @hsdev: Stored hid instance for current hub device.
* @usage_callback: Stores registered callback functions.
* @priv: Private data for a physical device.
*/
@@ -615,7 +614,6 @@ static int sensor_hub_probe(struct hid_device *hdev,
}
hid_set_drvdata(hdev, sd);
- sd->quirks = id->driver_data;
spin_lock_init(&sd->lock);
spin_lock_init(&sd->dyn_callback_lock);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
new file mode 100644
index 000000000000..2e452c6e8ef4
--- /dev/null
+++ b/drivers/hid/hid-thrustmaster.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * When connected to the machine, the Thrustmaster wheels appear as
+ * a «generic» hid gamepad called "Thrustmaster FFB Wheel".
+ *
+ * When in this mode not every functionality of the wheel, like the force feedback,
+ * is available. To enable all functionalities of a Thrustmaster wheel we have to
+ * send it a specific USB CONTROL request whose code differs for each wheel.
+ *
+ * This driver tries to understand which model of Thrustmaster wheel the generic
+ * "Thrustmaster FFB Wheel" really is and then sends the appropriate control code.
+ *
+ * Copyright (c) 2020-2021 Dario Pagani <dario.pagani.146+linuxk@gmail.com>
+ * Copyright (c) 2020-2021 Kim Kuparinen <kimi.h.kuparinen@gmail.com>
+ */
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/*
+ * These interrupts are used to prevent a nasty crash when initializing the
+ * T300RS. Used in thrustmaster_interrupts().
+ */
+static const u8 setup_0[] = { 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_1[] = { 0x0a, 0x04, 0x90, 0x03, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_2[] = { 0x0a, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_3[] = { 0x0a, 0x04, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_4[] = { 0x0a, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00 };
+static const u8 *const setup_arr[] = { setup_0, setup_1, setup_2, setup_3, setup_4 };
+static const unsigned int setup_arr_sizes[] = {
+ ARRAY_SIZE(setup_0),
+ ARRAY_SIZE(setup_1),
+ ARRAY_SIZE(setup_2),
+ ARRAY_SIZE(setup_3),
+ ARRAY_SIZE(setup_4)
+};
+/*
+ * This struct contains the model-specific
+ * information for each type of Thrustmaster wheel.
+ *
+ * Note: The values are stored in CPU
+ * endianness, while the USB protocol always uses
+ * little endian; the macro cpu_to_le[BIT]()
+ * must be used when preparing USB packets,
+ * and vice versa.
+ */
+struct tm_wheel_info {
+ uint16_t wheel_type;
+
+ /*
+ * See when the USB control out packet is prepared...
+ * @TODO The TMX seems to require multiple control codes to switch.
+ */
+ uint16_t switch_value;
+
+ char const *const wheel_name;
+};
+
+/*
+ * Known wheels.
+ * Note: TMX does not work as it requires 2 control packets
+ */
+static const struct tm_wheel_info tm_wheels_infos[] = {
+ {0x0306, 0x0006, "Thrustmaster T150RS"},
+ {0x0206, 0x0005, "Thrustmaster T300RS"},
+ {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
+ {0x0002, 0x0002, "Thrustmaster T500RS"}
+ //{0x0407, 0x0001, "Thrustmaster TMX"}
+};
+
+static const uint8_t tm_wheels_infos_length = 4;
+
+/*
+ * This struct contains (in little endian) the response data
+ * of the wheel to request 73.
+ *
+ * Sufficient research to understand what each field does has not
+ * been conducted yet. The position and meaning of the fields are
+ * just a very optimistic guess based on instinct...
+ */
+struct __packed tm_wheel_response
+{
+ /*
+ * Seems to be the type of packet
+ * - 0x0049 if it is data.a (15 bytes)
+ * - 0x0047 if it is data.b (7 bytes)
+ */
+ uint16_t type;
+
+ union {
+ struct __packed {
+ uint16_t field0;
+ uint16_t field1;
+ /*
+ * Seems to be the model code of the wheel
+ * Read table thrustmaster_wheels to values
+ */
+ uint16_t model;
+
+ uint16_t field2;
+ uint16_t field3;
+ uint16_t field4;
+ uint16_t field5;
+ } a;
+ struct __packed {
+ uint16_t field0;
+ uint16_t field1;
+ uint16_t model;
+ } b;
+ } data;
+};
+
+struct tm_wheel {
+ struct usb_device *usb_dev;
+ struct urb *urb;
+
+ struct usb_ctrlrequest *model_request;
+ struct tm_wheel_response *response;
+
+ struct usb_ctrlrequest *change_request;
+};
+
+/* The control packet to send to wheel */
+static const struct usb_ctrlrequest model_request = {
+ .bRequestType = 0xc1,
+ .bRequest = 73,
+ .wValue = 0,
+ .wIndex = 0,
+ .wLength = cpu_to_le16(0x0010)
+};
+
+static const struct usb_ctrlrequest change_request = {
+ .bRequestType = 0x41,
+ .bRequest = 83,
+ .wValue = 0, // Will be filled by the driver
+ .wIndex = 0,
+ .wLength = 0
+};
+
+/*
+ * On some setups initializing the T300RS crashes the kernel;
+ * these interrupts fix that particular issue. So far they haven't caused any
+ * adverse effects in other wheels.
+ */
+static void thrustmaster_interrupts(struct hid_device *hdev)
+{
+ int ret, trans, i, b_ep;
+ u8 *send_buf = kmalloc(256, GFP_KERNEL);
+ struct usb_host_endpoint *ep;
+ struct device *dev = &hdev->dev;
+ struct usb_interface *usbif = to_usb_interface(dev->parent);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+
+ if (!send_buf) {
+ hid_err(hdev, "failed allocating send buffer\n");
+ return;
+ }
+
+ ep = &usbif->cur_altsetting->endpoint[1];
+ b_ep = ep->desc.bEndpointAddress;
+
+ for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
+ memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
+
+ ret = usb_interrupt_msg(usbdev,
+ usb_sndintpipe(usbdev, b_ep),
+ send_buf,
+ setup_arr_sizes[i],
+ &trans,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (ret) {
+ hid_err(hdev, "setup data couldn't be sent\n");
+ break; /* stop sending; send_buf is freed below */
+ }
+ }
+
+ kfree(send_buf);
+}
+
+static void thrustmaster_change_handler(struct urb *urb)
+{
+ struct hid_device *hdev = urb->context;
+
+ // The wheel seems to kill itself before answering the host, thereby violating the USB protocol...
+ if (urb->status == 0 || urb->status == -EPROTO || urb->status == -EPIPE)
+ hid_info(hdev, "Success?! The wheel should have been initialized!\n");
+ else
+ hid_warn(hdev, "URB to change wheel mode seems to have failed with error %d\n", urb->status);
+}
+
+/*
+ * Called by the USB subsystem when the wheel responds to our request
+ * to get [what it seems to be] the wheel's model.
+ *
+ * If the model id is recognized then we send an appropriate USB CONTROL REQUEST
+ * to switch the wheel to its full capabilities
+ */
+static void thrustmaster_model_handler(struct urb *urb)
+{
+ struct hid_device *hdev = urb->context;
+ struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
+ uint16_t model = 0;
+ int i, ret;
+ const struct tm_wheel_info *twi = NULL;
+
+ if (urb->status) {
+ hid_err(hdev, "URB to get model id failed with error %d\n", urb->status);
+ return;
+ }
+
+ if (tm_wheel->response->type == cpu_to_le16(0x49))
+ model = le16_to_cpu(tm_wheel->response->data.a.model);
+ else if (tm_wheel->response->type == cpu_to_le16(0x47))
+ model = le16_to_cpu(tm_wheel->response->data.b.model);
+ else {
+ hid_err(hdev, "Unknown packet type 0x%x, unable to proceed further with wheel init\n", tm_wheel->response->type);
+ return;
+ }
+
+ for (i = 0; i < tm_wheels_infos_length && !twi; i++)
+ if (tm_wheels_infos[i].wheel_type == model)
+ twi = tm_wheels_infos + i;
+
+ if (twi)
+ hid_info(hdev, "Wheel with model id 0x%x is a %s\n", model, twi->wheel_name);
+ else {
+ hid_err(hdev, "Unknown wheel's model id 0x%x, unable to proceed further with wheel init\n", model);
+ return;
+ }
+
+ tm_wheel->change_request->wValue = cpu_to_le16(twi->switch_value);
+ usb_fill_control_urb(
+ tm_wheel->urb,
+ tm_wheel->usb_dev,
+ usb_sndctrlpipe(tm_wheel->usb_dev, 0),
+ (char *)tm_wheel->change_request,
+ 0, 0, // We do not expect any response from the wheel
+ thrustmaster_change_handler,
+ hdev
+ );
+
+ ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
+ if (ret)
+ hid_err(hdev, "Error %d while submitting the change URB. I am unable to initialize this wheel...\n", ret);
+}
+
+static void thrustmaster_remove(struct hid_device *hdev)
+{
+ struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
+
+ usb_kill_urb(tm_wheel->urb);
+
+ kfree(tm_wheel->response);
+ kfree(tm_wheel->model_request);
+ usb_free_urb(tm_wheel->urb);
+ kfree(tm_wheel);
+
+ hid_hw_stop(hdev);
+}
+
+/*
+ * Function called by HID when a hid Thrustmaster FFB wheel is connected to the host.
+ * This function starts the hid dev, tries to allocate the tm_wheel data structure and
+ * finally sends a USB CONTROL REQUEST to the wheel to get [what it seems to be] its
+ * model type.
+ */
+static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret = 0;
+ struct tm_wheel *tm_wheel = NULL;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed with error %d\n", ret);
+ goto error0;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed with error %d\n", ret);
+ goto error0;
+ }
+
+ // Now we allocate the tm_wheel
+ tm_wheel = kzalloc(sizeof(struct tm_wheel), GFP_KERNEL);
+ if (!tm_wheel) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ tm_wheel->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!tm_wheel->urb) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+
+ tm_wheel->model_request = kmemdup(&model_request,
+ sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (!tm_wheel->model_request) {
+ ret = -ENOMEM;
+ goto error3;
+ }
+
+ tm_wheel->response = kzalloc(sizeof(struct tm_wheel_response), GFP_KERNEL);
+ if (!tm_wheel->response) {
+ ret = -ENOMEM;
+ goto error4;
+ }
+
+ tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!tm_wheel->change_request) {
+ ret = -ENOMEM;
+ goto error5;
+ }
+ memcpy(tm_wheel->change_request, &change_request, sizeof(struct usb_ctrlrequest));
+
+ tm_wheel->usb_dev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
+ hid_set_drvdata(hdev, tm_wheel);
+
+ thrustmaster_interrupts(hdev);
+
+ usb_fill_control_urb(
+ tm_wheel->urb,
+ tm_wheel->usb_dev,
+ usb_rcvctrlpipe(tm_wheel->usb_dev, 0),
+ (char *)tm_wheel->model_request,
+ tm_wheel->response,
+ sizeof(struct tm_wheel_response),
+ thrustmaster_model_handler,
+ hdev
+ );
+
+ ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
+ if (ret)
+ hid_err(hdev, "Error %d while submitting the URB. I am unable to initialize this wheel...\n", ret);
+
+ return ret;
+
+error5: kfree(tm_wheel->response);
+error4: kfree(tm_wheel->model_request);
+error3: usb_free_urb(tm_wheel->urb);
+error2: kfree(tm_wheel);
+error1: hid_hw_stop(hdev);
+error0:
+ return ret;
+}
+
+static const struct hid_device_id thrustmaster_devices[] = {
+ { HID_USB_DEVICE(0x044f, 0xb65d)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, thrustmaster_devices);
+
+static struct hid_driver thrustmaster_driver = {
+ .name = "hid-thrustmaster",
+ .id_table = thrustmaster_devices,
+ .probe = thrustmaster_probe,
+ .remove = thrustmaster_remove,
+};
+
+module_hid_driver(thrustmaster_driver);
+
+MODULE_AUTHOR("Dario Pagani <dario.pagani.146+linuxk@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver to initialize some steering wheel joysticks from Thrustmaster");
+
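For comparison, the mode-switch request that thrustmaster_model_handler() submits as an asynchronous URB could in principle be issued synchronously; a sketch assuming a sleepable context, with the values taken from change_request above (usb_control_msg() converts wValue/wIndex to little endian itself):

ret = usb_control_msg(tm_wheel->usb_dev,
		      usb_sndctrlpipe(tm_wheel->usb_dev, 0),
		      83, 0x41,			/* bRequest, bRequestType */
		      twi->switch_value, 0,	/* wValue, wIndex (CPU order) */
		      NULL, 0, USB_CTRL_SET_TIMEOUT);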
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 6af25c38b9cc..3d67b748a3b9 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -21,7 +21,8 @@
#include <asm/unaligned.h>
/**
- * Convert a pen in-range reporting type to a string.
+ * uclogic_params_pen_inrange_to_str() - Convert a pen in-range reporting type
+ * to a string.
*
* @inrange: The in-range reporting type to convert.
*
@@ -516,7 +517,8 @@ void uclogic_params_cleanup(struct uclogic_params *params)
}
/**
- * Get a replacement report descriptor for a tablet's interface.
+ * uclogic_params_get_desc() - Get a replacement report descriptor for a
+ * tablet's interface.
*
* @params: The parameters of a tablet interface to get report
* descriptor for. Cannot be NULL.
@@ -689,7 +691,7 @@ static void uclogic_params_init_with_pen_unused(struct uclogic_params *params)
}
/**
- * uclogic_params_init() - initialize a Huion tablet interface and discover
+ * uclogic_params_huion_init() - initialize a Huion tablet interface and discover
* its parameters.
*
* @params: Parameters to fill in (to be cleaned with
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index bf5da6de7bba..6dd6dcd09c8b 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -641,7 +641,7 @@ const __u8 uclogic_rdesc_pen_v2_template_arr[] = {
const size_t uclogic_rdesc_pen_v2_template_size =
sizeof(uclogic_rdesc_pen_v2_template_arr);
-/**
+/*
* Expand to the contents of a generic buttonpad report descriptor.
*
* @_padding: Padding from the end of button bits at bit 44, until
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index bb8c00e6be78..a6f0257a26de 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -25,12 +25,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
+#include <linux/uuid.h>
#include "i2c-hid.h"
struct i2c_hid_acpi {
struct i2chid_ops ops;
- struct i2c_client *client;
+ struct acpi_device *adev;
};
static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
@@ -42,29 +43,24 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
{ },
};
-static int i2c_hid_acpi_get_descriptor(struct i2c_client *client)
+/* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */
+static guid_t i2c_hid_guid =
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+
+static int i2c_hid_acpi_get_descriptor(struct acpi_device *adev)
{
- static guid_t i2c_hid_guid =
- GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
- 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+ acpi_handle handle = acpi_device_handle(adev);
union acpi_object *obj;
- struct acpi_device *adev;
- acpi_handle handle;
u16 hid_descriptor_address;
- handle = ACPI_HANDLE(&client->dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_err(&client->dev, "Error could not get ACPI device\n");
- return -ENODEV;
- }
-
if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
return -ENODEV;
obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
- dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
+ acpi_handle_err(handle, "Error _DSM call to get HID descriptor address failed\n");
return -ENODEV;
}
@@ -76,14 +72,12 @@ static int i2c_hid_acpi_get_descriptor(struct i2c_client *client)
static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops)
{
- struct i2c_hid_acpi *ihid_acpi =
- container_of(ops, struct i2c_hid_acpi, ops);
- struct device *dev = &ihid_acpi->client->dev;
- acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
+ struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops);
+
+ acpi_device_set_power(ihid_acpi->adev, ACPI_STATE_D3_COLD);
}
-static int i2c_hid_acpi_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+static int i2c_hid_acpi_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_hid_acpi *ihid_acpi;
@@ -91,21 +85,25 @@ static int i2c_hid_acpi_probe(struct i2c_client *client,
u16 hid_descriptor_address;
int ret;
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+
ihid_acpi = devm_kzalloc(&client->dev, sizeof(*ihid_acpi), GFP_KERNEL);
if (!ihid_acpi)
return -ENOMEM;
- ihid_acpi->client = client;
+ ihid_acpi->adev = adev;
ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
- ret = i2c_hid_acpi_get_descriptor(client);
+ ret = i2c_hid_acpi_get_descriptor(adev);
if (ret < 0)
return ret;
hid_descriptor_address = ret;
- adev = ACPI_COMPANION(dev);
- if (adev)
- acpi_device_fix_up_power(adev);
+ acpi_device_fix_up_power(adev);
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
device_set_wakeup_capable(dev, true);
@@ -128,10 +126,10 @@ static struct i2c_driver i2c_hid_acpi_driver = {
.name = "i2c_hid_acpi",
.pm = &i2c_hid_core_pm,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+ .acpi_match_table = i2c_hid_acpi_match,
},
- .probe = i2c_hid_acpi_probe,
+ .probe_new = i2c_hid_acpi_probe,
.remove = i2c_hid_core_remove,
.shutdown = i2c_hid_core_shutdown,
};
diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
new file mode 100644
index 000000000000..7ce9b5d641eb
--- /dev/null
+++ b/drivers/hid/surface-hid/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0+
+menu "Surface System Aggregator Module HID support"
+ depends on SURFACE_AGGREGATOR
+ depends on INPUT
+
+config SURFACE_HID
+ tristate "HID transport driver for Surface System Aggregator Module"
+ depends on SURFACE_AGGREGATOR_REGISTRY
+ select SURFACE_HID_CORE
+ help
+ Driver to support integrated HID devices on newer Microsoft Surface
+ models.
+
+ This driver provides support for the HID transport protocol provided
+ by the Surface Aggregator Module (i.e. the embedded controller) on
+ 7th-generation Microsoft Surface devices, i.e. Surface Book 3 and
+ Surface Laptop 3. On those models, it is mainly used to connect the
+ integrated touchpad and keyboard.
+
+ Say M or Y here, if you want support for integrated HID devices, i.e.
+ integrated touchpad and keyboard, on 7th generation Microsoft Surface
+ models.
+
+config SURFACE_KBD
+ tristate "HID keyboard transport driver for Surface System Aggregator Module"
+ select SURFACE_HID_CORE
+ help
+ Driver to support HID keyboards on Surface Laptop 1 and 2 devices.
+
+ This driver provides support for the HID transport protocol provided
+ by the Surface Aggregator Module (i.e. the embedded controller) on
+ Microsoft Surface Laptops 1 and 2. It is used to connect the
+ integrated keyboard on those devices.
+
+ Say M or Y here, if you want support for the integrated keyboard on
+ Microsoft Surface Laptops 1 and 2.
+
+endmenu
+
+config SURFACE_HID_CORE
+ tristate
+ select HID
diff --git a/drivers/hid/surface-hid/Makefile b/drivers/hid/surface-hid/Makefile
new file mode 100644
index 000000000000..4ae11cf09b25
--- /dev/null
+++ b/drivers/hid/surface-hid/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile - Surface System Aggregator Module (SSAM) HID transport driver.
+#
+obj-$(CONFIG_SURFACE_HID_CORE) += surface_hid_core.o
+obj-$(CONFIG_SURFACE_HID) += surface_hid.o
+obj-$(CONFIG_SURFACE_KBD) += surface_kbd.o
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
new file mode 100644
index 000000000000..3477b31611ae
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_hid.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) HID transport driver for the
+ * generic HID interface (HID/TC=0x15 subsystem). Provides support for
+ * integrated HID devices on Surface Laptop 3, Book 3, and later.
+ *
+ * Copyright (C) 2019-2021 Blaž Hrastnik <blaz@mxxn.io>,
+ * Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+#include "surface_hid_core.h"
+
+
+/* -- SAM interface. -------------------------------------------------------- */
+
+struct surface_hid_buffer_slice {
+ __u8 entry;
+ __le32 offset;
+ __le32 length;
+ __u8 end;
+ __u8 data[];
+} __packed;
+
+static_assert(sizeof(struct surface_hid_buffer_slice) == 10);
+
+enum surface_hid_cid {
+ SURFACE_HID_CID_OUTPUT_REPORT = 0x01,
+ SURFACE_HID_CID_GET_FEATURE_REPORT = 0x02,
+ SURFACE_HID_CID_SET_FEATURE_REPORT = 0x03,
+ SURFACE_HID_CID_GET_DESCRIPTOR = 0x04,
+};
+
+static int ssam_hid_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
+{
+ u8 buffer[sizeof(struct surface_hid_buffer_slice) + 0x76];
+ struct surface_hid_buffer_slice *slice;
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ u32 buffer_len, offset, length;
+ int status;
+
+ /*
+ * Note: The 0x76 above has been chosen because that's what's used by
+ * the Windows driver. Together with the header, this leads to a
+ * 128-byte payload in total.
+ */
+
+ buffer_len = ARRAY_SIZE(buffer) - sizeof(struct surface_hid_buffer_slice);
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_HID_CID_GET_DESCRIPTOR;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(struct surface_hid_buffer_slice);
+ rqst.payload = buffer;
+
+ rsp.capacity = ARRAY_SIZE(buffer);
+ rsp.pointer = buffer;
+
+ slice = (struct surface_hid_buffer_slice *)buffer;
+ slice->entry = entry;
+ slice->end = 0;
+
+ offset = 0;
+ length = buffer_len;
+
+ while (!slice->end && offset < len) {
+ put_unaligned_le32(offset, &slice->offset);
+ put_unaligned_le32(length, &slice->length);
+
+ rsp.length = 0;
+
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
+ sizeof(*slice));
+ if (status)
+ return status;
+
+ offset = get_unaligned_le32(&slice->offset);
+ length = get_unaligned_le32(&slice->length);
+
+ /* Don't mess stuff up in case we receive garbage. */
+ if (length > buffer_len || offset > len)
+ return -EPROTO;
+
+ if (offset + length > len)
+ length = len - offset;
+
+ memcpy(buf + offset, &slice->data[0], length);
+
+ offset += length;
+ length = buffer_len;
+ }
+
+ if (offset != len) {
+ dev_err(shid->dev, "unexpected descriptor length: got %u, expected %zu\n",
+ offset, len);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_hid_set_raw_report(struct surface_hid_device *shid, u8 rprt_id, bool feature,
+ u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ u8 cid;
+
+ if (feature)
+ cid = SURFACE_HID_CID_SET_FEATURE_REPORT;
+ else
+ cid = SURFACE_HID_CID_OUTPUT_REPORT;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.instance_id = shid->uid.instance;
+ rqst.command_id = cid;
+ rqst.flags = 0;
+ rqst.length = len;
+ rqst.payload = buf;
+
+ buf[0] = rprt_id;
+
+ return ssam_retry(ssam_request_sync, shid->ctrl, &rqst, NULL);
+}
+
+static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.instance_id = shid->uid.instance;
+ rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
+ rqst.flags = 0;
+ rqst.length = sizeof(rprt_id);
+ rqst.payload = &rprt_id;
+
+ rsp.capacity = len;
+ rsp.length = 0;
+ rsp.pointer = buf;
+
+ return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(rprt_id));
+}
+
+static u32 ssam_hid_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
+
+ if (event->command_id != 0x00)
+ return 0;
+
+ hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- Transport driver. ----------------------------------------------------- */
+
+static int shid_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssam_hid_set_raw_report(shid, rprt_id, false, buf, len);
+ return status >= 0 ? len : status;
+}
+
+static int shid_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssam_hid_get_raw_report(shid, rprt_id, buf, len);
+ return status >= 0 ? len : status;
+}
+
+static int shid_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssam_hid_set_raw_report(shid, rprt_id, true, buf, len);
+ return status >= 0 ? len : status;
+}
+
+
+/* -- Driver setup. --------------------------------------------------------- */
+
+static int surface_hid_probe(struct ssam_device *sdev)
+{
+ struct surface_hid_device *shid;
+
+ shid = devm_kzalloc(&sdev->dev, sizeof(*shid), GFP_KERNEL);
+ if (!shid)
+ return -ENOMEM;
+
+ shid->dev = &sdev->dev;
+ shid->ctrl = sdev->ctrl;
+ shid->uid = sdev->uid;
+
+ shid->notif.base.priority = 1;
+ shid->notif.base.fn = ssam_hid_event_fn;
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
+ shid->notif.event.id.target_category = sdev->uid.category;
+ shid->notif.event.id.instance = sdev->uid.instance;
+ shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
+ shid->notif.event.flags = 0;
+
+ shid->ops.get_descriptor = ssam_hid_get_descriptor;
+ shid->ops.output_report = shid_output_report;
+ shid->ops.get_feature_report = shid_get_feature_report;
+ shid->ops.set_feature_report = shid_set_feature_report;
+
+ ssam_device_set_drvdata(sdev, shid);
+ return surface_hid_device_add(shid);
+}
+
+static void surface_hid_remove(struct ssam_device *sdev)
+{
+ surface_hid_device_destroy(ssam_device_get_drvdata(sdev));
+}
+
+static const struct ssam_device_id surface_hid_match[] = {
+ { SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_hid_match);
+
+static struct ssam_device_driver surface_hid_driver = {
+ .probe = surface_hid_probe,
+ .remove = surface_hid_remove,
+ .match_table = surface_hid_match,
+ .driver = {
+ .name = "surface_hid",
+ .pm = &surface_hid_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_hid_driver);
+
+MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("HID transport driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
new file mode 100644
index 000000000000..7b27ec392232
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Common/core components for the Surface System Aggregator Module (SSAM) HID
+ * transport driver. Provides support for integrated HID devices on Microsoft
+ * Surface models.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+#include "surface_hid_core.h"
+
+
+/* -- Device descriptor access. --------------------------------------------- */
+
+static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
+{
+ int status;
+
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
+ (u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
+ if (status)
+ return status;
+
+ if (shid->hid_desc.desc_len != sizeof(shid->hid_desc)) {
+ dev_err(shid->dev, "unexpected HID descriptor length: got %u, expected %zu\n",
+ shid->hid_desc.desc_len, sizeof(shid->hid_desc));
+ return -EPROTO;
+ }
+
+ if (shid->hid_desc.desc_type != HID_DT_HID) {
+ dev_err(shid->dev, "unexpected HID descriptor type: got %#04x, expected %#04x\n",
+ shid->hid_desc.desc_type, HID_DT_HID);
+ return -EPROTO;
+ }
+
+ if (shid->hid_desc.num_descriptors != 1) {
+ dev_err(shid->dev, "unexpected number of descriptors: got %u, expected 1\n",
+ shid->hid_desc.num_descriptors);
+ return -EPROTO;
+ }
+
+ if (shid->hid_desc.report_desc_type != HID_DT_REPORT) {
+ dev_err(shid->dev, "unexpected report descriptor type: got %#04x, expected %#04x\n",
+ shid->hid_desc.report_desc_type, HID_DT_REPORT);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
+{
+ int status;
+
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
+ (u8 *)&shid->attrs, sizeof(shid->attrs));
+ if (status)
+ return status;
+
+ if (get_unaligned_le32(&shid->attrs.length) != sizeof(shid->attrs)) {
+ dev_err(shid->dev, "unexpected attribute length: got %u, expected %zu\n",
+ get_unaligned_le32(&shid->attrs.length), sizeof(shid->attrs));
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+
+/* -- Transport driver (common). -------------------------------------------- */
+
+static int surface_hid_start(struct hid_device *hid)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+
+ return ssam_notifier_register(shid->ctrl, &shid->notif);
+}
+
+static void surface_hid_stop(struct hid_device *hid)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+
+ /* Note: This call will log errors for us, so ignore them here. */
+ ssam_notifier_unregister(shid->ctrl, &shid->notif);
+}
+
+static int surface_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void surface_hid_close(struct hid_device *hid)
+{
+}
+
+static int surface_hid_parse(struct hid_device *hid)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+ size_t len = get_unaligned_le16(&shid->hid_desc.report_desc_len);
+ u8 *buf;
+ int status;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_REPORT, buf, len);
+ if (!status)
+ status = hid_parse_report(hid, buf, len);
+
+ kfree(buf);
+ return status;
+}
+
+static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportnum, u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
+ return shid->ops.output_report(shid, reportnum, buf, len);
+
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT)
+ return shid->ops.get_feature_report(shid, reportnum, buf, len);
+
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT)
+ return shid->ops.set_feature_report(shid, reportnum, buf, len);
+
+ return -EIO;
+}
+
+static struct hid_ll_driver surface_hid_ll_driver = {
+ .start = surface_hid_start,
+ .stop = surface_hid_stop,
+ .open = surface_hid_open,
+ .close = surface_hid_close,
+ .parse = surface_hid_parse,
+ .raw_request = surface_hid_raw_request,
+};
+
+
+/* -- Common device setup. -------------------------------------------------- */
+
+int surface_hid_device_add(struct surface_hid_device *shid)
+{
+ int status;
+
+ status = surface_hid_load_hid_descriptor(shid);
+ if (status)
+ return status;
+
+ status = surface_hid_load_device_attributes(shid);
+ if (status)
+ return status;
+
+ shid->hid = hid_allocate_device();
+ if (IS_ERR(shid->hid))
+ return PTR_ERR(shid->hid);
+
+ shid->hid->dev.parent = shid->dev;
+ shid->hid->bus = BUS_HOST;
+ shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+ shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+ shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
+ shid->hid->country = shid->hid_desc.country_code;
+
+ snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
+ shid->hid->vendor, shid->hid->product);
+
+ strscpy(shid->hid->phys, dev_name(shid->dev), sizeof(shid->hid->phys));
+
+ shid->hid->driver_data = shid;
+ shid->hid->ll_driver = &surface_hid_ll_driver;
+
+ status = hid_add_device(shid->hid);
+ if (status)
+ hid_destroy_device(shid->hid);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(surface_hid_device_add);
+
+void surface_hid_device_destroy(struct surface_hid_device *shid)
+{
+ hid_destroy_device(shid->hid);
+}
+EXPORT_SYMBOL_GPL(surface_hid_device_destroy);
+
+
+/* -- PM ops. --------------------------------------------------------------- */
+
+#ifdef CONFIG_PM_SLEEP
+
+static int surface_hid_suspend(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
+
+ return 0;
+}
+
+static int surface_hid_resume(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->resume)
+ return d->hid->driver->resume(d->hid);
+
+ return 0;
+}
+
+static int surface_hid_freeze(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
+
+ return 0;
+}
+
+static int surface_hid_poweroff(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
+
+ return 0;
+}
+
+static int surface_hid_restore(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->reset_resume)
+ return d->hid->driver->reset_resume(d->hid);
+
+ return 0;
+}
+
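+/*
+ * Forward PM callbacks to the HID driver bound to this device: freeze and
+ * poweroff map to ->suspend() with PMSG_FREEZE/PMSG_HIBERNATE, thaw and
+ * resume map to ->resume(), and restore uses ->reset_resume(), as device
+ * state may have been lost across hibernation.
+ */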
+const struct dev_pm_ops surface_hid_pm_ops = {
+ .freeze = surface_hid_freeze,
+ .thaw = surface_hid_resume,
+ .suspend = surface_hid_suspend,
+ .resume = surface_hid_resume,
+ .poweroff = surface_hid_poweroff,
+ .restore = surface_hid_restore,
+};
+EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
+
+#else /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops surface_hid_pm_ops = { };
+EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
+
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("HID transport driver core for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/surface-hid/surface_hid_core.h b/drivers/hid/surface-hid/surface_hid_core.h
new file mode 100644
index 000000000000..4b1a7b57e035
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_hid_core.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Common/core components for the Surface System Aggregator Module (SSAM) HID
+ * transport driver. Provides support for integrated HID devices on Microsoft
+ * Surface models.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef SURFACE_HID_CORE_H
+#define SURFACE_HID_CORE_H
+
+#include <linux/hid.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+enum surface_hid_descriptor_entry {
+ SURFACE_HID_DESC_HID = 0,
+ SURFACE_HID_DESC_REPORT = 1,
+ SURFACE_HID_DESC_ATTRS = 2,
+};
+
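+/*
+ * The layout below mirrors the standard USB HID descriptor with exactly
+ * one class descriptor entry, which is why surface_hid_load_hid_descriptor()
+ * validates desc_type and report_desc_type against HID_DT_HID and
+ * HID_DT_REPORT.
+ */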
+struct surface_hid_descriptor {
+ __u8 desc_len; /* = 9 */
+ __u8 desc_type; /* = HID_DT_HID */
+ __le16 hid_version;
+ __u8 country_code;
+ __u8 num_descriptors; /* = 1 */
+
+ __u8 report_desc_type; /* = HID_DT_REPORT */
+ __le16 report_desc_len;
+} __packed;
+
+static_assert(sizeof(struct surface_hid_descriptor) == 9);
+
+struct surface_hid_attributes {
+ __le32 length;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
+ __u8 _unknown[22];
+} __packed;
+
+static_assert(sizeof(struct surface_hid_attributes) == 32);
+
+struct surface_hid_device;
+
+struct surface_hid_device_ops {
+ int (*get_descriptor)(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len);
+ int (*output_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
+ int (*get_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
+ int (*set_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
+};
+
+struct surface_hid_device {
+ struct device *dev;
+ struct ssam_controller *ctrl;
+ struct ssam_device_uid uid;
+
+ struct surface_hid_descriptor hid_desc;
+ struct surface_hid_attributes attrs;
+
+ struct ssam_event_notifier notif;
+ struct hid_device *hid;
+
+ struct surface_hid_device_ops ops;
+};
+
+int surface_hid_device_add(struct surface_hid_device *shid);
+void surface_hid_device_destroy(struct surface_hid_device *shid);
+
+extern const struct dev_pm_ops surface_hid_pm_ops;
+
+#endif /* SURFACE_HID_CORE_H */
diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
new file mode 100644
index 000000000000..0635341bc517
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_kbd.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) HID transport driver for the legacy
+ * keyboard interface (KBD/TC=0x08 subsystem). Provides support for the
+ * integrated HID keyboard on Surface Laptops 1 and 2.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+#include "surface_hid_core.h"
+
+
+/* -- SAM interface (KBD). -------------------------------------------------- */
+
+#define KBD_FEATURE_REPORT_SIZE 7 /* 6 + report ID */
+
+enum surface_kbd_cid {
+ SURFACE_KBD_CID_GET_DESCRIPTOR = 0x00,
+ SURFACE_KBD_CID_SET_CAPSLOCK_LED = 0x01,
+ SURFACE_KBD_CID_EVT_INPUT_GENERIC = 0x03,
+ SURFACE_KBD_CID_EVT_INPUT_HOTKEYS = 0x04,
+ SURFACE_KBD_CID_GET_FEATURE_REPORT = 0x0b,
+};
+
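+/*
+ * Note: The requests below are issued via ssam_retry(), which re-submits
+ * the synchronous SSAM request a limited number of times on transient
+ * communication failures before propagating the error.
+ */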
+static int ssam_kbd_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_KBD_CID_GET_DESCRIPTOR;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(entry);
+ rqst.payload = &entry;
+
+ rsp.capacity = len;
+ rsp.length = 0;
+ rsp.pointer = buf;
+
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(entry));
+ if (status)
+ return status;
+
+ if (rsp.length != len) {
+ dev_err(shid->dev, "invalid descriptor length: got %zu, expected %zu\n",
+ rsp.length, len);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_kbd_set_caps_led(struct surface_hid_device *shid, bool value)
+{
+ struct ssam_request rqst;
+ u8 value_u8 = value;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_KBD_CID_SET_CAPSLOCK_LED;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = 0;
+ rqst.length = sizeof(value_u8);
+ rqst.payload = &value_u8;
+
+ return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, NULL, sizeof(value_u8));
+}
+
+static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ u8 payload = 0;
+ int status;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_KBD_CID_GET_FEATURE_REPORT;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(payload);
+ rqst.payload = &payload;
+
+ rsp.capacity = len;
+ rsp.length = 0;
+ rsp.pointer = buf;
+
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(payload));
+ if (status)
+ return status;
+
+ if (rsp.length != len) {
+ dev_err(shid->dev, "invalid feature report length: got %zu, expected %zu\n",
+ rsp.length, len);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static bool ssam_kbd_is_input_event(const struct ssam_event *event)
+{
+ if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_GENERIC)
+ return true;
+
+ if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_HOTKEYS)
+ return true;
+
+ return false;
+}
+
+static u32 ssam_kbd_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
+
+ /*
+ * Check against the device UID manually, as the registry and the
+ * device target category do not line up.
+ */
+
+ if (shid->uid.category != event->target_category)
+ return 0;
+
+ if (shid->uid.target != event->target_id)
+ return 0;
+
+ if (shid->uid.instance != event->instance_id)
+ return 0;
+
+ if (!ssam_kbd_is_input_event(event))
+ return 0;
+
+ hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- Transport driver (KBD). ----------------------------------------------- */
+
+static int skbd_get_caps_led_value(struct hid_device *hid, u8 rprt_id, u8 *buf, size_t len)
+{
+ struct hid_field *field;
+ unsigned int offset, size;
+ int i;
+
+ /* Get LED field. */
+ field = hidinput_get_led_field(hid);
+ if (!field)
+ return -ENOENT;
+
+ /* Check if we got the correct report. */
+ if (len != hid_report_len(field->report))
+ return -ENOENT;
+
+ if (rprt_id != field->report->id)
+ return -ENOENT;
+
+ /* Find the caps lock LED index (LED usage 0x02 == Caps Lock). */
+ for (i = 0; i < field->report_count; i++)
+ if ((field->usage[i].hid & 0xffff) == 0x02)
+ break;
+
+ if (i == field->report_count)
+ return -ENOENT;
+
+ /* Extract the value; buf + 1 skips the report ID byte. */
+ size = field->report_size;
+ offset = field->report_offset + i * size;
+ return !!hid_field_extract(hid, buf + 1, offset, size);
+}
+
+static int skbd_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int caps_led;
+ int status;
+
+ caps_led = skbd_get_caps_led_value(shid->hid, rprt_id, buf, len);
+ if (caps_led < 0)
+ return -EIO; /* Only caps LED output reports are supported. */
+
+ status = ssam_kbd_set_caps_led(shid, caps_led);
+ if (status < 0)
+ return status;
+
+ return len;
+}
+
+static int skbd_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ u8 report[KBD_FEATURE_REPORT_SIZE];
+ int status;
+
+ /*
+ * The keyboard only has a single hard-coded read-only feature report
+ * of size KBD_FEATURE_REPORT_SIZE. Try to load it and compare its
+ * report ID against the requested one.
+ */
+
+ if (len < ARRAY_SIZE(report))
+ return -ENOSPC;
+
+ status = ssam_kbd_get_feature_report(shid, report, ARRAY_SIZE(report));
+ if (status < 0)
+ return status;
+
+ if (rprt_id != report[0])
+ return -ENOENT;
+
+ memcpy(buf, report, ARRAY_SIZE(report));
+ return len;
+}
+
+static int skbd_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ /* Not supported. See skbd_get_feature_report() for details. */
+ return -EIO;
+}
+
+
+/* -- Driver setup. --------------------------------------------------------- */
+
+static int surface_kbd_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct surface_hid_device *shid;
+
+ /* Add device link to EC. */
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ shid = devm_kzalloc(&pdev->dev, sizeof(*shid), GFP_KERNEL);
+ if (!shid)
+ return -ENOMEM;
+
+ shid->dev = &pdev->dev;
+ shid->ctrl = ctrl;
+
+ shid->uid.domain = SSAM_DOMAIN_SERIALHUB;
+ shid->uid.category = SSAM_SSH_TC_KBD;
+ shid->uid.target = 2;
+ shid->uid.instance = 0;
+ shid->uid.function = 0;
+
+ shid->notif.base.priority = 1;
+ shid->notif.base.fn = ssam_kbd_event_fn;
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ shid->notif.event.id.target_category = shid->uid.category;
+ shid->notif.event.id.instance = shid->uid.instance;
+ shid->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ shid->notif.event.flags = 0;
+
+ shid->ops.get_descriptor = ssam_kbd_get_descriptor;
+ shid->ops.output_report = skbd_output_report;
+ shid->ops.get_feature_report = skbd_get_feature_report;
+ shid->ops.set_feature_report = skbd_set_feature_report;
+
+ platform_set_drvdata(pdev, shid);
+ return surface_hid_device_add(shid);
+}
+
+static int surface_kbd_remove(struct platform_device *pdev)
+{
+ surface_hid_device_destroy(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct acpi_device_id surface_kbd_match[] = {
+ { "MSHW0096" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
+
+static struct platform_driver surface_kbd_driver = {
+ .probe = surface_kbd_probe,
+ .remove = surface_kbd_remove,
+ .driver = {
+ .name = "surface_keyboard",
+ .acpi_match_table = surface_kbd_match,
+ .pm = &surface_hid_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(surface_kbd_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("HID legacy transport driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index fddac7c72f64..ea126c50acc3 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -505,7 +505,7 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
HID_REQ_SET_REPORT);
}
-/**
+/*
* Play the effect with effect id @effect_id for @value times
*/
static int pidff_playback(struct input_dev *dev, int effect_id, int value)
@@ -997,7 +997,7 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
return 0;
}
-/**
+/*
* Find the implemented effect types
*/
static int pidff_find_effects(struct pidff_device *pidff,
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 45e0b1c75cb1..2fb2991dbe4c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -887,11 +887,11 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
break;
if (i == hid->maxcollection)
- return -1;
+ return -EINVAL;
}
if (!(hiddev = kzalloc(sizeof(struct hiddev), GFP_KERNEL)))
- return -1;
+ return -ENOMEM;
init_waitqueue_head(&hiddev->wait);
INIT_LIST_HEAD(&hiddev->list);
@@ -905,7 +905,7 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hid_err(hid, "Not able to get a minor for this device\n");
hid->hiddev = NULL;
kfree(hiddev);
- return -1;
+ return retval;
}
/*
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index d5b7a696a68c..e22434dfc9ef 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -63,7 +63,7 @@ static const unsigned char usb_kbd_keycode[256] = {
* new key is pressed or a key that was pressed is released.
* @led: URB for sending LEDs (e.g. numlock, ...)
* @newleds: data that will be sent with the @led URB representing which LEDs
- should be on
+ * should be on
* @name: Name of the keyboard. @dev's name field points to this buffer
* @phys: Physical path of the keyboard. @dev's phys field points to this
* buffer
@@ -91,7 +91,7 @@ struct usb_kbd {
unsigned char *leds;
dma_addr_t new_dma;
dma_addr_t leds_dma;
-
+
spinlock_t leds_lock;
bool led_urb_submitted;
@@ -175,15 +175,15 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
}
*(kbd->leds) = kbd->newleds;
-
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
pr_err("usb_submit_urb(leds) failed\n");
else
kbd->led_urb_submitted = true;
-
+
spin_unlock_irqrestore(&kbd->leds_lock, flags);
-
+
return 0;
}
@@ -205,14 +205,14 @@ static void usb_kbd_led(struct urb *urb)
}
*(kbd->leds) = kbd->newleds;
-
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC)){
hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
kbd->led_urb_submitted = false;
}
spin_unlock_irqrestore(&kbd->leds_lock, flags);
-
+
}
static int usb_kbd_open(struct input_dev *dev)
@@ -358,9 +358,9 @@ static int usb_kbd_probe(struct usb_interface *iface,
device_set_wakeup_enable(&dev->dev, 1);
return 0;
-fail2:
+fail2:
usb_kbd_free_mem(dev, kbd);
-fail1:
+fail1:
input_free_device(input_dev);
kfree(kbd);
return error;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8328ef155c46..57bfa0ae9836 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1495,7 +1495,7 @@ struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group_id,
return &group->leds[id];
}
-/**
+/*
* wacom_led_next: gives the next available led with a wacom trigger.
*
* returns the next available struct wacom_led which has its default trigger
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2d70dc4bea65..81d7d12bcf34 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1860,8 +1860,6 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
usage->type = type;
usage->code = code;
- set_bit(type, input->evbit);
-
switch (type) {
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
@@ -1869,13 +1867,9 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
- input_set_capability(input, EV_KEY, code);
- break;
case EV_MSC:
- input_set_capability(input, EV_MSC, code);
- break;
case EV_SW:
- input_set_capability(input, EV_SW, code);
+ input_set_capability(input, type, code);
break;
}
}
@@ -2187,6 +2181,18 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
}
}
+static void wacom_set_barrel_switch3_usage(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input = wacom_wac->pen_input;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (!(features->quirks & WACOM_QUIRK_AESPEN) &&
+ wacom_wac->hid_data.barrelswitch &&
+ wacom_wac->hid_data.barrelswitch2 &&
+ wacom_wac->hid_data.serialhi)
+ input_set_capability(input, EV_KEY, BTN_STYLUS3);
+}
+
static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -2227,13 +2233,21 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
break;
case HID_DG_ERASER:
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
+ break;
case HID_DG_TIPSWITCH:
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
case HID_DG_BARRELSWITCH:
+ wacom_wac->hid_data.barrelswitch = true;
+ wacom_set_barrel_switch3_usage(wacom_wac);
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS, 0);
break;
case HID_DG_BARRELSWITCH2:
+ wacom_wac->hid_data.barrelswitch2 = true;
+ wacom_set_barrel_switch3_usage(wacom_wac);
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
@@ -2245,22 +2259,12 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
case WACOM_HID_WD_SERIALHI:
+ wacom_wac->hid_data.serialhi = true;
+ wacom_set_barrel_switch3_usage(wacom_wac);
wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
-
- if (!(features->quirks & WACOM_QUIRK_AESPEN)) {
- set_bit(EV_KEY, input->evbit);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
- input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
- input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
- }
- }
break;
case WACOM_HID_WD_FINGERWHEEL:
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
break;
}
@@ -3582,11 +3586,9 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
else
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- if (features->type == HID_GENERIC) {
- /* setup has already been done; apply otherwise-undetectible quirks */
- input_set_capability(input_dev, EV_KEY, BTN_STYLUS3);
+ if (features->type == HID_GENERIC)
+ /* setup has already been done */
return 0;
- }
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 195910dd2154..71c886245dbf 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -300,6 +300,7 @@ struct hid_data {
bool tipswitch;
bool barrelswitch;
bool barrelswitch2;
+ bool serialhi;
int x;
int y;
int pressure;
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 32cd26352f38..53e13476e831 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -28,17 +28,6 @@ config HWSPINLOCK_QCOM
If unsure, say N.
-config HWSPINLOCK_SIRF
- tristate "SIRF Hardware Spinlock device"
- depends on ARCH_SIRF || COMPILE_TEST
- help
- Say y here to support the SIRF Hardware Spinlock device, which
- provides a synchronisation mechanism for the various processors
- on the SoC.
-
- It's safe to say n here if you're not interested in SIRF hardware
- spinlock or just want a bare minimum kernel.
-
config HWSPINLOCK_SPRD
tristate "SPRD Hardware Spinlock device"
depends on ARCH_SPRD || COMPILE_TEST
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index ed053e3f02be..1f8dd6f5814f 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -6,7 +6,6 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
-obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_STM32) += stm32_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
deleted file mode 100644
index a3f77120bad7..000000000000
--- a/drivers/hwspinlock/sirf_hwspinlock.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SIRF hardware spinlock driver
- *
- * Copyright (c) 2015 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/hwspinlock.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "hwspinlock_internal.h"
-
-struct sirf_hwspinlock {
- void __iomem *io_base;
- struct hwspinlock_device bank;
-};
-
-/* Number of Hardware Spinlocks*/
-#define HW_SPINLOCK_NUMBER 30
-
-/* Hardware spinlock register offsets */
-#define HW_SPINLOCK_BASE 0x404
-#define HW_SPINLOCK_OFFSET(x) (HW_SPINLOCK_BASE + 0x4 * (x))
-
-static int sirf_hwspinlock_trylock(struct hwspinlock *lock)
-{
- void __iomem *lock_addr = lock->priv;
-
- /* attempt to acquire the lock by reading value == 1 from it */
- return !!readl(lock_addr);
-}
-
-static void sirf_hwspinlock_unlock(struct hwspinlock *lock)
-{
- void __iomem *lock_addr = lock->priv;
-
- /* release the lock by writing 0 to it */
- writel(0, lock_addr);
-}
-
-static const struct hwspinlock_ops sirf_hwspinlock_ops = {
- .trylock = sirf_hwspinlock_trylock,
- .unlock = sirf_hwspinlock_unlock,
-};
-
-static int sirf_hwspinlock_probe(struct platform_device *pdev)
-{
- struct sirf_hwspinlock *hwspin;
- struct hwspinlock *hwlock;
- int idx;
-
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- hwspin = devm_kzalloc(&pdev->dev,
- struct_size(hwspin, bank.lock,
- HW_SPINLOCK_NUMBER),
- GFP_KERNEL);
- if (!hwspin)
- return -ENOMEM;
-
- /* retrieve io base */
- hwspin->io_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hwspin->io_base))
- return PTR_ERR(hwspin->io_base);
-
- for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
- hwlock = &hwspin->bank.lock[idx];
- hwlock->priv = hwspin->io_base + HW_SPINLOCK_OFFSET(idx);
- }
-
- platform_set_drvdata(pdev, hwspin);
-
- return devm_hwspin_lock_register(&pdev->dev, &hwspin->bank,
- &sirf_hwspinlock_ops, 0,
- HW_SPINLOCK_NUMBER);
-}
-
-static const struct of_device_id sirf_hwpinlock_ids[] = {
- { .compatible = "sirf,hwspinlock", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
-
-static struct platform_driver sirf_hwspinlock_driver = {
- .probe = sirf_hwspinlock_probe,
- .driver = {
- .name = "atlas7_hwspinlock",
- .of_match_table = sirf_hwpinlock_ids,
- },
-};
-
-module_platform_driver(sirf_hwspinlock_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SIRF Hardware spinlock driver");
-MODULE_AUTHOR("Wei Chen <wei.chen@csr.com>");
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7b44ba22cbe1..84530fd80998 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -97,15 +97,15 @@ config CORESIGHT_SOURCE_ETM3X
module will be called coresight-etm3x.
config CORESIGHT_SOURCE_ETM4X
- tristate "CoreSight Embedded Trace Macrocell 4.x driver"
+ tristate "CoreSight ETMv4.x / ETE driver"
depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
select PID_IN_CONTEXTIDR
help
- This driver provides support for the ETM4.x tracer module, tracing the
- instructions that a processor is executing. This is primarily useful
- for instruction level tracing. Depending on the implemented version
- data tracing may also be available.
+ This driver provides support for the CoreSight Embedded Trace Macrocell
+ version 4.x and the Embedded Trace Extensions (ETE). Both are CPU tracer
+ modules, tracing the instructions that a processor is executing. This is
+ primarily useful for instruction level tracing.
To compile this driver as a module, choose M here: the
module will be called coresight-etm4x.
@@ -173,4 +173,18 @@ config CORESIGHT_CTI_INTEGRATION_REGS
CTI trigger connections between this and other devices.These
registers are not used in normal operation and can leave devices in
an inconsistent state.
+
+config CORESIGHT_TRBE
+ tristate "Trace Buffer Extension (TRBE) driver"
+ depends on ARM64 && CORESIGHT_SOURCE_ETM4X
+ help
+ This driver provides support for the percpu Trace Buffer Extension (TRBE).
+ TRBE always needs to be used along with its corresponding percpu ETE
+ component. ETE generates trace data which is then captured with TRBE.
+ Unlike traditional sink devices, TRBE is a CPU feature accessible via
+ system registers. But its explicit dependency on the trace unit (ETE)
+ requires it to be plugged in as a coresight sink device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-trbe.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index f20e357758d1..d60816509755 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index b57bea167102..6c68d34d956e 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -23,6 +23,7 @@
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
/**
* struct coresight_node - elements of a path, from source to sink
@@ -70,6 +71,18 @@ void coresight_remove_cti_ops(void)
}
EXPORT_SYMBOL_GPL(coresight_remove_cti_ops);
+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev)
+{
+ per_cpu(csdev_sink, cpu) = csdev;
+}
+EXPORT_SYMBOL_GPL(coresight_set_percpu_sink);
+
+struct coresight_device *coresight_get_percpu_sink(int cpu)
+{
+ return per_cpu(csdev_sink, cpu);
+}
+EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
@@ -784,6 +797,14 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (csdev == sink)
goto out;
+ if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
+ sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
+ if (_coresight_build_path(sink, sink, path) == 0) {
+ found = true;
+ goto out;
+ }
+ }
+
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_device *child_dev;
@@ -999,8 +1020,12 @@ coresight_find_default_sink(struct coresight_device *csdev)
int depth = 0;
/* look for a default sink if we have not found for this device */
- if (!csdev->def_sink)
- csdev->def_sink = coresight_find_sink(csdev, &depth);
+ if (!csdev->def_sink) {
+ if (coresight_is_percpu_source(csdev))
+ csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev));
+ if (!csdev->def_sink)
+ csdev->def_sink = coresight_find_sink(csdev, &depth);
+ }
return csdev->def_sink;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index c1bec2ad3911..6f398377fec9 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -24,7 +24,26 @@
static struct pmu etm_pmu;
static bool etm_perf_up;
-static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+/*
+ * An ETM context for a running event includes the perf aux handle
+ * and aux_data. For ETM, the aux_data (etm_event_data) consists of
+ * the trace path and the sink configuration. The event data is accessible
+ * via perf_get_aux(handle). However, a sink could "end" a perf output
+ * handle via the IRQ handler. And if the "sink" encounters a failure
+ * to "begin" another session (e.g due to lack of space in the buffer),
+ * the handle will be cleared. Thus, the event_data may not be accessible
+ * from the handle when we get to the etm_event_stop(), which is required
+ * for stopping the trace path. The event_data is guaranteed to stay alive
+ * until "free_aux()", which cannot happen as long as the event is active on
+ * the ETM. Thus the event_data for the session must be part of the ETM context
+ * to make sure we can disable the trace path.
+ */
+struct etm_ctxt {
+ struct perf_output_handle handle;
+ struct etm_event_data *event_data;
+};
+
+static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/*
@@ -232,6 +251,25 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
+/*
+ * Check if two given sinks are compatible with each other,
+ * so that they can use the same sink buffers, when an event
+ * moves around.
+ */
+static bool sinks_compatible(struct coresight_device *a,
+ struct coresight_device *b)
+{
+ if (!a || !b)
+ return false;
+ /*
+ * If the sinks are of the same subtype and driven
+ * by the same driver, we can use the same buffer
+ * on these sinks.
+ */
+ return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
+ (sink_ops(a) == sink_ops(b));
+}
+
static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
@@ -239,6 +277,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink = NULL;
+ struct coresight_device *user_sink = NULL, *last_sink = NULL;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(cpu);
@@ -249,7 +288,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
/* First get the selected sink from user space. */
if (event->attr.config2) {
id = (u32)event->attr.config2;
- sink = coresight_get_sink_by_id(id);
+ sink = user_sink = coresight_get_sink_by_id(id);
}
mask = &event_data->mask;
@@ -277,14 +316,33 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/*
- * No sink provided - look for a default sink for one of the
- * devices. At present we only support topology where all CPUs
- * use the same sink [N:1], so only need to find one sink. The
- * coresight_build_path later will remove any CPU that does not
- * attach to the sink, or if we have not found a sink.
+ * No sink provided - look for a default sink for all the ETMs,
+ * where this event can be scheduled.
+ * We allocate the sink specific buffers only once for this
+ * event. If the ETMs have different default sink devices, we
+ * can only use a single "type" of sink as the event can carry
+ * only one sink specific buffer. Thus we have to make sure
+ * that the sinks are of the same type and driven by the same
+ * driver, as the one we allocate the buffer for. As such
+ * we choose the first sink and check if the remaining ETMs
+ * have a compatible default sink. We don't trace on a CPU
+ * if the sink is not compatible.
*/
- if (!sink)
+ if (!user_sink) {
+ /* Find the default sink for this ETM */
sink = coresight_find_default_sink(csdev);
+ if (!sink) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
+ /* Check if this sink compatible with the last sink */
+ if (last_sink && !sinks_compatible(last_sink, sink)) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+ last_sink = sink;
+ }
/*
* Building a path doesn't enable it, it simply builds a
@@ -312,7 +370,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
goto err;
- /* Allocate the sink buffer for this session */
+ /*
+ * Allocate the sink buffer for this session. All the sinks
+ * where this event can be scheduled are ensured to be of the
+ * same type. Thus the same sink configuration is used by the
+ * sinks.
+ */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, event, pages,
nr_pages, overwrite);
@@ -332,13 +395,18 @@ static void etm_event_start(struct perf_event *event, int flags)
{
int cpu = smp_processor_id();
struct etm_event_data *event_data;
- struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
+ struct perf_output_handle *handle = &ctxt->handle;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
if (!csdev)
goto fail;
+ /* Have we messed up our tracking ? */
+ if (WARN_ON(ctxt->event_data))
+ goto fail;
+
/*
* Deal with the ring buffer API and get a handle on the
* session's information.
@@ -374,6 +442,8 @@ static void etm_event_start(struct perf_event *event, int flags)
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_disable_path;
+ /* Save the event_data for this ETM */
+ ctxt->event_data = event_data;
out:
return;
@@ -392,13 +462,30 @@ static void etm_event_stop(struct perf_event *event, int mode)
int cpu = smp_processor_id();
unsigned long size;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
- struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
- struct etm_event_data *event_data = perf_get_aux(handle);
+ struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
+ struct perf_output_handle *handle = &ctxt->handle;
+ struct etm_event_data *event_data;
struct list_head *path;
+ /*
+ * If we still have access to the event_data via handle,
+ * confirm that we haven't messed up the tracking.
+ */
+ if (handle->event &&
+ WARN_ON(perf_get_aux(handle) != ctxt->event_data))
+ return;
+
+ event_data = ctxt->event_data;
+ /* Clear the event_data as this ETM is stopping the trace. */
+ ctxt->event_data = NULL;
+
if (event->hw.state == PERF_HES_STOPPED)
return;
+ /* We must have a valid event_data for a running event */
+ if (WARN_ON(!event_data))
+ return;
+
if (!csdev)
return;
@@ -416,7 +503,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
- if (mode & PERF_EF_UPDATE) {
+ /*
+ * If the handle is not bound to an event anymore
+ * (e.g, the sink driver was unable to restart the
+ * handle due to lack of buffer space), we don't
+ * have to do anything here.
+ */
+ if (handle->event && (mode & PERF_EF_UPDATE)) {
if (WARN_ON_ONCE(handle->event != event))
return;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index a5b13a7779c3..db881993c211 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <asm/barrier.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
#include <asm/local.h>
@@ -114,30 +115,91 @@ void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
}
}
-static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, struct csdev_access *csa)
+static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
- /* Writing 0 to TRCOSLAR unlocks the trace registers */
- etm4x_relaxed_write32(csa, 0x0, TRCOSLAR);
- drvdata->os_unlock = true;
+ u64 res = 0;
+
+ switch (offset) {
+ ETE_READ_CASES(res)
+ default:
+ pr_warn_ratelimited("ete: trying to read unsupported register @%x\n",
+ offset);
+ }
+
+ if (!_relaxed)
+ __iormb(res); /* Imitate the !relaxed I/O helpers */
+
+ return res;
+}
+
+static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
+{
+ if (!_relaxed)
+ __iowmb(); /* Imitate the !relaxed I/O helpers */
+ if (!_64bit)
+ val &= GENMASK(31, 0);
+
+ switch (offset) {
+ ETE_WRITE_CASES(val)
+ default:
+ pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n",
+ offset);
+ }
+}
+
+static void etm_detect_os_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR);
+
+ drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr);
+}
+
+static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa, u32 val)
+{
+ val = !!val;
+
+ switch (drvdata->os_lock_model) {
+ case ETM_OSLOCK_PRESENT:
+ etm4x_relaxed_write32(csa, val, TRCOSLAR);
+ break;
+ case ETM_OSLOCK_PE:
+ write_sysreg_s(val, SYS_OSLAR_EL1);
+ break;
+ default:
+ pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n",
+ smp_processor_id(), drvdata->os_lock_model);
+ fallthrough;
+ case ETM_OSLOCK_NI:
+ return;
+ }
isb();
}
+static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ WARN_ON(drvdata->cpu != smp_processor_id());
+
+ /* Writing 0 to OS Lock unlocks the trace unit registers */
+ etm_write_os_lock(drvdata, csa, 0x0);
+ drvdata->os_unlock = true;
+}
+
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
if (!WARN_ON(!drvdata->csdev))
etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
-
}
static void etm4_os_lock(struct etmv4_drvdata *drvdata)
{
if (WARN_ON(!drvdata->csdev))
return;
-
- /* Writing 0x1 to TRCOSLAR locks the trace registers */
- etm4x_relaxed_write32(&drvdata->csdev->access, 0x1, TRCOSLAR);
+ /* Writing 0x1 to OS Lock locks the trace registers */
+ etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1);
drvdata->os_unlock = false;
- isb();
}
static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
@@ -371,6 +433,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
+ /*
+ * ETE mandates that the TRCRSR is written to before
+ * enabling it.
+ */
+ if (etm4x_is_ete(drvdata))
+ etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
+
/* Enable the trace unit */
etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
@@ -654,6 +723,7 @@ static int etm4_enable(struct coresight_device *csdev,
static void etm4_disable_hw(void *info)
{
u32 control;
+ u64 trfcr;
struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
@@ -677,18 +747,32 @@ static void etm4_disable_hw(void *info)
control &= ~0x1;
/*
+ * If the CPU supports v8.4 Trace filter Control,
+ * set the ETM to trace prohibited region.
+ */
+ if (drvdata->trfc) {
+ trfcr = read_sysreg_s(SYS_TRFCR_EL1);
+ write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE),
+ SYS_TRFCR_EL1);
+ isb();
+ }
+ /*
* Make sure everything completes before disabling, as recommended
* by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
* SSTATUS") of ARM IHI 0064D
*/
dsb(sy);
isb();
+ /* Trace synchronization barrier, is a nop if not supported */
+ tsb_csync();
etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
/* wait for TRCSTATR.PMSTABLE to go to '1' */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
+ if (drvdata->trfc)
+ write_sysreg_s(trfcr, SYS_TRFCR_EL1);
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -817,13 +901,24 @@ static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
* ETMs implementing sysreg access must implement TRCDEVARCH.
*/
devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
- if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH)
+ switch (devarch & ETM_DEVARCH_ID_MASK) {
+ case ETM_DEVARCH_ETMv4x_ARCH:
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = etm4x_sysreg_read,
+ .write = etm4x_sysreg_write,
+ };
+ break;
+ case ETM_DEVARCH_ETE_ARCH:
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = ete_sysreg_read,
+ .write = ete_sysreg_write,
+ };
+ break;
+ default:
return false;
- *csa = (struct csdev_access) {
- .io_mem = false,
- .read = etm4x_sysreg_read,
- .write = etm4x_sysreg_write,
- };
+ }
drvdata->arch = etm_devarch_to_arch(devarch);
return true;
@@ -873,7 +968,7 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
return false;
}
-static void cpu_enable_tracing(void)
+static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
{
u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
u64 trfcr;
@@ -881,6 +976,7 @@ static void cpu_enable_tracing(void)
if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
return;
+ drvdata->trfc = true;
/*
* If the CPU supports v8.4 SelfHosted Tracing, enable
* tracing at the kernel EL and EL0, forcing to use the
@@ -920,6 +1016,9 @@ static void etm4_init_arch_data(void *info)
if (!etm4_init_csdev_access(drvdata, csa))
return;
+ /* Detect the support for OS Lock before we actually use it */
+ etm_detect_os_lock(drvdata, csa);
+
/* Make sure all registers are accessible */
etm4_os_unlock_csa(drvdata, csa);
etm4_cs_unlock(drvdata, csa);
@@ -1082,7 +1181,7 @@ static void etm4_init_arch_data(void *info)
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
etm4_cs_lock(drvdata, csa);
- cpu_enable_tracing();
+ cpu_enable_tracing(drvdata);
}
static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
@@ -1760,6 +1859,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
struct etmv4_drvdata *drvdata;
struct coresight_desc desc = { 0 };
struct etm4_init_arg init_arg = { 0 };
+ u8 major, minor;
+ char *type_name;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -1786,10 +1887,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
if (drvdata->cpu < 0)
return drvdata->cpu;
- desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
- if (!desc.name)
- return -ENOMEM;
-
init_arg.drvdata = drvdata;
init_arg.csa = &desc.access;
init_arg.pid = etm_pid;
@@ -1806,6 +1903,22 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
drvdata->skip_power_up = true;
+ major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
+ minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
+
+ if (etm4x_is_ete(drvdata)) {
+ type_name = "ete";
+ /* ETE v1 has major version == 0b101. Adjust this for logging. */
+ major -= 4;
+ } else {
+ type_name = "etm";
+ }
+
+ desc.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d", type_name, drvdata->cpu);
+ if (!desc.name)
+ return -ENOMEM;
+
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
@@ -1833,9 +1946,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
etmdrvdata[drvdata->cpu] = drvdata;
- dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
- drvdata->cpu, ETM_ARCH_MAJOR_VERSION(drvdata->arch),
- ETM_ARCH_MINOR_VERSION(drvdata->arch));
+ dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n",
+ drvdata->cpu, type_name, major, minor);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1979,6 +2091,7 @@ static struct amba_driver etm4x_amba_driver = {
static const struct of_device_id etm4_sysreg_match[] = {
{ .compatible = "arm,coresight-etm4x-sysreg" },
+ { .compatible = "arm,embedded-trace-extension" },
{}
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 0995a10790f4..007bad9e7ad8 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2374,12 +2374,20 @@ static inline bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
switch (offset) {
- ETM4x_SYSREG_LIST_CASES
+ ETM_COMMON_SYSREG_LIST_CASES
/*
- * Registers accessible via system instructions are always
- * implemented.
+ * Common registers to ETE & ETM4x accessible via system
+ * instructions are always implemented.
*/
return true;
+
+ ETM4x_ONLY_SYSREG_LIST_CASES
+ /*
+ * We only support etm4x and ete. So if the device is not
+ * ETE, it must be ETMv4x.
+ */
+ return !etm4x_is_ete(drvdata);
+
ETM4x_MMAP_LIST_CASES
/*
* Registers accessible only via memory-mapped registers
@@ -2389,8 +2397,13 @@ etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
* coresight_register() and the csdev is not initialized
* until that is done. So rely on the drvdata->base to
* detect if we have a memory mapped access.
+ * Also ETE doesn't implement memory mapped access, thus
+ * it is sufficient to check that we are using mmio.
*/
return !!drvdata->base;
+
+ ETE_ONLY_SYSREG_LIST_CASES
+ return etm4x_is_ete(drvdata);
}
return false;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 0af60571aa23..e5b79bdb9851 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -29,6 +29,7 @@
#define TRCAUXCTLR 0x018
#define TRCEVENTCTL0R 0x020
#define TRCEVENTCTL1R 0x024
+#define TRCRSR 0x028
#define TRCSTALLCTLR 0x02C
#define TRCTSCTLR 0x030
#define TRCSYNCPR 0x034
@@ -49,6 +50,7 @@
#define TRCSEQRSTEVR 0x118
#define TRCSEQSTR 0x11C
#define TRCEXTINSELR 0x120
+#define TRCEXTINSELRn(n) (0x120 + (n * 4)) /* n = 0-3 */
#define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */
#define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */
#define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */
@@ -126,6 +128,8 @@
#define TRCCIDR2 0xFF8
#define TRCCIDR3 0xFFC
+#define TRCRSR_TA BIT(12)
+
/*
* System instructions to access ETM registers.
* See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
@@ -160,10 +164,22 @@
#define CASE_NOP(__unused, x) \
case (x): /* fall through */
+#define ETE_ONLY_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCRSR) \
+ CASE_##op((val), TRCEXTINSELRn(1)) \
+ CASE_##op((val), TRCEXTINSELRn(2)) \
+ CASE_##op((val), TRCEXTINSELRn(3))
+
/* List of registers accessible via System instructions */
-#define ETM_SYSREG_LIST(op, val) \
- CASE_##op((val), TRCPRGCTLR) \
+#define ETM4x_ONLY_SYSREG_LIST(op, val) \
CASE_##op((val), TRCPROCSELR) \
+ CASE_##op((val), TRCVDCTLR) \
+ CASE_##op((val), TRCVDSACCTLR) \
+ CASE_##op((val), TRCVDARCCTLR) \
+ CASE_##op((val), TRCOSLAR)
+
+#define ETM_COMMON_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPRGCTLR) \
CASE_##op((val), TRCSTATR) \
CASE_##op((val), TRCCONFIGR) \
CASE_##op((val), TRCAUXCTLR) \
@@ -180,9 +196,6 @@
CASE_##op((val), TRCVIIECTLR) \
CASE_##op((val), TRCVISSCTLR) \
CASE_##op((val), TRCVIPCSSCTLR) \
- CASE_##op((val), TRCVDCTLR) \
- CASE_##op((val), TRCVDSACCTLR) \
- CASE_##op((val), TRCVDARCCTLR) \
CASE_##op((val), TRCSEQEVRn(0)) \
CASE_##op((val), TRCSEQEVRn(1)) \
CASE_##op((val), TRCSEQEVRn(2)) \
@@ -277,7 +290,6 @@
CASE_##op((val), TRCSSPCICRn(5)) \
CASE_##op((val), TRCSSPCICRn(6)) \
CASE_##op((val), TRCSSPCICRn(7)) \
- CASE_##op((val), TRCOSLAR) \
CASE_##op((val), TRCOSLSR) \
CASE_##op((val), TRCACVRn(0)) \
CASE_##op((val), TRCACVRn(1)) \
@@ -369,12 +381,38 @@
CASE_##op((val), TRCPIDR2) \
CASE_##op((val), TRCPIDR3)
-#define ETM4x_READ_SYSREG_CASES(res) ETM_SYSREG_LIST(READ, (res))
-#define ETM4x_WRITE_SYSREG_CASES(val) ETM_SYSREG_LIST(WRITE, (val))
+#define ETM4x_READ_SYSREG_CASES(res) \
+ ETM_COMMON_SYSREG_LIST(READ, (res)) \
+ ETM4x_ONLY_SYSREG_LIST(READ, (res))
+
+#define ETM4x_WRITE_SYSREG_CASES(val) \
+ ETM_COMMON_SYSREG_LIST(WRITE, (val)) \
+ ETM4x_ONLY_SYSREG_LIST(WRITE, (val))
+
+#define ETM_COMMON_SYSREG_LIST_CASES \
+ ETM_COMMON_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_ONLY_SYSREG_LIST_CASES \
+ ETM4x_ONLY_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_SYSREG_LIST_CASES \
+ ETM_COMMON_SYSREG_LIST_CASES \
+ ETM4x_ONLY_SYSREG_LIST(NOP, __unused)
-#define ETM4x_SYSREG_LIST_CASES ETM_SYSREG_LIST(NOP, __unused)
#define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused)
+/* ETE only supports system register access */
+#define ETE_READ_CASES(res) \
+ ETM_COMMON_SYSREG_LIST(READ, (res)) \
+ ETE_ONLY_SYSREG_LIST(READ, (res))
+
+#define ETE_WRITE_CASES(val) \
+ ETM_COMMON_SYSREG_LIST(WRITE, (val)) \
+ ETE_ONLY_SYSREG_LIST(WRITE, (val))
+
+#define ETE_ONLY_SYSREG_LIST_CASES \
+ ETE_ONLY_SYSREG_LIST(NOP, __unused)
+
#define read_etm4x_sysreg_offset(offset, _64bit) \
({ \
u64 __val; \
@@ -506,6 +544,20 @@
ETM_MODE_EXCL_USER)
/*
+ * TRCOSLSR.OSLM advertises the OS Lock model.
+ * OSLM[2:0] = TRCOSLSR[4:3,0]
+ *
+ * 0b000 - Trace OS Lock is not implemented.
+ * 0b010 - Trace OS Lock is implemented.
+ * 0b100 - Trace OS Lock is not implemented, unit is controlled by PE OS Lock.
+ */
+#define ETM_OSLOCK_NI 0b000
+#define ETM_OSLOCK_PRESENT 0b010
+#define ETM_OSLOCK_PE 0b100
+
+#define ETM_OSLSR_OSLM(oslsr) ((((oslsr) & GENMASK(4, 3)) >> 2) | ((oslsr) & 0x1))
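+/* Example: OSLSR = 0x10 (bit 4 set) yields OSLM = 0b100, i.e. ETM_OSLOCK_PE. */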
+
+/*
* TRCDEVARCH Bit field definitions
* Bits[31:21] - ARCHITECT = Always Arm Ltd.
* * Bits[31:28] = 0x4
@@ -541,11 +593,14 @@
((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13))
#define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4)
+#define ETM_DEVARCH_ARCHID_ETE ETM_DEVARCH_MAKE_ARCHID(0x5)
#define ETM_DEVARCH_ID_MASK \
(ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT)
#define ETM_DEVARCH_ETMv4x_ARCH \
(ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT)
+#define ETM_DEVARCH_ETE_ARCH \
+ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT)
#define TRCSTATR_IDLE_BIT 0
#define TRCSTATR_PMSTABLE_BIT 1
@@ -635,6 +690,8 @@
#define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU)
#define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0)
+#define ETM_ARCH_ETE ETM_ARCH_VERSION(5, 0)
+
/* Interpretation of resource numbers change at ETM v4.3 architecture */
#define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3)
@@ -862,6 +919,7 @@ struct etmv4_save_state {
* @nooverflow: Indicate if overflow prevention is supported.
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state over.
+ * @trfc: If the implementation supports Arm v8.4 trace filter controls.
* @config: structure holding configuration parameters.
* @save_state: State to be preserved across power loss
* @state_needs_restore: True when there is context to restore after PM exit
@@ -897,6 +955,7 @@ struct etmv4_drvdata {
u8 s_ex_level;
u8 ns_ex_level;
u8 q_support;
+ u8 os_lock_model;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
@@ -912,6 +971,7 @@ struct etmv4_drvdata {
bool nooverflow;
bool atbtrig;
bool lpoverride;
+ bool trfc;
struct etmv4_config config;
struct etmv4_save_state *save_state;
bool state_needs_restore;
@@ -940,4 +1000,9 @@ void etm4_config_trace_mode(struct etmv4_config *config);
u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit);
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit);
+
+static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata)
+{
+ return drvdata->arch >= ETM_ARCH_ETE;
+}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 3629b7885aca..c594f45319fc 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
struct of_endpoint endpoint;
int in = 0, out = 0;
+ /*
+ * Avoid warnings in of_graph_get_next_endpoint()
+ * if the device doesn't have any graph connections
+ */
+ if (!of_graph_is_present(node))
+ return;
do {
ep = of_graph_get_next_endpoint(node, ep);
if (!ep)
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index f5f654ea2994..ff1dd2092ac5 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -232,4 +232,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
struct coresight_device *ect_csdev);
+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
+struct coresight_device *coresight_get_percpu_sink(int cpu);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
new file mode 100644
index 000000000000..176868496879
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
+ * coresight sink device, which can then pair with an appropriate per-cpu
+ * coresight source device (ETE), thus generating the required trace data.
+ * Trace can be enabled via the perf framework.
+ *
+ * The AUX buffer handling is inspired from Arm SPE PMU driver.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#define DRVNAME "arm_trbe"
+
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <asm/barrier.h>
+#include "coresight-trbe.h"
+
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
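+/* e.g. with 4 pages of 4K each, PERF_IDX2OFF(20480, buf) yields offset 4096 */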
+
+/*
+ * A padding packet that helps user space tools skip sections of the
+ * captured trace data that could not be decoded. TRBE does not support
+ * formatting the trace data, unlike the legacy CoreSight sinks, so we
+ * use ETE trace packets to pad such sections of the buffer.
+ */
+#define ETE_IGNORE_PACKET 0x70
+
+/*
+ * The minimum amount of meaningful trace will contain:
+ * A-Sync, Trace Info, Trace On, Address, Atom.
+ * This is about 44 bytes of ETE trace. To be on
+ * the safe side, we assume 64 bytes is the minimum
+ * space required for a meaningful session, before
+ * we hit a "WRAP" event.
+ */
+#define TRBE_TRACE_MIN_BUF_SIZE 64
+
+enum trbe_fault_action {
+ TRBE_FAULT_ACT_WRAP,
+ TRBE_FAULT_ACT_SPURIOUS,
+ TRBE_FAULT_ACT_FATAL,
+};
+
+struct trbe_buf {
+ /*
+ * Even though trbe_base represents the start
+ * address of the vmap()'ed buffer, it is kept
+ * as an unsigned long for various arithmetic
+ * and comparison operations, and also to be
+ * consistent with the trbe_write and
+ * trbe_limit sibling pointers.
+ */
+ unsigned long trbe_base;
+ unsigned long trbe_limit;
+ unsigned long trbe_write;
+ int nr_pages;
+ void **pages;
+ bool snapshot;
+ struct trbe_cpudata *cpudata;
+};
+
+struct trbe_cpudata {
+ bool trbe_flag;
+ u64 trbe_align;
+ int cpu;
+ enum cs_mode mode;
+ struct trbe_buf *buf;
+ struct trbe_drvdata *drvdata;
+};
+
+struct trbe_drvdata {
+ struct trbe_cpudata __percpu *cpudata;
+ struct perf_output_handle * __percpu *handle;
+ struct hlist_node hotplug_node;
+ int irq;
+ cpumask_t supported_cpus;
+ enum cpuhp_state trbe_online;
+ struct platform_device *pdev;
+};
+
+static int trbe_alloc_node(struct perf_event *event)
+{
+ if (event->cpu == -1)
+ return NUMA_NO_NODE;
+ return cpu_to_node(event->cpu);
+}
+
+static void trbe_drain_buffer(void)
+{
+ tsb_csync();
+ dsb(nsh);
+}
+
+static void trbe_drain_and_disable_local(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ trbe_drain_buffer();
+
+ /*
+ * Disable the TRBE without clearing LIMITPTR which
+ * might be required for fetching the buffer limits.
+ */
+ trblimitr &= ~TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ isb();
+}
+
+static void trbe_reset_local(void)
+{
+ trbe_drain_and_disable_local();
+ write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ write_sysreg_s(0, SYS_TRBPTR_EL1);
+ write_sysreg_s(0, SYS_TRBBASER_EL1);
+ write_sysreg_s(0, SYS_TRBSR_EL1);
+}
+
+static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ /*
+ * We cannot proceed with the buffer collection and we
+ * do not have any data for the current session. The
+ * etm_perf driver expects to close out the aux_buffer
+ * at event_stop(). So disable the TRBE here and let
+ * update_buffer() return a size of 0.
+ */
+ trbe_drain_and_disable_local();
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
+}
+
+/*
+ * TRBE Buffer Management
+ *
+ * The TRBE buffer spans from the base pointer to the limit pointer. When enabled,
+ * it starts writing trace data from the write pointer onward, up to the limit
+ * pointer. When the write pointer reaches the address just before the limit
+ * pointer, it gets wrapped around to the base pointer again. This is called a
+ * TRBE wrap event, which generates a maintenance interrupt when operated in WRAP
+ * or FILL mode. This driver uses FILL mode, where the TRBE stops trace collection
+ * at the wrap event. The IRQ handler updates the AUX buffer and re-enables the
+ * TRBE with updated WRITE and LIMIT pointers.
+ *
+ * Wrap around with an IRQ
+ * ------ < ------ < ------- < ----- < -----
+ * | |
+ * ------ > ------ > ------- > ----- > -----
+ *
+ * +---------------+-----------------------+
+ * | | |
+ * +---------------+-----------------------+
+ * Base Pointer Write Pointer Limit Pointer
+ *
+ * The base and limit pointers always need to be PAGE_SIZE aligned. But the write
+ * pointer can be aligned to the implementation-defined TRBE trace buffer alignment
+ * as captured in trbe_cpudata->trbe_align.
+ *
+ *
+ * head tail wakeup
+ * +---------------------------------------+----- ~ ~ ------
+ * |$$$$$$$|################|$$$$$$$$$$$$$$| |
+ * +---------------------------------------+----- ~ ~ ------
+ * Base Pointer Write Pointer Limit Pointer
+ *
+ * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
+ * values that track all the driver writes and user reads of the perf auxiliary
+ * buffer. Generally, [head..tail] is the area the driver can write into, unless
+ * the wakeup is behind the tail. The enabled TRBE buffer span needs to be adjusted
+ * and configured depending on the perf_output_handle indices, so that the driver
+ * does not overwrite areas of the perf auxiliary buffer which are being, or are
+ * yet to be, consumed by user space. The enabled TRBE buffer area is a moving
+ * subset of the allocated perf auxiliary buffer.
+ */
+static void trbe_pad_buf(struct perf_output_handle *handle, int len)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len);
+ if (!buf->snapshot)
+ perf_aux_output_skip(handle, len);
+}
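
As a rough, stand-alone sketch of the PERF_IDX2OFF() arithmetic that the buffer-management comment above relies on (hosted C with illustrative values, not kernel code):

        #include <stdio.h>

        #define PAGE_SHIFT 12
        /* mirrors PERF_IDX2OFF(): map a monotonic index into the buffer */
        #define IDX2OFF(idx, nr_pages) \
                ((idx) % ((unsigned long)(nr_pages) << PAGE_SHIFT))

        int main(void)
        {
                unsigned long nr_pages = 4;              /* 16 KiB AUX buffer */
                unsigned long head = 70000, size = 3000; /* monotonic indices */

                printf("head offset = %lu\n", IDX2OFF(head, nr_pages));        /* 4464 */
                printf("tail offset = %lu\n", IDX2OFF(head + size, nr_pages)); /* 7464 */
                return 0;
        }

The offsets wrap within the buffer while the handle indices themselves keep growing, which is why every access below goes through PERF_IDX2OFF().
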
+
+static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ /*
+ * The ETE trace has alignment synchronization packets allowing
+ * the decoder to reset in case of an overflow or corruption.
+ * So we can use the entire buffer for the snapshot mode.
+ */
+ return buf->nr_pages * PAGE_SIZE;
+}
+
+/*
+ * TRBE Limit Calculation
+ *
+ * The following markers are used to illustrate various TRBE buffer situations.
+ *
+ * $$$$ - Data area, unconsumed captured trace data, not to be overwritten
+ * #### - Free area, enabled, trace will be written
+ * %%%% - Free area, disabled, trace will not be written
+ * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
+ */
+static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+ const u64 bufsize = buf->nr_pages * PAGE_SIZE;
+ u64 limit = bufsize;
+ u64 head, tail, wakeup;
+
+ head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * head
+ * ------->|
+ * |
+ * head TRBE align tail
+ * +----|-------|---------------|-------+
+ * |$$$$|=======|###############|$$$$$$$|
+ * +----|-------|---------------|-------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * The perf aux buffer output head position can be misaligned depending on
+ * various factors, including user space reads. If misaligned, the head
+ * needs to be aligned before the TRBE can be configured. Pad the alignment
+ * gap with ETE_IGNORE_PACKET bytes, which user tools will ignore, thus
+ * skipping this section and advancing the head.
+ */
+ if (!IS_ALIGNED(head, cpudata->trbe_align)) {
+ unsigned long delta = roundup(head, cpudata->trbe_align) - head;
+
+ delta = min(delta, handle->size);
+ trbe_pad_buf(handle, delta);
+ head = PERF_IDX2OFF(handle->head, buf);
+ }
+
+ /*
+ * head = tail (size = 0)
+ * +----|-------------------------------+
+ * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ |
+ * +----|-------------------------------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * The perf aux buffer does not have any space for the driver to write
+ * into. Just communicate the trace truncation event to user space by
+ * marking it with PERF_AUX_FLAG_TRUNCATED.
+ */
+ if (!handle->size) {
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return 0;
+ }
+
+ /* Compute the tail and wakeup indices now that we've aligned head */
+ tail = PERF_IDX2OFF(handle->head + handle->size, buf);
+ wakeup = PERF_IDX2OFF(handle->wakeup, buf);
+
+ /*
+ * Let's calculate the buffer area that the TRBE could write into. There
+ * are three possible scenarios here. The limit needs to be aligned to
+ * PAGE_SIZE, per the TRBE requirement. Always avoid clobbering the
+ * unconsumed data.
+ *
+ * 1) head < tail
+ *
+ * head tail
+ * +----|-----------------------|-------+
+ * |$$$$|#######################|$$$$$$$|
+ * +----|-----------------------|-------+
+ * trbe_base limit trbe_base + nr_pages
+ *
+ * The TRBE could write into the [head..tail] area. Unless the tail is
+ * right at the end of the buffer, neither a wrap around nor an IRQ is
+ * expected while enabled.
+ *
+ * 2) head == tail
+ *
+ * head = tail (size > 0)
+ * +----|-------------------------------+
+ * |%%%%|###############################|
+ * +----|-------------------------------+
+ * trbe_base limit = trbe_base + nr_pages
+ *
+ * The TRBE should just write into the [head..base + nr_pages] area even
+ * though the entire buffer is empty. The reason is that when the trace
+ * reaches the end of the buffer, it will just wrap around with an IRQ,
+ * giving an opportunity to reconfigure the buffer.
+ *
+ * 3) tail < head
+ *
+ * tail head
+ * +----|-----------------------|-------+
+ * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
+ * +----|-----------------------|-------+
+ * trbe_base limit = trbe_base + nr_pages
+ *
+ * The TRBE should just write into the [head..base + nr_pages] area even
+ * though [trbe_base..tail] is also empty. The reason is that when the
+ * trace reaches the end of the buffer, it will just wrap around with an
+ * IRQ, giving an opportunity to reconfigure the buffer.
+ */
+ if (head < tail)
+ limit = round_down(tail, PAGE_SIZE);
+
+ /*
+ * Wakeup may be arbitrarily far into the future. If it's not in the
+ * current generation, either we'll wrap before hitting it, or it's
+ * in the past and has been handled already.
+ *
+ * If there's a wakeup before we wrap, arrange to be woken up by the
+ * page boundary following it. Keep the tail boundary if that's lower.
+ *
+ * head wakeup tail
+ * +----|---------------|-------|-------+
+ * |$$$$|###############|%%%%%%%|$$$$$$$|
+ * +----|---------------|-------|-------+
+ * trbe_base limit trbe_base + nr_pages
+ */
+ if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
+ limit = min(limit, round_up(wakeup, PAGE_SIZE));
+
+ /*
+ * There are two situations when this can happen, i.e. the limit is
+ * before the head and hence the TRBE cannot be configured.
+ *
+ * 1) head < tail (aligned down with PAGE_SIZE), and both are within
+ * the same PAGE_SIZE range.
+ *
+ * PAGE_SIZE
+ * |----------------------|
+ *
+ * limit head tail
+ * +------------|------|--------|-------+
+ * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
+ * +------------|------|--------|-------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * 2) head < wakeup (aligned up with PAGE_SIZE) < tail, and both the
+ * head and wakeup are within the same PAGE_SIZE range.
+ *
+ * PAGE_SIZE
+ * |----------------------|
+ *
+ * limit head wakeup tail
+ * +----|------|-------|--------|-------+
+ * |$$$$$$$$$$$|=======|========|$$$$$$$|
+ * +----|------|-------|--------|-------+
+ * trbe_base trbe_base + nr_pages
+ */
+ if (limit > head)
+ return limit;
+
+ trbe_pad_buf(handle, handle->size);
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return 0;
+}
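
A hedged, stand-alone walk-through of scenario 1 above (head < tail), using illustrative numbers rather than the kernel's helpers:

        #include <stdio.h>

        #define PAGE_SZ 4096UL

        static unsigned long round_dn(unsigned long x, unsigned long a)
        {
                return x & ~(a - 1);    /* power-of-two alignment assumed */
        }

        int main(void)
        {
                unsigned long bufsize = 16 * PAGE_SZ;      /* 16-page AUX buffer    */
                unsigned long head    = 5 * PAGE_SZ + 64;  /* aligned to trbe_align */
                unsigned long tail    = 11 * PAGE_SZ + 8;  /* head + handle->size   */
                unsigned long limit   = bufsize;

                if (head < tail)                           /* scenario 1            */
                        limit = round_dn(tail, PAGE_SZ);   /* 11 pages = 45056      */

                printf("limit=%lu usable=%lu\n", limit, limit - head);
                return 0;
        }

With head == tail or tail < head, the limit stays at the end of the buffer, and the eventual wrap IRQ provides the point at which to reconfigure.
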
+
+static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = perf_get_aux(handle);
+ u64 limit = __trbe_normal_offset(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * If the head is too close to the limit and we don't
+ * have space for a meaningful run, we would rather
+ * pad it and start fresh.
+ */
+ if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) {
+ trbe_pad_buf(handle, limit - head);
+ limit = __trbe_normal_offset(handle);
+ }
+ return limit;
+}
+
+static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ unsigned long offset;
+
+ if (buf->snapshot)
+ offset = trbe_snapshot_offset(handle);
+ else
+ offset = trbe_normal_offset(handle);
+ return buf->trbe_base + offset;
+}
+
+static void clr_trbe_status(void)
+{
+ u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+ WARN_ON(is_trbe_enabled());
+ trbsr &= ~TRBSR_IRQ;
+ trbsr &= ~TRBSR_TRG;
+ trbsr &= ~TRBSR_WRAP;
+ trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
+ trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
+ trbsr &= ~TRBSR_STOP;
+ write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static void set_trbe_limit_pointer_enabled(unsigned long addr)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+
+ trblimitr &= ~TRBLIMITR_NVM;
+ trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
+ trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
+ trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+ /*
+ * Fill trace buffer mode is used here while configuring the
+ * TRBE for trace capture. In this particular mode, the trace
+ * collection is stopped and a maintenance interrupt is raised
+ * when the current write pointer wraps. This pause in trace
+ * collection gives the software an opportunity to capture the
+ * trace data in the interrupt handler, before reconfiguring
+ * the TRBE.
+ */
+ trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;
+
+ /*
+ * Trigger mode is not used here while configuring the TRBE for
+ * the trace capture. Hence just keep this in the ignore mode.
+ */
+ trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
+ TRBLIMITR_TRIG_MODE_SHIFT;
+ trblimitr |= (addr & PAGE_MASK);
+
+ trblimitr |= TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+
+ /* Synchronize the TRBE enable event */
+ isb();
+}
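
For illustration, a hosted-C sketch of the field packing performed above; the shift values are assumptions standing in for the arch header's TRBLIMITR_* definitions, which are not reproduced in this hunk:

        #include <stdint.h>
        #include <stdio.h>

        /* Illustrative field positions only -- the real shifts and masks
         * come from the arch header, not from this sketch. */
        #define FILL_MODE_SHIFT 1
        #define TRIG_MODE_SHIFT 3
        #define ENABLE          (1ULL << 0)

        int main(void)
        {
                uint64_t limit = 0xffff8000ULL & ~0xfffULL; /* page-aligned LIMIT */
                uint64_t r = 0;

                r |= 0ULL << FILL_MODE_SHIFT;  /* TRBE_FILL_MODE_FILL   */
                r |= 3ULL << TRIG_MODE_SHIFT;  /* TRBE_TRIG_MODE_IGNORE */
                r |= limit | ENABLE;
                printf("TRBLIMITR = %#llx\n", (unsigned long long)r);
                return 0;
        }
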
+
+static void trbe_enable_hw(struct trbe_buf *buf)
+{
+ WARN_ON(buf->trbe_write < buf->trbe_base);
+ WARN_ON(buf->trbe_write >= buf->trbe_limit);
+ set_trbe_disabled();
+ isb();
+ clr_trbe_status();
+ set_trbe_base_pointer(buf->trbe_base);
+ set_trbe_write_pointer(buf->trbe_write);
+
+ /*
+ * Synchronize all the register updates
+ * so far, before enabling the TRBE.
+ */
+ isb();
+ set_trbe_limit_pointer_enabled(buf->trbe_limit);
+}
+
+static enum trbe_fault_action trbe_get_fault_act(u64 trbsr)
+{
+ int ec = get_trbe_ec(trbsr);
+ int bsc = get_trbe_bsc(trbsr);
+
+ WARN_ON(is_trbe_running(trbsr));
+ if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
+ return TRBE_FAULT_ACT_FATAL;
+
+ if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
+ return TRBE_FAULT_ACT_FATAL;
+
+ if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) {
+ if (get_trbe_write_pointer() == get_trbe_base_pointer())
+ return TRBE_FAULT_ACT_WRAP;
+ }
+ return TRBE_FAULT_ACT_SPURIOUS;
+}
+
+static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
+{
+ struct trbe_buf *buf;
+ struct page **pglist;
+ int i;
+
+ /*
+ * The TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
+ * just a single page, there would not be any room left to write into a
+ * partially filled TRBE buffer after the page size alignment. Hence
+ * restrict the minimum buffer size to two pages.
+ */
+ if (nr_pages < 2)
+ return NULL;
+
+ buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pglist[i] = virt_to_page(pages[i]);
+
+ buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->trbe_base) {
+ kfree(pglist);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+ buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
+ buf->trbe_write = buf->trbe_base;
+ buf->snapshot = snapshot;
+ buf->nr_pages = nr_pages;
+ buf->pages = pages;
+ kfree(pglist);
+ return buf;
+}
+
+static void arm_trbe_free_buffer(void *config)
+{
+ struct trbe_buf *buf = config;
+
+ vunmap((void *)buf->trbe_base);
+ kfree(buf);
+}
+
+static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *config)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct trbe_buf *buf = config;
+ enum trbe_fault_action act;
+ unsigned long size, offset;
+ unsigned long write, base, status;
+ unsigned long flags;
+
+ WARN_ON(buf->cpudata != cpudata);
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (cpudata->mode != CS_MODE_PERF)
+ return 0;
+
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
+
+ /*
+ * We are about to disable the TRBE, which could in turn
+ * fill up the buffer, triggering an IRQ. This could be
+ * consumed by the PE asynchronously, causing a race here
+ * against the IRQ handler in closing out the handle. So,
+ * let us make sure the IRQ can't trigger while we are
+ * collecting the buffer. We also make sure that a WRAP
+ * event is handled accordingly.
+ */
+ local_irq_save(flags);
+
+ /*
+ * If the TRBE was disabled due to lack of space in the AUX buffer or a
+ * spurious fault, the driver leaves it disabled, truncating the buffer.
+ * Since the etm_perf driver expects to close out the AUX buffer, the
+ * driver skips it. Thus, just pass in 0 size here to indicate that the
+ * buffer was truncated.
+ */
+ if (!is_trbe_enabled()) {
+ size = 0;
+ goto done;
+ }
+ /*
+ * The perf handle structure needs to be shared with the TRBE IRQ handler
+ * for capturing trace data and restarting the handle. There is a chance
+ * of a dangling-reference crash when the etm event is being stopped while
+ * a TRBE IRQ is also being processed. This happens due to the release of
+ * the perf handle via perf_aux_output_end() in etm_event_stop(). Stopping
+ * the TRBE here ensures that no IRQ can be generated when the perf handle
+ * gets freed in etm_event_stop().
+ */
+ trbe_drain_and_disable_local();
+ write = get_trbe_write_pointer();
+ base = get_trbe_base_pointer();
+
+ /* Check if there is a pending interrupt and handle it here */
+ status = read_sysreg_s(SYS_TRBSR_EL1);
+ if (is_trbe_irq(status)) {
+
+ /*
+ * Now that we are handling the IRQ here, clear the IRQ
+ * from the status, to let the irq handler know that it
+ * is taken care of.
+ */
+ clr_trbe_irq();
+ isb();
+
+ act = trbe_get_fault_act(status);
+ /*
+ * If this was not due to a WRAP event, we have some
+ * errors, and as such the buffer is empty.
+ */
+ if (act != TRBE_FAULT_ACT_WRAP) {
+ size = 0;
+ goto done;
+ }
+
+ /*
+ * Otherwise, the buffer is full and the write pointer
+ * has reached the base. Adjust this back to the limit
+ * pointer for the correct size. Also, mark the buffer truncated.
+ */
+ write = get_trbe_limit_pointer();
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ }
+
+ offset = write - base;
+ if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf)))
+ size = 0;
+ else
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+
+done:
+ local_irq_restore(flags);
+
+ if (buf->snapshot)
+ handle->head += size;
+ return size;
+}
+
+static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct perf_output_handle *handle = data;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (mode != CS_MODE_PERF)
+ return -EINVAL;
+
+ *this_cpu_ptr(drvdata->handle) = handle;
+ cpudata->buf = buf;
+ cpudata->mode = mode;
+ buf->cpudata = cpudata;
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_stop_and_truncate_event(handle);
+ return 0;
+ }
+ trbe_enable_hw(buf);
+ return 0;
+}
+
+static int arm_trbe_disable(struct coresight_device *csdev)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct trbe_buf *buf = cpudata->buf;
+
+ WARN_ON(buf->cpudata != cpudata);
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (cpudata->mode != CS_MODE_PERF)
+ return -EINVAL;
+
+ trbe_drain_and_disable_local();
+ buf->cpudata = NULL;
+ cpudata->buf = NULL;
+ cpudata->mode = CS_MODE_DISABLED;
+ return 0;
+}
+
+static void trbe_handle_spurious(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_drain_and_disable_local();
+ return;
+ }
+ trbe_enable_hw(buf);
+}
+
+static void trbe_handle_overflow(struct perf_output_handle *handle)
+{
+ struct perf_event *event = handle->event;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ unsigned long offset, size;
+ struct etm_event_data *event_data;
+
+ offset = get_trbe_limit_pointer() - get_trbe_base_pointer();
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+ if (buf->snapshot)
+ handle->head += size;
+
+ /*
+ * Mark the buffer as truncated, as we have stopped the trace
+ * collection upon the WRAP event, without stopping the source.
+ */
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW |
+ PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, size);
+ event_data = perf_aux_output_begin(handle, event);
+ if (!event_data) {
+ /*
+ * We are unable to restart the trace collection,
+ * thus leave the TRBE disabled. The etm-perf driver
+ * is able to detect this with a disconnected handle
+ * (handle->event = NULL).
+ */
+ trbe_drain_and_disable_local();
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
+ return;
+ }
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_stop_and_truncate_event(handle);
+ return;
+ }
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
+ trbe_enable_hw(buf);
+}
+
+static bool is_perf_trbe(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+ struct trbe_drvdata *drvdata = cpudata->drvdata;
+ int cpu = smp_processor_id();
+
+ WARN_ON(buf->trbe_base != get_trbe_base_pointer());
+ WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
+
+ if (cpudata->mode != CS_MODE_PERF)
+ return false;
+
+ if (cpudata->cpu != cpu)
+ return false;
+
+ if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ return false;
+
+ return true;
+}
+
+static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
+{
+ struct perf_output_handle **handle_ptr = dev;
+ struct perf_output_handle *handle = *handle_ptr;
+ enum trbe_fault_action act;
+ u64 status;
+
+ /*
+ * Ensure the trace is visible to the CPUs and
+ * any external aborts have been resolved.
+ */
+ trbe_drain_and_disable_local();
+
+ status = read_sysreg_s(SYS_TRBSR_EL1);
+ /*
+ * If the pending IRQ was handled by the update_buffer
+ * callback, we have nothing to do here.
+ */
+ if (!is_trbe_irq(status))
+ return IRQ_NONE;
+
+ clr_trbe_irq();
+ isb();
+
+ if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
+ return IRQ_NONE;
+
+ if (!is_perf_trbe(handle))
+ return IRQ_NONE;
+
+ /*
+ * Ensure perf callbacks have completed, which may disable
+ * the trace buffer in response to a TRUNCATION flag.
+ */
+ irq_work_run();
+
+ act = trbe_get_fault_act(status);
+ switch (act) {
+ case TRBE_FAULT_ACT_WRAP:
+ trbe_handle_overflow(handle);
+ break;
+ case TRBE_FAULT_ACT_SPURIOUS:
+ trbe_handle_spurious(handle);
+ break;
+ case TRBE_FAULT_ACT_FATAL:
+ trbe_stop_and_truncate_event(handle);
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct coresight_ops_sink arm_trbe_sink_ops = {
+ .enable = arm_trbe_enable,
+ .disable = arm_trbe_disable,
+ .alloc_buffer = arm_trbe_alloc_buffer,
+ .free_buffer = arm_trbe_free_buffer,
+ .update_buffer = arm_trbe_update_buffer,
+};
+
+static const struct coresight_ops arm_trbe_cs_ops = {
+ .sink_ops = &arm_trbe_sink_ops,
+};
+
+static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%llx\n", cpudata->trbe_align);
+}
+static DEVICE_ATTR_RO(align);
+
+static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cpudata->trbe_flag);
+}
+static DEVICE_ATTR_RO(flag);
+
+static struct attribute *arm_trbe_attrs[] = {
+ &dev_attr_align.attr,
+ &dev_attr_flag.attr,
+ NULL,
+};
+
+static const struct attribute_group arm_trbe_group = {
+ .attrs = arm_trbe_attrs,
+};
+
+static const struct attribute_group *arm_trbe_groups[] = {
+ &arm_trbe_group,
+ NULL,
+};
+
+static void arm_trbe_enable_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+
+ trbe_reset_local();
+ enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
+}
+
+static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+{
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+ struct coresight_desc desc = { 0 };
+ struct device *dev;
+
+ if (WARN_ON(trbe_csdev))
+ return;
+
+ dev = &cpudata->drvdata->pdev->dev;
+ desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
+ if (!desc.name)
+ goto cpu_clear;
+
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
+ desc.ops = &arm_trbe_cs_ops;
+ desc.pdata = dev_get_platdata(dev);
+ desc.groups = arm_trbe_groups;
+ desc.dev = dev;
+ trbe_csdev = coresight_register(&desc);
+ if (IS_ERR(trbe_csdev))
+ goto cpu_clear;
+
+ dev_set_drvdata(&trbe_csdev->dev, cpudata);
+ coresight_set_percpu_sink(cpu, trbe_csdev);
+ return;
+cpu_clear:
+ cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_probe_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+ int cpu = smp_processor_id();
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ u64 trbidr;
+
+ if (WARN_ON(!cpudata))
+ goto cpu_clear;
+
+ if (!is_trbe_available()) {
+ pr_err("TRBE is not implemented on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+
+ trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
+ if (!is_trbe_programmable(trbidr)) {
+ pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+
+ cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr);
+ if (cpudata->trbe_align > SZ_2K) {
+ pr_err("Unsupported alignment on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+ cpudata->trbe_flag = get_trbe_flag_update(trbidr);
+ cpudata->cpu = cpu;
+ cpudata->drvdata = drvdata;
+ return;
+cpu_clear:
+ cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_remove_coresight_cpu(void *info)
+{
+ int cpu = smp_processor_id();
+ struct trbe_drvdata *drvdata = info;
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local();
+ if (trbe_csdev) {
+ coresight_unregister(trbe_csdev);
+ cpudata->drvdata = NULL;
+ coresight_set_percpu_sink(cpu, NULL);
+ }
+}
+
+static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
+{
+ int cpu;
+
+ drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
+ if (!drvdata->cpudata)
+ return -ENOMEM;
+
+ for_each_cpu(cpu, &drvdata->supported_cpus) {
+ smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_register_coresight_cpu(drvdata, cpu);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
+ }
+ return 0;
+}
+
+static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
+{
+ int cpu;
+
+ for_each_cpu(cpu, &drvdata->supported_cpus)
+ smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
+ free_percpu(drvdata->cpudata);
+ return 0;
+}
+
+static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+ struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+
+ /*
+ * If this CPU was not probed for TRBE,
+ * initialize it now.
+ */
+ if (!coresight_get_percpu_sink(cpu)) {
+ arm_trbe_probe_cpu(drvdata);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_register_coresight_cpu(drvdata, cpu);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_enable_cpu(drvdata);
+ } else {
+ arm_trbe_enable_cpu(drvdata);
+ }
+ }
+ return 0;
+}
+
+static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local();
+ }
+ return 0;
+}
+
+static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
+{
+ enum cpuhp_state trbe_online;
+ int ret;
+
+ trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+ arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
+ if (trbe_online < 0)
+ return trbe_online;
+
+ ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
+ if (ret) {
+ cpuhp_remove_multi_state(trbe_online);
+ return ret;
+ }
+ drvdata->trbe_online = trbe_online;
+ return 0;
+}
+
+static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
+{
+ cpuhp_remove_multi_state(drvdata->trbe_online);
+}
+
+static int arm_trbe_probe_irq(struct platform_device *pdev,
+ struct trbe_drvdata *drvdata)
+{
+ int ret;
+
+ drvdata->irq = platform_get_irq(pdev, 0);
+ if (drvdata->irq < 0) {
+ pr_err("IRQ not found for the platform device\n");
+ return drvdata->irq;
+ }
+
+ if (!irq_is_percpu(drvdata->irq)) {
+ pr_err("IRQ is not a PPI\n");
+ return -EINVAL;
+ }
+
+ if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
+ return -EINVAL;
+
+ drvdata->handle = alloc_percpu(struct perf_output_handle *);
+ if (!drvdata->handle)
+ return -ENOMEM;
+
+ ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
+ if (ret) {
+ free_percpu(drvdata->handle);
+ return ret;
+ }
+ return 0;
+}
+
+static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
+{
+ free_percpu_irq(drvdata->irq, drvdata->handle);
+ free_percpu(drvdata->handle);
+}
+
+static int arm_trbe_device_probe(struct platform_device *pdev)
+{
+ struct coresight_platform_data *pdata;
+ struct trbe_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ dev_set_drvdata(dev, drvdata);
+ dev->platform_data = pdata;
+ drvdata->pdev = pdev;
+ ret = arm_trbe_probe_irq(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ ret = arm_trbe_probe_coresight(drvdata);
+ if (ret)
+ goto probe_failed;
+
+ ret = arm_trbe_probe_cpuhp(drvdata);
+ if (ret)
+ goto cpuhp_failed;
+
+ return 0;
+cpuhp_failed:
+ arm_trbe_remove_coresight(drvdata);
+probe_failed:
+ arm_trbe_remove_irq(drvdata);
+ return ret;
+}
+
+static int arm_trbe_device_remove(struct platform_device *pdev)
+{
+ struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ arm_trbe_remove_cpuhp(drvdata);
+ arm_trbe_remove_coresight(drvdata);
+ arm_trbe_remove_irq(drvdata);
+ return 0;
+}
+
+static const struct of_device_id arm_trbe_of_match[] = {
+ { .compatible = "arm,trace-buffer-extension"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
+
+static struct platform_driver arm_trbe_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(arm_trbe_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = arm_trbe_device_probe,
+ .remove = arm_trbe_device_remove,
+};
+
+static int __init arm_trbe_init(void)
+{
+ int ret;
+
+ if (arm64_kernel_unmapped_at_el0()) {
+ pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = platform_driver_register(&arm_trbe_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("Error registering %s platform driver\n", DRVNAME);
+ return ret;
+}
+
+static void __exit arm_trbe_exit(void)
+{
+ platform_driver_unregister(&arm_trbe_driver);
+}
+module_init(arm_trbe_init);
+module_exit(arm_trbe_exit);
+
+MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
+MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
new file mode 100644
index 000000000000..abf3e36082f0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This contains all required hardware related helper functions for
+ * Trace Buffer Extension (TRBE) driver in the coresight framework.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "coresight-etm-perf.h"
+
+static inline bool is_trbe_available(void)
+{
+ u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+ unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+
+ return trbe >= 0b0001;
+}
+
+static inline bool is_trbe_enabled(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ return trblimitr & TRBLIMITR_ENABLE;
+}
+
+#define TRBE_EC_OTHERS 0
+#define TRBE_EC_STAGE1_ABORT 36
+#define TRBE_EC_STAGE2_ABORT 37
+
+static inline int get_trbe_ec(u64 trbsr)
+{
+ return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
+}
+
+#define TRBE_BSC_NOT_STOPPED 0
+#define TRBE_BSC_FILLED 1
+#define TRBE_BSC_TRIGGERED 2
+
+static inline int get_trbe_bsc(u64 trbsr)
+{
+ return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
+}
+
+static inline void clr_trbe_irq(void)
+{
+ u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+ trbsr &= ~TRBSR_IRQ;
+ write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static inline bool is_trbe_irq(u64 trbsr)
+{
+ return trbsr & TRBSR_IRQ;
+}
+
+static inline bool is_trbe_trg(u64 trbsr)
+{
+ return trbsr & TRBSR_TRG;
+}
+
+static inline bool is_trbe_wrap(u64 trbsr)
+{
+ return trbsr & TRBSR_WRAP;
+}
+
+static inline bool is_trbe_abort(u64 trbsr)
+{
+ return trbsr & TRBSR_ABORT;
+}
+
+static inline bool is_trbe_running(u64 trbsr)
+{
+ return !(trbsr & TRBSR_STOP);
+}
+
+#define TRBE_TRIG_MODE_STOP 0
+#define TRBE_TRIG_MODE_IRQ 1
+#define TRBE_TRIG_MODE_IGNORE 3
+
+#define TRBE_FILL_MODE_FILL 0
+#define TRBE_FILL_MODE_WRAP 1
+#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3
+
+static inline void set_trbe_disabled(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ trblimitr &= ~TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+}
+
+static inline bool get_trbe_flag_update(u64 trbidr)
+{
+ return trbidr & TRBIDR_FLAG;
+}
+
+static inline bool is_trbe_programmable(u64 trbidr)
+{
+ return !(trbidr & TRBIDR_PROG);
+}
+
+static inline int get_trbe_address_align(u64 trbidr)
+{
+ return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
+}
+
+static inline unsigned long get_trbe_write_pointer(void)
+{
+ return read_sysreg_s(SYS_TRBPTR_EL1);
+}
+
+static inline void set_trbe_write_pointer(unsigned long addr)
+{
+ WARN_ON(is_trbe_enabled());
+ write_sysreg_s(addr, SYS_TRBPTR_EL1);
+}
+
+static inline unsigned long get_trbe_limit_pointer(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+ unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ return addr;
+}
+
+static inline unsigned long get_trbe_base_pointer(void)
+{
+ u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
+ unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);
+
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ return addr;
+}
+
+static inline void set_trbe_base_pointer(unsigned long addr)
+{
+ WARN_ON(is_trbe_enabled());
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ write_sysreg_s(addr, SYS_TRBBASER_EL1);
+}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3eec59f1fed3..281a65d9b44b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -645,6 +645,16 @@ config I2C_HIGHLANDER
This driver can also be built as a module. If so, the module
will be called i2c-highlander.
+config I2C_HISI
+ tristate "HiSilicon I2C controller"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Say Y here if you want to have HiSilicon I2C controller support
+ available on the Kunpeng server.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-hisi.
+
config I2C_IBM_IIC
tristate "IBM PPC 4xx on-chip I2C interface"
depends on 4xx
@@ -776,7 +786,7 @@ config I2C_MT7621
config I2C_MV64XXX
tristate "Marvell mv64xxx I2C Controller"
- depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST
+ depends on PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -1199,6 +1209,16 @@ config I2C_DLN2
This driver can also be built as a module. If so, the module
will be called i2c-dln2.
+config I2C_CP2615
+ tristate "Silicon Labs CP2615 USB sound card and I2C adapter"
+ depends on USB
+ help
+ If you say yes to this option, support will be included for Silicon
+ Labs CP2615's I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-cp2615.
+
config I2C_PARPORT
tristate "Parallel port adapter"
depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 615f35e3e31f..69e9963615f6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
+obj-$(CONFIG_I2C_HISI) += i2c-hisi.o
obj-$(CONFIG_I2C_HIX5HD2) += i2c-hix5hd2.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o
@@ -123,6 +124,7 @@ obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
+obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 34862ad3423e..1ed7e945bb6d 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -186,9 +186,9 @@ static int amd_ec_write(struct amd_smbus *smbus, unsigned char address,
#define AMD_SMB_PRTCL_PEC 0x80
-static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
+static s32 amd8111_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command, int size,
- union i2c_smbus_data * data)
+ union i2c_smbus_data *data)
{
struct amd_smbus *smbus = adap->algo_data;
unsigned char protocol, len, pec, temp[2];
@@ -199,130 +199,130 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
pec = (flags & I2C_CLIENT_PEC) ? AMD_SMB_PRTCL_PEC : 0;
switch (size) {
- case I2C_SMBUS_QUICK:
- protocol |= AMD_SMB_PRTCL_QUICK;
- read_write = I2C_SMBUS_WRITE;
- break;
-
- case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE) {
- status = amd_ec_write(smbus, AMD_SMB_CMD,
- command);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_BYTE;
- break;
-
- case I2C_SMBUS_BYTE_DATA:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ case I2C_SMBUS_QUICK:
+ protocol |= AMD_SMB_PRTCL_QUICK;
+ read_write = I2C_SMBUS_WRITE;
+ break;
+
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE) {
+ status = amd_ec_write(smbus, AMD_SMB_CMD,
+ command);
if (status)
return status;
- if (read_write == I2C_SMBUS_WRITE) {
- status = amd_ec_write(smbus, AMD_SMB_DATA,
- data->byte);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_BYTE_DATA;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_BYTE;
+ break;
- case I2C_SMBUS_WORD_DATA:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ case I2C_SMBUS_BYTE_DATA:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE) {
+ status = amd_ec_write(smbus, AMD_SMB_DATA,
+ data->byte);
if (status)
return status;
- if (read_write == I2C_SMBUS_WRITE) {
- status = amd_ec_write(smbus, AMD_SMB_DATA,
- data->word & 0xff);
- if (status)
- return status;
- status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
- data->word >> 8);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_WORD_DATA | pec;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_BYTE_DATA;
+ break;
- case I2C_SMBUS_BLOCK_DATA:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
- if (read_write == I2C_SMBUS_WRITE) {
- len = min_t(u8, data->block[0],
- I2C_SMBUS_BLOCK_MAX);
- status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
- if (status)
- return status;
- for (i = 0; i < len; i++) {
- status =
- amd_ec_write(smbus, AMD_SMB_DATA + i,
- data->block[i + 1]);
- if (status)
- return status;
- }
- }
- protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec;
- break;
-
- case I2C_SMBUS_I2C_BLOCK_DATA:
- len = min_t(u8, data->block[0],
- I2C_SMBUS_BLOCK_MAX);
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
- status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
- if (status)
- return status;
- if (read_write == I2C_SMBUS_WRITE)
- for (i = 0; i < len; i++) {
- status =
- amd_ec_write(smbus, AMD_SMB_DATA + i,
- data->block[i + 1]);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA;
- break;
-
- case I2C_SMBUS_PROC_CALL:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
+ case I2C_SMBUS_WORD_DATA:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE) {
status = amd_ec_write(smbus, AMD_SMB_DATA,
- data->word & 0xff);
+ data->word & 0xff);
if (status)
return status;
status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
- data->word >> 8);
+ data->word >> 8);
if (status)
return status;
- protocol = AMD_SMB_PRTCL_PROC_CALL | pec;
- read_write = I2C_SMBUS_READ;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_WORD_DATA | pec;
+ break;
- case I2C_SMBUS_BLOCK_PROC_CALL:
+ case I2C_SMBUS_BLOCK_DATA:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE) {
len = min_t(u8, data->block[0],
- I2C_SMBUS_BLOCK_MAX - 1);
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
+ I2C_SMBUS_BLOCK_MAX);
status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
if (status)
return status;
for (i = 0; i < len; i++) {
- status = amd_ec_write(smbus, AMD_SMB_DATA + i,
- data->block[i + 1]);
+ status =
+ amd_ec_write(smbus, AMD_SMB_DATA + i,
+ data->block[i + 1]);
if (status)
return status;
}
- protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec;
- read_write = I2C_SMBUS_READ;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec;
+ break;
+
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ len = min_t(u8, data->block[0],
+ I2C_SMBUS_BLOCK_MAX);
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE)
+ for (i = 0; i < len; i++) {
+ status =
+ amd_ec_write(smbus, AMD_SMB_DATA + i,
+ data->block[i + 1]);
+ if (status)
+ return status;
+ }
+ protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA;
+ break;
- default:
- dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
- return -EOPNOTSUPP;
+ case I2C_SMBUS_PROC_CALL:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_DATA,
+ data->word & 0xff);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
+ data->word >> 8);
+ if (status)
+ return status;
+ protocol = AMD_SMB_PRTCL_PROC_CALL | pec;
+ read_write = I2C_SMBUS_READ;
+ break;
+
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ len = min_t(u8, data->block[0],
+ I2C_SMBUS_BLOCK_MAX - 1);
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
+ if (status)
+ return status;
+ for (i = 0; i < len; i++) {
+ status = amd_ec_write(smbus, AMD_SMB_DATA + i,
+ data->block[i + 1]);
+ if (status)
+ return status;
+ }
+ protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec;
+ read_write = I2C_SMBUS_READ;
+ break;
+
+ default:
+ dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
}
status = amd_ec_write(smbus, AMD_SMB_ADDR, addr << 1);
@@ -357,40 +357,40 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
return 0;
switch (size) {
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- status = amd_ec_read(smbus, AMD_SMB_DATA, &data->byte);
- if (status)
- return status;
- break;
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ status = amd_ec_read(smbus, AMD_SMB_DATA, &data->byte);
+ if (status)
+ return status;
+ break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- status = amd_ec_read(smbus, AMD_SMB_DATA, temp + 0);
- if (status)
- return status;
- status = amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1);
- if (status)
- return status;
- data->word = (temp[1] << 8) | temp[0];
- break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ status = amd_ec_read(smbus, AMD_SMB_DATA, temp + 0);
+ if (status)
+ return status;
+ status = amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1);
+ if (status)
+ return status;
+ data->word = (temp[1] << 8) | temp[0];
+ break;
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_BLOCK_PROC_CALL:
- status = amd_ec_read(smbus, AMD_SMB_BCNT, &len);
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ status = amd_ec_read(smbus, AMD_SMB_BCNT, &len);
+ if (status)
+ return status;
+ len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
+ fallthrough;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ for (i = 0; i < len; i++) {
+ status = amd_ec_read(smbus, AMD_SMB_DATA + i,
+ data->block + i + 1);
if (status)
return status;
- len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
- fallthrough;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < len; i++) {
- status = amd_ec_read(smbus, AMD_SMB_DATA + i,
- data->block + i + 1);
- if (status)
- return status;
- }
- data->block[0] = len;
- break;
+ }
+ data->block[0] = len;
+ break;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index ba766d24219e..490ee3962645 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#define N_DATA_REGS 8
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index e4b7f2a951ad..c1bbc4caeb5c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
bool change_role = false;
#endif
- ret = pm_runtime_get_sync(id->dev);
+ ret = pm_runtime_resume_and_get(id->dev);
if (ret < 0)
return ret;
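
The motivation for this swap, sketched under the standard runtime-PM semantics: pm_runtime_get_sync() leaves the usage counter raised even when it fails, so callers had to drop it themselves, while pm_runtime_resume_and_get() does that internally. A minimal illustration (not the driver's actual code):

        #include <linux/pm_runtime.h>

        static int old_pattern(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);

                if (ret < 0) {
                        pm_runtime_put_noidle(dev); /* undo the usage count */
                        return ret;
                }
                return 0;
        }

        static int new_pattern(struct device *dev)
        {
                int ret = pm_runtime_resume_and_get(dev);

                if (ret < 0)    /* usage count already dropped on failure */
                        return ret;
                return 0;
        }
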
@@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
- ret = pm_runtime_get_sync(id->dev);
+ ret = pm_runtime_resume_and_get(id->dev);
if (ret < 0)
return ret;
@@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (IS_ERR(id->membase))
return PTR_ERR(id->membase);
- id->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ id->irq = ret;
id->adap.owner = THIS_MODULE;
id->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index f80d79e973cd..08f491ea21ac 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -280,6 +280,10 @@ static const struct property_entry bq24190_props[] = {
{ }
};
+static const struct software_node bq24190_node = {
+ .properties = bq24190_props,
+};
+
static struct regulator_consumer_supply fusb302_consumer = {
.supply = "vbus",
/* Must match fusb302 dev_name in intel_cht_int33fe.c */
@@ -308,7 +312,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
.type = "bq24190",
.addr = 0x6b,
.dev_name = "bq24190",
- .properties = bq24190_props,
+ .swnode = &bq24190_node,
.platform_data = &bq24190_pdata,
};
int ret, reg, irq;
diff --git a/drivers/i2c/busses/i2c-cp2615.c b/drivers/i2c/busses/i2c-cp2615.c
new file mode 100644
index 000000000000..78cfecd1ea76
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cp2615.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * i2c support for Silicon Labs' CP2615 Digital Audio Bridge
+ *
+ * (c) 2021, Bence Csókás <bence98@sch.bme.hu>
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/usb.h>
+
+/* CP2615 I/O Protocol implementation */
+
+#define CP2615_VID 0x10c4
+#define CP2615_PID 0xeac1
+
+#define IOP_EP_IN 0x82
+#define IOP_EP_OUT 0x02
+#define IOP_IFN 1
+#define IOP_ALTSETTING 2
+
+#define MAX_IOP_SIZE 64
+#define MAX_IOP_PAYLOAD_SIZE (MAX_IOP_SIZE - 6)
+#define MAX_I2C_SIZE (MAX_IOP_PAYLOAD_SIZE - 4)
+
+enum cp2615_iop_msg_type {
+ iop_GetAccessoryInfo = 0xD100,
+ iop_AccessoryInfo = 0xA100,
+ iop_GetPortConfiguration = 0xD203,
+ iop_PortConfiguration = 0xA203,
+ iop_DoI2cTransfer = 0xD400,
+ iop_I2cTransferResult = 0xA400,
+ iop_GetSerialState = 0xD501,
+ iop_SerialState = 0xA501
+};
+
+struct __packed cp2615_iop_msg {
+ __be16 preamble, length, msg;
+ u8 data[MAX_IOP_PAYLOAD_SIZE];
+};
+
+#define PART_ID_A01 0x1400
+#define PART_ID_A02 0x1500
+
+struct __packed cp2615_iop_accessory_info {
+ __be16 part_id, option_id, proto_ver;
+};
+
+struct __packed cp2615_i2c_transfer {
+ u8 tag, i2caddr, read_len, write_len;
+ u8 data[MAX_I2C_SIZE];
+};
+
+/* Possible values for struct cp2615_i2c_transfer_result.status */
+enum cp2615_i2c_status {
+ /* Writing to the internal EEPROM failed, because it is locked */
+ CP2615_CFG_LOCKED = -6,
+ /* read_len or write_len out of range */
+ CP2615_INVALID_PARAM = -4,
+ /* I2C slave did not ACK in time */
+ CP2615_TIMEOUT,
+ /* I2C bus busy */
+ CP2615_BUS_BUSY,
+ /* I2C bus error (i.e. the device NAK'd the request) */
+ CP2615_BUS_ERROR,
+ CP2615_SUCCESS
+};
+
+struct __packed cp2615_i2c_transfer_result {
+ u8 tag, i2caddr;
+ s8 status;
+ u8 read_len;
+ u8 data[MAX_I2C_SIZE];
+};
+
+static int cp2615_init_iop_msg(struct cp2615_iop_msg *ret, enum cp2615_iop_msg_type msg,
+ const void *data, size_t data_len)
+{
+ if (data_len > MAX_IOP_PAYLOAD_SIZE)
+ return -EFBIG;
+
+ if (!ret)
+ return -EINVAL;
+
+ ret->preamble = 0x2A2A;
+ ret->length = htons(data_len + 6);
+ ret->msg = htons(msg);
+ if (data && data_len)
+ memcpy(&ret->data, data, data_len);
+ return 0;
+}
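
For reference, a hosted-C sketch of the resulting wire format for an iop_GetAccessoryInfo frame with no payload (byte values follow from the big-endian fields above; this is an illustration, not driver code):

        #include <arpa/inet.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t frame[3] = {
                        htons(0x2A2A),  /* preamble             */
                        htons(6),       /* length: header only  */
                        htons(0xD100),  /* iop_GetAccessoryInfo */
                };
                const uint8_t *b = (const uint8_t *)frame;

                for (int i = 0; i < 6; i++)
                        printf("%02X ", b[i]);  /* 2A 2A 00 06 D1 00 */
                printf("\n");
                return 0;
        }

Note that 0x2A2A is byte-order symmetric, which is why the preamble assignment above works without an explicit htons().
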
+
+static int cp2615_init_i2c_msg(struct cp2615_iop_msg *ret, const struct cp2615_i2c_transfer *data)
+{
+ return cp2615_init_iop_msg(ret, iop_DoI2cTransfer, data, 4 + data->write_len);
+}
+
+/* Translates status codes to Linux errno's */
+static int cp2615_check_status(enum cp2615_i2c_status status)
+{
+ switch (status) {
+ case CP2615_SUCCESS:
+ return 0;
+ case CP2615_BUS_ERROR:
+ return -ENXIO;
+ case CP2615_BUS_BUSY:
+ return -EAGAIN;
+ case CP2615_TIMEOUT:
+ return -ETIMEDOUT;
+ case CP2615_INVALID_PARAM:
+ return -EINVAL;
+ case CP2615_CFG_LOCKED:
+ return -EPERM;
+ }
+ /* Unknown error code */
+ return -EPROTO;
+}
+
+/* Driver code */
+
+static int
+cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w)
+{
+ struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int res = cp2615_init_i2c_msg(msg, i2c_w);
+
+ if (!res)
+ res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT),
+ msg, ntohs(msg->length), NULL, 0);
+ kfree(msg);
+ return res;
+}
+
+static int
+cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf)
+{
+ struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ struct cp2615_i2c_transfer_result *i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
+ msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+
+ if (res < 0) {
+ kfree(msg);
+ return res;
+ }
+
+ if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) {
+ kfree(msg);
+ return -EIO;
+ }
+
+ res = cp2615_check_status(i2c_r->status);
+ if (!res)
+ memcpy(buf, &i2c_r->data, i2c_r->read_len);
+
+ kfree(msg);
+ return res;
+}
+
+/* Checks if the IOP is functional by querying the part's ID */
+static int cp2615_check_iop(struct usb_interface *usbif)
+{
+ struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ struct cp2615_iop_accessory_info *info = (struct cp2615_iop_accessory_info *)&msg->data;
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int res = cp2615_init_iop_msg(msg, iop_GetAccessoryInfo, NULL, 0);
+
+ if (res)
+ goto out;
+
+ res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT),
+ msg, ntohs(msg->length), NULL, 0);
+ if (res)
+ goto out;
+
+ res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
+ msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+ if (res)
+ goto out;
+
+ if (msg->msg != htons(iop_AccessoryInfo)) {
+ res = -EIO;
+ goto out;
+ }
+
+ switch (ntohs(info->part_id)) {
+ case PART_ID_A01:
+ dev_dbg(&usbif->dev, "Found A01 part. (WARNING: errata exists!)\n");
+ break;
+ case PART_ID_A02:
+ dev_dbg(&usbif->dev, "Found good A02 part.\n");
+ break;
+ default:
+ dev_warn(&usbif->dev, "Unknown part ID %04X\n", ntohs(info->part_id));
+ }
+
+out:
+ kfree(msg);
+ return res;
+}
+
+static int
+cp2615_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct usb_interface *usbif = adap->algo_data;
+ int i = 0, ret = 0;
+ struct i2c_msg *msg;
+ struct cp2615_i2c_transfer i2c_w = {0};
+
+ dev_dbg(&usbif->dev, "Doing %d I2C transactions\n", num);
+
+ for (; !ret && i < num; i++) {
+ msg = &msgs[i];
+
+ i2c_w.tag = 0xdd;
+ i2c_w.i2caddr = i2c_8bit_addr_from_msg(msg);
+ if (msg->flags & I2C_M_RD) {
+ i2c_w.read_len = msg->len;
+ i2c_w.write_len = 0;
+ } else {
+ i2c_w.read_len = 0;
+ i2c_w.write_len = msg->len;
+ memcpy(&i2c_w.data, msg->buf, i2c_w.write_len);
+ }
+ ret = cp2615_i2c_send(usbif, &i2c_w);
+ if (ret)
+ break;
+ ret = cp2615_i2c_recv(usbif, i2c_w.tag, msg->buf);
+ }
+ if (ret < 0)
+ return ret;
+ return i;
+}
+
+static u32
+cp2615_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm cp2615_i2c_algo = {
+ .master_xfer = cp2615_i2c_master_xfer,
+ .functionality = cp2615_i2c_func,
+};
+
+/*
+ * This chip has some limitations: one is that the USB endpoint
+ * can only receive 64 bytes/transfer, which leaves 54 bytes for
+ * the I2C transfer. On top of that, EITHER read_len OR write_len
+ * may be zero, but not both. If both are non-zero, the adapter
+ * issues a write followed by a read. And the chip does not
+ * support repeated START between the write and read phases.
+ */
+static struct i2c_adapter_quirks cp2615_i2c_quirks = {
+ .max_write_len = MAX_I2C_SIZE,
+ .max_read_len = MAX_I2C_SIZE,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
+ .max_comb_1st_msg_len = MAX_I2C_SIZE,
+ .max_comb_2nd_msg_len = MAX_I2C_SIZE
+};
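
A hypothetical consumer-side sketch of a transfer that fits these quirks — a one-byte write followed by a short read, both well under MAX_I2C_SIZE (the 0x50 address and the register value are made up; kernel context assumed):

        static int cp2615_read_reg_sketch(struct i2c_adapter *adap)
        {
                u8 reg = 0x00;  /* illustrative register address */
                u8 val[4];      /* read length <= MAX_I2C_SIZE   */
                struct i2c_msg msgs[] = {
                        { .addr = 0x50, .flags = 0,        .len = 1,           .buf = &reg },
                        { .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(val), .buf = val  },
                };

                /* Maps to one DoI2cTransfer with write_len = 1 and read_len = 4;
                 * no repeated START is generated between the two phases. */
                return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
        }
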
+
+static void
+cp2615_i2c_remove(struct usb_interface *usbif)
+{
+ struct i2c_adapter *adap = usb_get_intfdata(usbif);
+
+ usb_set_intfdata(usbif, NULL);
+ i2c_del_adapter(adap);
+}
+
+static int
+cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id)
+{
+ int ret = 0;
+ struct i2c_adapter *adap;
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+
+ ret = usb_set_interface(usbdev, IOP_IFN, IOP_ALTSETTING);
+ if (ret)
+ return ret;
+
+ ret = cp2615_check_iop(usbif);
+ if (ret)
+ return ret;
+
+ adap = devm_kzalloc(&usbif->dev, sizeof(struct i2c_adapter), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ strncpy(adap->name, usbdev->serial, sizeof(adap->name) - 1);
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = &usbif->dev;
+ adap->dev.of_node = usbif->dev.of_node;
+ adap->timeout = HZ;
+ adap->algo = &cp2615_i2c_algo;
+ adap->quirks = &cp2615_i2c_quirks;
+ adap->algo_data = usbif;
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ usb_set_intfdata(usbif, adap);
+ return 0;
+}
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(CP2615_VID, CP2615_PID, IOP_IFN) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver cp2615_i2c_driver = {
+ .name = "i2c-cp2615",
+ .probe = cp2615_i2c_probe,
+ .disconnect = cp2615_i2c_remove,
+ .id_table = id_table,
+};
+
+module_usb_driver(cp2615_i2c_driver);
+
+MODULE_AUTHOR("Bence Csókás <bence98@sch.bme.hu>");
+MODULE_DESCRIPTION("CP2615 I2C bus driver");
+MODULE_LICENSE("GPL");
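The quirks table above lets the I2C core reject unsupported transfers before they reach the driver. As a minimal client-side sketch (not part of the patch; the helper name and register layout are hypothetical), a register read shaped to satisfy these quirks looks like this:

/* Hypothetical sketch: a combined transfer the quirks allow -- exactly
 * one write message followed by one read message, both of non-zero
 * length and no longer than MAX_I2C_SIZE (54) bytes.
 */
static int cp2615_example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}

Because the adapter also sets I2C_AQ_NO_REP_START, the hardware issues the write and read as separate phases rather than joining them with a repeated START.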
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 3c19aada4b30..fdc34d9e3702 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -150,6 +150,9 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev)
reg = readl(dev->base + DW_IC_COMP_TYPE);
i2c_dw_release_lock(dev);
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
+ map_cfg.max_register = AMD_UCSI_INTR_REG;
+
if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) {
map_cfg.reg_read = dw_reg_read_swab;
map_cfg.reg_write = dw_reg_write_swab;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 5392b82f68a4..6a53f75abf7c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -295,8 +295,16 @@ struct dw_i2c_dev {
#define MODEL_MSCC_OCELOT BIT(8)
#define MODEL_BAIKAL_BT1 BIT(9)
+#define MODEL_AMD_NAVI_GPU BIT(10)
#define MODEL_MASK GENMASK(11, 8)
+/*
+ * Enable the UCSI interrupt by writing 0xd to register
+ * offset 0x474, as specified in the hardware specification.
+ */
+#define AMD_UCSI_INTR_REG 0x474
+#define AMD_UCSI_INTR_EN 0xd
+
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 873ef38eb1c8..13be1d678c39 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -23,6 +23,10 @@
#include "i2c-designware-core.h"
+#define AMD_TIMEOUT_MIN_US 25
+#define AMD_TIMEOUT_MAX_US 250
+#define AMD_MASTERCFG_MASK GENMASK(15, 0)
+
static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
/* Configure Tx/Rx FIFO threshold levels */
@@ -35,10 +39,10 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- const char *mode_str, *fp_str = "";
u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
+ const char *fp_str = "";
u32 ic_clk;
int ret;
@@ -78,7 +82,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
* difference is the timing parameter values since the registers are
* the same.
*/
- if (t->bus_freq_hz == 1000000) {
+ if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
/*
* Check are Fast Mode Plus parameters available. Calculate
* SCL timing parameters for Fast Mode Plus if not set.
@@ -154,22 +158,10 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
ret = i2c_dw_set_sda_hold(dev);
if (ret)
- goto out;
-
- switch (dev->master_cfg & DW_IC_CON_SPEED_MASK) {
- case DW_IC_CON_SPEED_STD:
- mode_str = "Standard Mode";
- break;
- case DW_IC_CON_SPEED_HIGH:
- mode_str = "High Speed Mode";
- break;
- default:
- mode_str = "Fast Mode";
- }
- dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str);
+ return ret;
-out:
- return ret;
+ dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
+ return 0;
}
/**
@@ -260,6 +252,108 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}
+static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
+ !(val & DW_IC_INTR_STOP_DET),
+ 1100, 20000);
+ if (ret)
+ dev_err(dev->dev, "i2c timeout error %d\n", ret);
+
+ return ret;
+}
+
+static int i2c_dw_status(struct dw_i2c_dev *dev)
+{
+ int status;
+
+ status = i2c_dw_wait_bus_not_busy(dev);
+ if (status)
+ return status;
+
+ return i2c_dw_check_stopbit(dev);
+}
+
+/*
+ * Initiate and continue a master read/write transaction using a
+ * polling-based transfer routine, then write the messages into the
+ * Tx buffer.
+ */
+static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
+ int cmd = 0, status;
+ u8 *tx_buf;
+ u32 val;
+
+ /*
+ * To enable the UCSI interrupt on the AMD NAVI GPU card, the
+ * correct value must be written to a specific register
+ * (offset 0x474), as per the hardware IP specification.
+ */
+ regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);
+
+ dev->msgs = msgs;
+ dev->msgs_num = num_msgs;
+ i2c_dw_xfer_init(dev);
+ i2c_dw_disable_int(dev);
+
+ /* Initiate messages read/write transaction */
+ for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
+ tx_buf = msgs[msg_wrt_idx].buf;
+ buf_len = msgs[msg_wrt_idx].len;
+
+ if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
+ regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
+ /*
+ * Initiate the i2c read/write transaction of buffer length,
+ * and poll for bus busy status. For the last message transfer,
+ * update the command with stopbit enable.
+ */
+ for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
+ if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
+ cmd |= BIT(9);
+
+ if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
+ /* Due to a hardware bug, the same command must be written twice. */
+ regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
+ regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
+ if (cmd) {
+ regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
+ regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
+ /*
+ * The stop bit needs to be checked. However, it cannot be
+ * detected from the registers, so we always check it when
+ * reading or writing the last byte.
+ */
+ status = i2c_dw_status(dev);
+ if (status)
+ return status;
+
+ for (data_idx = 0; data_idx < buf_len; data_idx++) {
+ regmap_read(dev->map, DW_IC_DATA_CMD, &val);
+ tx_buf[data_idx] = val;
+ }
+ status = i2c_dw_check_stopbit(dev);
+ if (status)
+ return status;
+ }
+ } else {
+ regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
+ usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
+ }
+ }
+ status = i2c_dw_check_stopbit(dev);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
/*
* Initiate (and continue) low level master read/write transaction.
* This function is only called from i2c_dw_isr, and pumping i2c_msg
@@ -463,6 +557,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
+ /*
+ * Use the polling-based transfer routine when the AMD NAVI GPU
+ * card is enabled, since the card does not support the
+ * interrupt-based operation of the existing DesignWare driver.
+ */
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+ ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
+ goto done_nolock;
+ }
+
if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
ret = -ESHUTDOWN;
goto done_nolock;
@@ -739,6 +843,20 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
return 0;
}
+static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
+{
+ struct i2c_adapter *adap = &dev->adapter;
+ int ret;
+
+ pm_runtime_get_noresume(dev->dev);
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret)
+ dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
+ pm_runtime_put_noidle(dev->dev);
+
+ return ret;
+}
+
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
@@ -775,6 +893,9 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
+ return amd_i2c_adap_quirk(dev);
+
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
irq_flags = IRQF_NO_SUSPEND;
} else {
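For readers less familiar with regmap_read_poll_timeout(), the stop-bit check in i2c_dw_check_stopbit() above is roughly equivalent to the following open-coded loop (a simplified sketch; the real helper also performs a final re-read after the timeout expires):

/* Simplified equivalent of the regmap_read_poll_timeout() call in
 * i2c_dw_check_stopbit() (sketch only): poll DW_IC_INTR_STAT roughly
 * every 1100 us until STOP_DET clears, giving up after 20 ms.
 */
static int check_stopbit_open_coded(struct dw_i2c_dev *dev)
{
	unsigned long deadline = jiffies + usecs_to_jiffies(20000);
	u32 val;
	int ret;

	for (;;) {
		ret = regmap_read(dev->map, DW_IC_INTR_STAT, &val);
		if (ret)
			return ret;
		if (!(val & DW_IC_INTR_STOP_DET))
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(1100, 1200);
	}
}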
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 55c83a7a24f3..0f409a4c2da0 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -26,6 +26,7 @@
#include "i2c-designware-core.h"
#define DRIVER_NAME "i2c-designware-pci"
+#define AMD_CLK_RATE_HZ 100000
enum dw_pci_ctl_id_t {
medfield,
@@ -34,6 +35,7 @@ enum dw_pci_ctl_id_t {
cherrytrail,
haswell,
elkhartlake,
+ navi_amd,
};
struct dw_scl_sda_cfg {
@@ -78,11 +80,23 @@ static struct dw_scl_sda_cfg hsw_config = {
.sda_hold = 0x9,
};
+/* NAVI-AMD HCNT/LCNT/SDA hold time */
+static struct dw_scl_sda_cfg navi_amd_config = {
+ .ss_hcnt = 0x1ae,
+ .ss_lcnt = 0x23a,
+ .sda_hold = 0x9,
+};
+
static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
return 25000;
}
+static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return AMD_CLK_RATE_HZ;
+}
+
static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
@@ -104,6 +118,35 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
return -ENODEV;
}
+ /*
+ * TODO: find a better way to deduplicate the instantiation
+ * of the USB PD slave device with the nVidia GPU driver.
+ */
+static int navi_amd_register_client(struct dw_i2c_dev *dev)
+{
+ struct i2c_board_info info;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strscpy(info.type, "ccgx-ucsi", I2C_NAME_SIZE);
+ info.addr = 0x08;
+ info.irq = dev->irq;
+
+ dev->slave = i2c_new_client_device(&dev->adapter, &info);
+ if (IS_ERR(dev->slave))
+ return PTR_ERR(dev->slave);
+
+ return 0;
+}
+
+static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+
+ dev->flags |= MODEL_AMD_NAVI_GPU;
+ dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ return 0;
+}
+
static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
/*
@@ -155,6 +198,12 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.bus_num = -1,
.get_clk_rate_khz = ehl_get_clk_rate_khz,
},
+ [navi_amd] = {
+ .bus_num = -1,
+ .scl_sda_cfg = &navi_amd_config,
+ .setup = navi_amd_setup,
+ .get_clk_rate_khz = navi_amd_get_clk_rate_khz,
+ },
};
#ifdef CONFIG_PM
@@ -274,6 +323,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return r;
}
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+ r = navi_amd_register_client(dev);
+ if (r) {
+ dev_err(dev->dev, "register client failed with %d\n", r);
+ return r;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
@@ -337,6 +394,10 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4bbe), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bbf), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bc0), elkhartlake },
+ { PCI_VDEVICE(ATI, 0x7314), navi_amd },
+ { PCI_VDEVICE(ATI, 0x73a4), navi_amd },
+ { PCI_VDEVICE(ATI, 0x73e4), navi_amd },
+ { PCI_VDEVICE(ATI, 0x73c4), navi_amd },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
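The four new ATI/AMD PCI IDs reach the navi_amd controller descriptor through the driver_data index, the same way the existing Intel entries do. A simplified sketch of that lookup (an assumption about the surrounding probe code, which this patch does not change):

/* Sketch: how an id_table entry selects its controller descriptor in
 * probe. For the ATI/AMD IDs above, id->driver_data == navi_amd, so
 * navi_amd_setup() runs and the bus is configured for 100 kHz.
 */
static struct dw_pci_controller *example_lookup(const struct pci_device_id *id)
{
	return &dw_pci_controllers[id->driver_data];
}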
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index a08554c1a570..bdff0e6345d9 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
em_i2c_reset(&priv->adap);
- priv->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+ priv->irq = ret;
ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
new file mode 100644
index 000000000000..acf394812061
--- /dev/null
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HiSilicon I2C Controller Driver for Kunpeng SoC
+ *
+ * Copyright (c) 2021 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define HISI_I2C_FRAME_CTRL 0x0000
+#define HISI_I2C_FRAME_CTRL_SPEED_MODE GENMASK(1, 0)
+#define HISI_I2C_FRAME_CTRL_ADDR_TEN BIT(2)
+#define HISI_I2C_SLV_ADDR 0x0004
+#define HISI_I2C_SLV_ADDR_VAL GENMASK(9, 0)
+#define HISI_I2C_SLV_ADDR_GC_S_MODE BIT(10)
+#define HISI_I2C_SLV_ADDR_GC_S_EN BIT(11)
+#define HISI_I2C_CMD_TXDATA 0x0008
+#define HISI_I2C_CMD_TXDATA_DATA GENMASK(7, 0)
+#define HISI_I2C_CMD_TXDATA_RW BIT(8)
+#define HISI_I2C_CMD_TXDATA_P_EN BIT(9)
+#define HISI_I2C_CMD_TXDATA_SR_EN BIT(10)
+#define HISI_I2C_RXDATA 0x000c
+#define HISI_I2C_RXDATA_DATA GENMASK(7, 0)
+#define HISI_I2C_SS_SCL_HCNT 0x0010
+#define HISI_I2C_SS_SCL_LCNT 0x0014
+#define HISI_I2C_FS_SCL_HCNT 0x0018
+#define HISI_I2C_FS_SCL_LCNT 0x001c
+#define HISI_I2C_HS_SCL_HCNT 0x0020
+#define HISI_I2C_HS_SCL_LCNT 0x0024
+#define HISI_I2C_FIFO_CTRL 0x0028
+#define HISI_I2C_FIFO_RX_CLR BIT(0)
+#define HISI_I2C_FIFO_TX_CLR BIT(1)
+#define HISI_I2C_FIFO_RX_AF_THRESH GENMASK(7, 2)
+#define HISI_I2C_FIFO_TX_AE_THRESH GENMASK(13, 8)
+#define HISI_I2C_FIFO_STATE 0x002c
+#define HISI_I2C_FIFO_STATE_RX_RERR BIT(0)
+#define HISI_I2C_FIFO_STATE_RX_WERR BIT(1)
+#define HISI_I2C_FIFO_STATE_RX_EMPTY BIT(3)
+#define HISI_I2C_FIFO_STATE_TX_RERR BIT(6)
+#define HISI_I2C_FIFO_STATE_TX_WERR BIT(7)
+#define HISI_I2C_FIFO_STATE_TX_FULL BIT(11)
+#define HISI_I2C_SDA_HOLD 0x0030
+#define HISI_I2C_SDA_HOLD_TX GENMASK(15, 0)
+#define HISI_I2C_SDA_HOLD_RX GENMASK(23, 16)
+#define HISI_I2C_FS_SPK_LEN 0x0038
+#define HISI_I2C_FS_SPK_LEN_CNT GENMASK(7, 0)
+#define HISI_I2C_HS_SPK_LEN 0x003c
+#define HISI_I2C_HS_SPK_LEN_CNT GENMASK(7, 0)
+#define HISI_I2C_INT_MSTAT 0x0044
+#define HISI_I2C_INT_CLR 0x0048
+#define HISI_I2C_INT_MASK 0x004C
+#define HISI_I2C_TRANS_STATE 0x0050
+#define HISI_I2C_TRANS_ERR 0x0054
+#define HISI_I2C_VERSION 0x0058
+
+#define HISI_I2C_INT_ALL GENMASK(4, 0)
+#define HISI_I2C_INT_TRANS_CPLT BIT(0)
+#define HISI_I2C_INT_TRANS_ERR BIT(1)
+#define HISI_I2C_INT_FIFO_ERR BIT(2)
+#define HISI_I2C_INT_RX_FULL BIT(3)
+#define HISI_I2C_INT_TX_EMPTY BIT(4)
+#define HISI_I2C_INT_ERR \
+ (HISI_I2C_INT_TRANS_ERR | HISI_I2C_INT_FIFO_ERR)
+
+#define HISI_I2C_STD_SPEED_MODE 0
+#define HISI_I2C_FAST_SPEED_MODE 1
+#define HISI_I2C_HIGH_SPEED_MODE 2
+
+#define HISI_I2C_TX_FIFO_DEPTH 64
+#define HISI_I2C_RX_FIFO_DEPTH 64
+#define HISI_I2C_TX_F_AE_THRESH 1
+#define HISI_I2C_RX_F_AF_THRESH 60
+
+#define HZ_PER_KHZ 1000
+
+#define NSEC_TO_CYCLES(ns, clk_rate_khz) \
+ DIV_ROUND_UP_ULL((clk_rate_khz) * (ns), NSEC_PER_MSEC)
+
+struct hisi_i2c_controller {
+ struct i2c_adapter adapter;
+ void __iomem *iobase;
+ struct device *dev;
+ int irq;
+
+ /* Intermediates for recording the transfer process */
+ struct completion *completion;
+ struct i2c_msg *msgs;
+ int msg_num;
+ int msg_tx_idx;
+ int buf_tx_idx;
+ int msg_rx_idx;
+ int buf_rx_idx;
+ u16 tar_addr;
+ u32 xfer_err;
+
+ /* I2C bus configuration */
+ struct i2c_timings t;
+ u32 clk_rate_khz;
+ u32 spk_len;
+};
+
+static void hisi_i2c_enable_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_MASK);
+}
+
+static void hisi_i2c_disable_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed((~mask) & HISI_I2C_INT_ALL, ctlr->iobase + HISI_I2C_INT_MASK);
+}
+
+static void hisi_i2c_clear_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_CLR);
+}
+
+static void hisi_i2c_handle_errors(struct hisi_i2c_controller *ctlr)
+{
+ u32 int_err = ctlr->xfer_err, reg;
+
+ if (int_err & HISI_I2C_INT_FIFO_ERR) {
+ reg = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+
+ if (reg & HISI_I2C_FIFO_STATE_RX_RERR)
+ dev_err(ctlr->dev, "rx fifo error read\n");
+
+ if (reg & HISI_I2C_FIFO_STATE_RX_WERR)
+ dev_err(ctlr->dev, "rx fifo error write\n");
+
+ if (reg & HISI_I2C_FIFO_STATE_TX_RERR)
+ dev_err(ctlr->dev, "tx fifo error read\n");
+
+ if (reg & HISI_I2C_FIFO_STATE_TX_WERR)
+ dev_err(ctlr->dev, "tx fifo error write\n");
+ }
+}
+
+static int hisi_i2c_start_xfer(struct hisi_i2c_controller *ctlr)
+{
+ struct i2c_msg *msg = ctlr->msgs;
+ u32 reg;
+
+ reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL);
+ reg &= ~HISI_I2C_FRAME_CTRL_ADDR_TEN;
+ if (msg->flags & I2C_M_TEN)
+ reg |= HISI_I2C_FRAME_CTRL_ADDR_TEN;
+ writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL);
+
+ reg = readl(ctlr->iobase + HISI_I2C_SLV_ADDR);
+ reg &= ~HISI_I2C_SLV_ADDR_VAL;
+ reg |= FIELD_PREP(HISI_I2C_SLV_ADDR_VAL, msg->addr);
+ writel(reg, ctlr->iobase + HISI_I2C_SLV_ADDR);
+
+ reg = readl(ctlr->iobase + HISI_I2C_FIFO_CTRL);
+ reg |= HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR;
+ writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
+ reg &= ~(HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR);
+ writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
+
+ hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ hisi_i2c_enable_int(ctlr, HISI_I2C_INT_ALL);
+
+ return 0;
+}
+
+static void hisi_i2c_reset_xfer(struct hisi_i2c_controller *ctlr)
+{
+ ctlr->msg_num = 0;
+ ctlr->xfer_err = 0;
+ ctlr->msg_tx_idx = 0;
+ ctlr->msg_rx_idx = 0;
+ ctlr->buf_tx_idx = 0;
+ ctlr->buf_rx_idx = 0;
+}
+
+/*
+ * Initialize the transfer information and start the I2C bus transfer.
+ * We only configure the transfer and do some pre/post work here, then
+ * wait for the transfer to complete. The major transfer process is performed
+ * in the IRQ handler.
+ */
+static int hisi_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct hisi_i2c_controller *ctlr = i2c_get_adapdata(adap);
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret = num;
+
+ hisi_i2c_reset_xfer(ctlr);
+ ctlr->completion = &done;
+ ctlr->msg_num = num;
+ ctlr->msgs = msgs;
+
+ hisi_i2c_start_xfer(ctlr);
+
+ if (!wait_for_completion_timeout(ctlr->completion, adap->timeout)) {
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ synchronize_irq(ctlr->irq);
+ i2c_recover_bus(&ctlr->adapter);
+ dev_err(ctlr->dev, "bus transfer timeout\n");
+ ret = -EIO;
+ }
+
+ if (ctlr->xfer_err) {
+ hisi_i2c_handle_errors(ctlr);
+ ret = -EIO;
+ }
+
+ hisi_i2c_reset_xfer(ctlr);
+ ctlr->completion = NULL;
+
+ return ret;
+}
+
+static u32 hisi_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm hisi_i2c_algo = {
+ .master_xfer = hisi_i2c_master_xfer,
+ .functionality = hisi_i2c_functionality,
+};
+
+static int hisi_i2c_read_rx_fifo(struct hisi_i2c_controller *ctlr)
+{
+ struct i2c_msg *cur_msg;
+ u32 fifo_state;
+
+ while (ctlr->msg_rx_idx < ctlr->msg_num) {
+ cur_msg = ctlr->msgs + ctlr->msg_rx_idx;
+
+ if (!(cur_msg->flags & I2C_M_RD)) {
+ ctlr->msg_rx_idx++;
+ continue;
+ }
+
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ while (!(fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY) &&
+ ctlr->buf_rx_idx < cur_msg->len) {
+ cur_msg->buf[ctlr->buf_rx_idx++] = readl(ctlr->iobase + HISI_I2C_RXDATA);
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ }
+
+ if (ctlr->buf_rx_idx == cur_msg->len) {
+ ctlr->buf_rx_idx = 0;
+ ctlr->msg_rx_idx++;
+ }
+
+ if (fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY)
+ break;
+ }
+
+ return 0;
+}
+
+static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
+{
+ int max_write = HISI_I2C_TX_FIFO_DEPTH;
+ bool need_restart = false, last_msg;
+ struct i2c_msg *cur_msg;
+ u32 cmd, fifo_state;
+
+ while (ctlr->msg_tx_idx < ctlr->msg_num) {
+ cur_msg = ctlr->msgs + ctlr->msg_tx_idx;
+ last_msg = (ctlr->msg_tx_idx == ctlr->msg_num - 1);
+
+ /* Signal the SR bit when we start transferring a new message */
+ if (ctlr->msg_tx_idx && !ctlr->buf_tx_idx)
+ need_restart = true;
+
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ while (!(fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) &&
+ ctlr->buf_tx_idx < cur_msg->len && max_write) {
+ cmd = 0;
+
+ if (need_restart) {
+ cmd |= HISI_I2C_CMD_TXDATA_SR_EN;
+ need_restart = false;
+ }
+
+ /* Signal the STOP bit at the last frame of the last message */
+ if (ctlr->buf_tx_idx == cur_msg->len - 1 && last_msg)
+ cmd |= HISI_I2C_CMD_TXDATA_P_EN;
+
+ if (cur_msg->flags & I2C_M_RD)
+ cmd |= HISI_I2C_CMD_TXDATA_RW;
+ else
+ cmd |= FIELD_PREP(HISI_I2C_CMD_TXDATA_DATA,
+ cur_msg->buf[ctlr->buf_tx_idx]);
+
+ writel(cmd, ctlr->iobase + HISI_I2C_CMD_TXDATA);
+ ctlr->buf_tx_idx++;
+ max_write--;
+
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ }
+
+ /* Update the transfer index after each message transfer is done. */
+ if (ctlr->buf_tx_idx == cur_msg->len) {
+ ctlr->buf_tx_idx = 0;
+ ctlr->msg_tx_idx++;
+ }
+
+ if ((fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) ||
+ max_write == 0)
+ break;
+ }
+}
+
+static irqreturn_t hisi_i2c_irq(int irq, void *context)
+{
+ struct hisi_i2c_controller *ctlr = context;
+ u32 int_stat;
+
+ int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT);
+ hisi_i2c_clear_int(ctlr, int_stat);
+ if (!(int_stat & HISI_I2C_INT_ALL))
+ return IRQ_NONE;
+
+ if (int_stat & HISI_I2C_INT_TX_EMPTY)
+ hisi_i2c_xfer_msg(ctlr);
+
+ if (int_stat & HISI_I2C_INT_ERR) {
+ ctlr->xfer_err = int_stat;
+ goto out;
+ }
+
+ /* Drain the RX FIFO before finishing the transfer */
+ if (int_stat & (HISI_I2C_INT_TRANS_CPLT | HISI_I2C_INT_RX_FULL))
+ hisi_i2c_read_rx_fifo(ctlr);
+
+out:
+ if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ complete(ctlr->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Helper function for calculating and configuring the HIGH and LOW
+ * periods of the SCL clock. The caller passes the ratio of the
+ * counts (divide / divisor) according to the target speed mode,
+ * along with the target registers.
+ */
+static void hisi_i2c_set_scl(struct hisi_i2c_controller *ctlr,
+ u32 divide, u32 divisor,
+ u32 reg_hcnt, u32 reg_lcnt)
+{
+ u32 total_cnt, t_scl_hcnt, t_scl_lcnt, scl_fall_cnt, scl_rise_cnt;
+ u32 scl_hcnt, scl_lcnt;
+
+ /* Total SCL clock cycles per speed period */
+ total_cnt = DIV_ROUND_UP_ULL(ctlr->clk_rate_khz * HZ_PER_KHZ, ctlr->t.bus_freq_hz);
+ /* Total HIGH level SCL clock cycles including edges */
+ t_scl_hcnt = DIV_ROUND_UP_ULL(total_cnt * divide, divisor);
+ /* Total LOW level SCL clock cycles including edges */
+ t_scl_lcnt = total_cnt - t_scl_hcnt;
+ /* Fall edge SCL clock cycles */
+ scl_fall_cnt = NSEC_TO_CYCLES(ctlr->t.scl_fall_ns, ctlr->clk_rate_khz);
+ /* Rise edge SCL clock cycles */
+ scl_rise_cnt = NSEC_TO_CYCLES(ctlr->t.scl_rise_ns, ctlr->clk_rate_khz);
+
+ /* Calculated HIGH and LOW periods of SCL clock */
+ scl_hcnt = t_scl_hcnt - ctlr->spk_len - 7 - scl_fall_cnt;
+ scl_lcnt = t_scl_lcnt - 1 - scl_rise_cnt;
+
+ writel(scl_hcnt, ctlr->iobase + reg_hcnt);
+ writel(scl_lcnt, ctlr->iobase + reg_lcnt);
+}
+
+static void hisi_i2c_configure_bus(struct hisi_i2c_controller *ctlr)
+{
+ u32 reg, sda_hold_cnt, speed_mode;
+
+ i2c_parse_fw_timings(ctlr->dev, &ctlr->t, true);
+ ctlr->spk_len = NSEC_TO_CYCLES(ctlr->t.digital_filter_width_ns, ctlr->clk_rate_khz);
+
+ switch (ctlr->t.bus_freq_hz) {
+ case I2C_MAX_FAST_MODE_FREQ:
+ speed_mode = HISI_I2C_FAST_SPEED_MODE;
+ hisi_i2c_set_scl(ctlr, 26, 76, HISI_I2C_FS_SCL_HCNT, HISI_I2C_FS_SCL_LCNT);
+ break;
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
+ speed_mode = HISI_I2C_HIGH_SPEED_MODE;
+ hisi_i2c_set_scl(ctlr, 6, 22, HISI_I2C_HS_SCL_HCNT, HISI_I2C_HS_SCL_LCNT);
+ break;
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ default:
+ speed_mode = HISI_I2C_STD_SPEED_MODE;
+
+ /* For the default case, force the bus speed to standard mode. */
+ ctlr->t.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ hisi_i2c_set_scl(ctlr, 40, 87, HISI_I2C_SS_SCL_HCNT, HISI_I2C_SS_SCL_LCNT);
+ break;
+ }
+
+ reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL);
+ reg &= ~HISI_I2C_FRAME_CTRL_SPEED_MODE;
+ reg |= FIELD_PREP(HISI_I2C_FRAME_CTRL_SPEED_MODE, speed_mode);
+ writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL);
+
+ sda_hold_cnt = NSEC_TO_CYCLES(ctlr->t.sda_hold_ns, ctlr->clk_rate_khz);
+
+ reg = FIELD_PREP(HISI_I2C_SDA_HOLD_TX, sda_hold_cnt);
+ writel(reg, ctlr->iobase + HISI_I2C_SDA_HOLD);
+
+ writel(ctlr->spk_len, ctlr->iobase + HISI_I2C_FS_SPK_LEN);
+
+ reg = FIELD_PREP(HISI_I2C_FIFO_RX_AF_THRESH, HISI_I2C_RX_F_AF_THRESH);
+ reg |= FIELD_PREP(HISI_I2C_FIFO_TX_AE_THRESH, HISI_I2C_TX_F_AE_THRESH);
+ writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
+}
+
+static int hisi_i2c_probe(struct platform_device *pdev)
+{
+ struct hisi_i2c_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct i2c_adapter *adapter;
+ u64 clk_rate_hz;
+ u32 hw_version;
+ int ret;
+
+ ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctlr->iobase))
+ return PTR_ERR(ctlr->iobase);
+
+ ctlr->irq = platform_get_irq(pdev, 0);
+ if (ctlr->irq < 0)
+ return ctlr->irq;
+
+ ctlr->dev = dev;
+
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+
+ ret = devm_request_irq(dev, ctlr->irq, hisi_i2c_irq, 0, "hisi-i2c", ctlr);
+ if (ret) {
+ dev_err(dev, "failed to request irq handler, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz);
+ if (ret) {
+ dev_err(dev, "failed to get clock frequency, ret = %d\n", ret);
+ return ret;
+ }
+
+ ctlr->clk_rate_khz = DIV_ROUND_UP_ULL(clk_rate_hz, HZ_PER_KHZ);
+
+ hisi_i2c_configure_bus(ctlr);
+
+ adapter = &ctlr->adapter;
+ snprintf(adapter->name, sizeof(adapter->name),
+ "HiSilicon I2C Controller %s", dev_name(dev));
+ adapter->owner = THIS_MODULE;
+ adapter->algo = &hisi_i2c_algo;
+ adapter->dev.parent = dev;
+ i2c_set_adapdata(adapter, ctlr);
+
+ ret = devm_i2c_add_adapter(dev, adapter);
+ if (ret)
+ return ret;
+
+ hw_version = readl(ctlr->iobase + HISI_I2C_VERSION);
+ dev_info(ctlr->dev, "speed mode is %s. hw version 0x%x\n",
+ i2c_freq_mode_string(ctlr->t.bus_freq_hz), hw_version);
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_i2c_acpi_ids[] = {
+ { "HISI03D1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_i2c_acpi_ids);
+
+static struct platform_driver hisi_i2c_driver = {
+ .probe = hisi_i2c_probe,
+ .driver = {
+ .name = "hisi-i2c",
+ .acpi_match_table = hisi_i2c_acpi_ids,
+ },
+};
+module_platform_driver(hisi_i2c_driver);
+
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon I2C Controller Driver");
+MODULE_LICENSE("GPL");
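To make the arithmetic in hisi_i2c_set_scl() and NSEC_TO_CYCLES() concrete, here is a worked example under assumed numbers: a 250 MHz core clock, Fast mode at 400 kHz with the 26/76 ratio, 300 ns rise and fall times, and no digital filter. None of these values come from the patch.

/*
 * Worked example for hisi_i2c_set_scl() (assumed inputs, not from the
 * patch): clk_rate_khz = 250000, bus_freq_hz = 400000, divide/divisor
 * = 26/76 (Fast mode), scl_rise_ns = scl_fall_ns = 300, spk_len = 0.
 *
 *   total_cnt    = DIV_ROUND_UP(250000 * 1000, 400000)  = 625
 *   t_scl_hcnt   = DIV_ROUND_UP(625 * 26, 76)           = 214
 *   t_scl_lcnt   = 625 - 214                            = 411
 *   scl_fall_cnt = DIV_ROUND_UP(250000 * 300, 1000000)  = 75
 *   scl_rise_cnt = DIV_ROUND_UP(250000 * 300, 1000000)  = 75
 *   scl_hcnt     = 214 - 0 - 7 - 75                     = 132
 *   scl_lcnt     = 411 - 1 - 75                         = 335
 */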
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4acee6f9e5a3..99d446763530 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -73,6 +73,7 @@
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
* Alder Lake-S (PCH) 0x7aa3 32 hard yes yes yes
* Alder Lake-P (PCH) 0x51a3 32 hard yes yes yes
+ * Alder Lake-M (PCH) 0x54a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -230,6 +231,7 @@
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
{ 0, }
};
@@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 66c9923fc766..c8c422e9dda4 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -54,7 +54,6 @@ struct icy_i2c {
void __iomem *reg_s0;
void __iomem *reg_s1;
- struct fwnode_handle *ltc2990_fwnode;
struct i2c_client *ltc2990_client;
};
@@ -115,6 +114,10 @@ static const struct property_entry icy_ltc2990_props[] = {
{ }
};
+static const struct software_node icy_ltc2990_node = {
+ .properties = icy_ltc2990_props,
+};
+
static int icy_probe(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
@@ -123,6 +126,7 @@ static int icy_probe(struct zorro_dev *z,
struct fwnode_handle *new_fwnode;
struct i2c_board_info ltc2990_info = {
.type = "ltc2990",
+ .swnode = &icy_ltc2990_node,
};
i2c = devm_kzalloc(&z->dev, sizeof(*i2c), GFP_KERNEL);
@@ -174,26 +178,10 @@ static int icy_probe(struct zorro_dev *z,
*
* See property_entry above for in1, in2, temp3.
*/
- new_fwnode = fwnode_create_software_node(icy_ltc2990_props, NULL);
- if (IS_ERR(new_fwnode)) {
- dev_info(&z->dev, "Failed to create fwnode for LTC2990, error: %ld\n",
- PTR_ERR(new_fwnode));
- } else {
- /*
- * Store the fwnode so we can destroy it on .remove().
- * Only store it on success, as fwnode_remove_software_node()
- * is NULL safe, but not PTR_ERR safe.
- */
- i2c->ltc2990_fwnode = new_fwnode;
- ltc2990_info.fwnode = new_fwnode;
-
- i2c->ltc2990_client =
- i2c_new_scanned_device(&i2c->adapter,
- &ltc2990_info,
- icy_ltc2990_addresses,
- NULL);
- }
-
+ i2c->ltc2990_client = i2c_new_scanned_device(&i2c->adapter,
+ &ltc2990_info,
+ icy_ltc2990_addresses,
+ NULL);
return 0;
}
@@ -202,8 +190,6 @@ static void icy_remove(struct zorro_dev *z)
struct icy_i2c *i2c = dev_get_drvdata(&z->dev);
i2c_unregister_device(i2c->ltc2990_client);
- fwnode_remove_software_node(i2c->ltc2990_fwnode);
-
i2c_del_adapter(&i2c->adapter);
}
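The icy change above illustrates a general pattern: instead of creating a software fwnode at probe time and destroying it on remove, the properties and node are declared statically and attached through i2c_board_info::swnode, so the I2C core manages the fwnode's lifetime. A generic sketch of the pattern (all names here are hypothetical, not from the patch):

/* Generic static software-node pattern (hypothetical names). */
static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U32("shunt-resistor-micro-ohms", 10000),
	{ }
};

static const struct software_node example_node = {
	.properties = example_props,
};

static const struct i2c_board_info example_info = {
	I2C_BOARD_INFO("example-sensor", 0x40),
	.swnode = &example_node,
};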
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 98a89301ed2a..8e987945ed45 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
atomic = true;
}
- ret = pm_runtime_get_sync(adap->dev.parent);
+ ret = pm_runtime_resume_and_get(adap->dev.parent);
if (ret < 0)
return ret;
@@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
u32 rev;
int ret;
- ret = pm_runtime_get_sync(i2c->adap.dev.parent);
+ ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
if (ret < 0)
return ret;
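This img-scb conversion, and the imx-lpi2c and imx ones below, all rest on the same distinction: pm_runtime_get_sync() raises the device usage count even when it fails, so an error path that simply returns (as the removed call sites did) leaks a reference, while pm_runtime_resume_and_get() drops the count itself on failure. A side-by-side sketch with a hypothetical helper:

/* Sketch contrasting the two patterns (hypothetical helper). */
static int example_resume_device(struct device *dev)
{
	int ret;

	/* Old pattern: the count stays raised on failure, so correct
	 * error handling needs an explicit pm_runtime_put_noidle().
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	pm_runtime_put(dev);

	/* New pattern: no cleanup needed on the error path. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
	pm_runtime_put(dev);

	return 0;
}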
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 9db6ccded5e9..8b9ba055c418 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
unsigned int temp;
int ret;
- ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
+ ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b80fdc1f0092..dc5ca71906db 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
i2c_imx->last_slave_event = I2C_SLAVE_STOP;
/* Resume */
- ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
if (ret < 0) {
dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
return ret;
@@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
int result;
- result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
if (result < 0)
return result;
@@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 2f8b8050a223..cfecaf18ccbb 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -125,10 +125,12 @@ iop3xx_i2c_error(u32 sr)
int rc = 0;
if ((sr & IOP3XX_ISR_BERRD)) {
- if ( !rc ) rc = -I2C_ERR_BERR;
+ if (!rc)
+ rc = -I2C_ERR_BERR;
}
if ((sr & IOP3XX_ISR_ALD)) {
- if ( !rc ) rc = -I2C_ERR_ALD;
+ if (!rc)
+ rc = -I2C_ERR_ALD;
}
return rc;
}
@@ -151,12 +153,12 @@ iop3xx_i2c_get_srstat(struct i2c_algo_iop3xx_data *iop3xx_adap)
* sleep until interrupted, then recover and analyse the SR
* saved by handler
*/
-typedef int (* compare_func)(unsigned test, unsigned mask);
+typedef int (*compare_func)(unsigned test, unsigned mask);
/* returns 1 on correct comparison */
static int
iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap,
- unsigned flags, unsigned* status,
+ unsigned flags, unsigned *status,
compare_func compare)
{
unsigned sr = 0;
@@ -167,7 +169,7 @@ iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap,
do {
interrupted = wait_event_interruptible_timeout (
iop3xx_adap->waitq,
- (done = compare( sr = iop3xx_i2c_get_srstat(iop3xx_adap) ,flags )),
+ (done = compare(sr = iop3xx_i2c_get_srstat(iop3xx_adap), flags)),
1 * HZ
);
if ((rc = iop3xx_i2c_error(sr)) < 0) {
@@ -177,7 +179,7 @@ iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap,
*status = sr;
return -ETIMEDOUT;
}
- } while(!done);
+ } while (!done);
*status = sr;
@@ -204,7 +206,7 @@ iop3xx_i2c_wait_tx_done(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status)
{
return iop3xx_i2c_wait_event(
iop3xx_adap,
- IOP3XX_ISR_TXEMPTY | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD,
+ IOP3XX_ISR_TXEMPTY | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD,
status, any_bits_set);
}
@@ -226,7 +228,7 @@ iop3xx_i2c_wait_idle(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status)
static int
iop3xx_i2c_send_target_addr(struct i2c_algo_iop3xx_data *iop3xx_adap,
- struct i2c_msg* msg)
+ struct i2c_msg *msg)
{
unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET);
int status;
@@ -273,7 +275,7 @@ iop3xx_i2c_write_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char byte,
}
static int
-iop3xx_i2c_read_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char* byte,
+iop3xx_i2c_read_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char *byte,
int stop)
{
unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET);
@@ -305,7 +307,7 @@ iop3xx_i2c_writebytes(struct i2c_adapter *i2c_adap, const char *buf, int count)
int rc = 0;
for (ii = 0; rc == 0 && ii != count; ++ii)
- rc = iop3xx_i2c_write_byte(iop3xx_adap, buf[ii], ii==count-1);
+ rc = iop3xx_i2c_write_byte(iop3xx_adap, buf[ii], ii == count-1);
return rc;
}
@@ -317,7 +319,7 @@ iop3xx_i2c_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count)
int rc = 0;
for (ii = 0; rc == 0 && ii != count; ++ii)
- rc = iop3xx_i2c_read_byte(iop3xx_adap, &buf[ii], ii==count-1);
+ rc = iop3xx_i2c_read_byte(iop3xx_adap, &buf[ii], ii == count-1);
return rc;
}
@@ -330,7 +332,7 @@ iop3xx_i2c_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count)
* condition.
*/
static int
-iop3xx_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg* pmsg)
+iop3xx_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg *pmsg)
{
struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data;
int rc;
@@ -369,7 +371,7 @@ iop3xx_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
iop3xx_i2c_transaction_cleanup(iop3xx_adap);
- if(ret)
+ if (ret)
return ret;
return im;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 55177eb21d7b..baa7319eee53 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err;
+ i2c->irq = ret;
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret)
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 2fb0532d8a16..8716032f030a 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -172,12 +172,6 @@
#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
-enum {
- MLXBF_I2C_TIMING_100KHZ = 100000,
- MLXBF_I2C_TIMING_400KHZ = 400000,
- MLXBF_I2C_TIMING_1000KHZ = 1000000,
-};
-
/*
* Defines SMBus operating frequency and core clock frequency.
* According to ADB files, default values are compliant to 100KHz SMBus
@@ -1202,7 +1196,7 @@ static int mlxbf_i2c_init_timings(struct platform_device *pdev,
ret = device_property_read_u32(dev, "clock-frequency", &config_khz);
if (ret < 0)
- config_khz = MLXBF_I2C_TIMING_100KHZ;
+ config_khz = I2C_MAX_STANDARD_MODE_FREQ;
switch (config_khz) {
default:
@@ -1210,15 +1204,15 @@ static int mlxbf_i2c_init_timings(struct platform_device *pdev,
pr_warn("Illegal value %d: defaulting to 100 KHz\n",
config_khz);
fallthrough;
- case MLXBF_I2C_TIMING_100KHZ:
+ case I2C_MAX_STANDARD_MODE_FREQ:
config_idx = MLXBF_I2C_TIMING_CONFIG_100KHZ;
break;
- case MLXBF_I2C_TIMING_400KHZ:
+ case I2C_MAX_FAST_MODE_FREQ:
config_idx = MLXBF_I2C_TIMING_CONFIG_400KHZ;
break;
- case MLXBF_I2C_TIMING_1000KHZ:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
config_idx = MLXBF_I2C_TIMING_CONFIG_1000KHZ;
break;
}
@@ -2376,6 +2370,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
mlxbf_i2c_init_slave(pdev, priv);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
dev_name(dev), priv);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index d94f05c8b8b7..30d9e89a3db2 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -1,16 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * (C) Copyright 2003-2004
- * Humboldt Solutions Ltd, adrian@humboldt.co.uk.
-
* This is a combined i2c adapter and algorithm driver for the
* MPC107/Tsi107 PowerPC northbridge and processors that include
* the same I2C unit (8240, 8245, 85xx).
*
- * Release 0.8
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
+ * Copyright (C) 2003-2004 Humboldt Solutions Ltd, adrian@humboldt.co.uk
+ * Copyright (C) 2021 Allied Telesis Labs
*/
#include <linux/kernel.h>
@@ -19,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -58,18 +54,50 @@
#define CSR_MIF 0x02
#define CSR_RXAK 0x01
+enum mpc_i2c_action {
+ MPC_I2C_ACTION_START = 1,
+ MPC_I2C_ACTION_RESTART,
+ MPC_I2C_ACTION_READ_BEGIN,
+ MPC_I2C_ACTION_READ_BYTE,
+ MPC_I2C_ACTION_WRITE,
+ MPC_I2C_ACTION_STOP,
+
+ __MPC_I2C_ACTION_CNT
+};
+
+static const char * const action_str[] = {
+ "invalid",
+ "start",
+ "restart",
+ "read begin",
+ "read",
+ "write",
+ "stop",
+};
+
+static_assert(ARRAY_SIZE(action_str) == __MPC_I2C_ACTION_CNT);
+
struct mpc_i2c {
struct device *dev;
void __iomem *base;
u32 interrupt;
- wait_queue_head_t queue;
+ wait_queue_head_t waitq;
+ spinlock_t lock;
struct i2c_adapter adap;
int irq;
u32 real_clk;
-#ifdef CONFIG_PM_SLEEP
u8 fdr, dfsrr;
-#endif
struct clk *clk_per;
+ u32 cntl_bits;
+ enum mpc_i2c_action action;
+ struct i2c_msg *msgs;
+ int num_msgs;
+ int curr_msg;
+ u32 byte_posn;
+ u32 block;
+ int rc;
+ int expect_rxack;
+
};
struct mpc_i2c_divider {
@@ -86,19 +114,6 @@ static inline void writeccr(struct mpc_i2c *i2c, u32 x)
writeb(x, i2c->base + MPC_I2C_CR);
}
-static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
-{
- struct mpc_i2c *i2c = dev_id;
- if (readb(i2c->base + MPC_I2C_SR) & CSR_MIF) {
- /* Read again to allow register to stabilise */
- i2c->interrupt = readb(i2c->base + MPC_I2C_SR);
- writeb(0, i2c->base + MPC_I2C_SR);
- wake_up(&i2c->queue);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
-
/* Sometimes 9th clock pulse isn't generated, and slave doesn't release
* the bus, because it wants to send ACK.
* Following sequence of enabling/disabling and sending start/stop generates
@@ -121,61 +136,6 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
}
}
-static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
-{
- unsigned long orig_jiffies = jiffies;
- u32 cmd_err;
- int result = 0;
-
- if (!i2c->irq) {
- while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
- schedule();
- if (time_after(jiffies, orig_jiffies + timeout)) {
- dev_dbg(i2c->dev, "timeout\n");
- writeccr(i2c, 0);
- result = -ETIMEDOUT;
- break;
- }
- }
- cmd_err = readb(i2c->base + MPC_I2C_SR);
- writeb(0, i2c->base + MPC_I2C_SR);
- } else {
- /* Interrupt mode */
- result = wait_event_timeout(i2c->queue,
- (i2c->interrupt & CSR_MIF), timeout);
-
- if (unlikely(!(i2c->interrupt & CSR_MIF))) {
- dev_dbg(i2c->dev, "wait timeout\n");
- writeccr(i2c, 0);
- result = -ETIMEDOUT;
- }
-
- cmd_err = i2c->interrupt;
- i2c->interrupt = 0;
- }
-
- if (result < 0)
- return result;
-
- if (!(cmd_err & CSR_MCF)) {
- dev_dbg(i2c->dev, "unfinished\n");
- return -EIO;
- }
-
- if (cmd_err & CSR_MAL) {
- dev_dbg(i2c->dev, "MAL\n");
- return -EAGAIN;
- }
-
- if (writing && (cmd_err & CSR_RXAK)) {
- dev_dbg(i2c->dev, "No RXAK\n");
- /* generate stop */
- writeccr(i2c, CCR_MEN);
- return -ENXIO;
- }
- return 0;
-}
-
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
@@ -415,7 +375,7 @@ static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
}
*real_clk = fsl_get_sys_freq() / prescaler / div->divider;
- return div ? (int)div->fdr : -EINVAL;
+ return (int)div->fdr;
}
static void mpc_i2c_setup_8xxx(struct device_node *node,
@@ -450,168 +410,209 @@ static void mpc_i2c_setup_8xxx(struct device_node *node,
}
#endif /* CONFIG_FSL_SOC */
-static void mpc_i2c_start(struct mpc_i2c *i2c)
+static void mpc_i2c_finish(struct mpc_i2c *i2c, int rc)
{
- /* Clear arbitration */
- writeb(0, i2c->base + MPC_I2C_SR);
- /* Start with MEN */
- writeccr(i2c, CCR_MEN);
+ i2c->rc = rc;
+ i2c->block = 0;
+ i2c->cntl_bits = CCR_MEN;
+ writeccr(i2c, i2c->cntl_bits);
+ wake_up(&i2c->waitq);
}
-static void mpc_i2c_stop(struct mpc_i2c *i2c)
+static void mpc_i2c_do_action(struct mpc_i2c *i2c)
{
- writeccr(i2c, CCR_MEN);
-}
+ struct i2c_msg *msg = &i2c->msgs[i2c->curr_msg];
+ int dir = 0;
+ int recv_len = 0;
+ u8 byte;
+
+ dev_dbg(i2c->dev, "action = %s\n", action_str[i2c->action]);
+
+ i2c->cntl_bits &= ~(CCR_RSTA | CCR_MTX | CCR_TXAK);
+
+ if (msg->flags & I2C_M_RD)
+ dir = 1;
+ if (msg->flags & I2C_M_RECV_LEN)
+ recv_len = 1;
+
+ switch (i2c->action) {
+ case MPC_I2C_ACTION_RESTART:
+ i2c->cntl_bits |= CCR_RSTA;
+ fallthrough;
+
+ case MPC_I2C_ACTION_START:
+ i2c->cntl_bits |= CCR_MSTA | CCR_MTX;
+ writeccr(i2c, i2c->cntl_bits);
+ writeb((msg->addr << 1) | dir, i2c->base + MPC_I2C_DR);
+ i2c->expect_rxack = 1;
+ i2c->action = dir ? MPC_I2C_ACTION_READ_BEGIN : MPC_I2C_ACTION_WRITE;
+ break;
+
+ case MPC_I2C_ACTION_READ_BEGIN:
+ if (msg->len) {
+ if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
+ i2c->cntl_bits |= CCR_TXAK;
+
+ writeccr(i2c, i2c->cntl_bits);
+ /* Dummy read */
+ readb(i2c->base + MPC_I2C_DR);
+ }
+ i2c->action = MPC_I2C_ACTION_READ_BYTE;
+ break;
+
+ case MPC_I2C_ACTION_READ_BYTE:
+ if (i2c->byte_posn || !recv_len) {
+ /* Generate Tx ACK on next to last byte */
+ if (i2c->byte_posn == msg->len - 2)
+ i2c->cntl_bits |= CCR_TXAK;
+ /* Do not generate stop on last byte */
+ if (i2c->byte_posn == msg->len - 1)
+ i2c->cntl_bits |= CCR_MTX;
-static int mpc_write(struct mpc_i2c *i2c, int target,
- const u8 *data, int length, int restart)
-{
- int i, result;
- unsigned timeout = i2c->adap.timeout;
- u32 flags = restart ? CCR_RSTA : 0;
+ writeccr(i2c, i2c->cntl_bits);
+ }
- /* Start as master */
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
- /* Write target byte */
- writeb((target << 1), i2c->base + MPC_I2C_DR);
+ byte = readb(i2c->base + MPC_I2C_DR);
- result = i2c_wait(i2c, timeout, 1);
- if (result < 0)
- return result;
+ if (i2c->byte_posn == 0 && recv_len) {
+ if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) {
+ mpc_i2c_finish(i2c, -EPROTO);
+ return;
+ }
+ msg->len += byte;
+ /*
+ * For block reads, generate Tx ACK here if data length
+ * is 1 byte (total length is 2 bytes).
+ */
+ if (msg->len == 2) {
+ i2c->cntl_bits |= CCR_TXAK;
+ writeccr(i2c, i2c->cntl_bits);
+ }
+ }
+
+ dev_dbg(i2c->dev, "%s %02x\n", action_str[i2c->action], byte);
+ msg->buf[i2c->byte_posn++] = byte;
+ break;
+
+ case MPC_I2C_ACTION_WRITE:
+ dev_dbg(i2c->dev, "%s %02x\n", action_str[i2c->action],
+ msg->buf[i2c->byte_posn]);
+ writeb(msg->buf[i2c->byte_posn++], i2c->base + MPC_I2C_DR);
+ i2c->expect_rxack = 1;
+ break;
- for (i = 0; i < length; i++) {
- /* Write data byte */
- writeb(data[i], i2c->base + MPC_I2C_DR);
+ case MPC_I2C_ACTION_STOP:
+ mpc_i2c_finish(i2c, 0);
+ break;
- result = i2c_wait(i2c, timeout, 1);
- if (result < 0)
- return result;
+ default:
+ WARN(1, "Unexpected action %d\n", i2c->action);
+ break;
}
- return 0;
+ if (msg->len == i2c->byte_posn) {
+ i2c->curr_msg++;
+ i2c->byte_posn = 0;
+
+ if (i2c->curr_msg == i2c->num_msgs) {
+ i2c->action = MPC_I2C_ACTION_STOP;
+ /*
+ * We don't get another interrupt on read, so
+ * finish the transfer now.
+ */
+ if (dir)
+ mpc_i2c_finish(i2c, 0);
+ } else {
+ i2c->action = MPC_I2C_ACTION_RESTART;
+ }
+ }
}
-static int mpc_read(struct mpc_i2c *i2c, int target,
- u8 *data, int length, int restart, bool recv_len)
+static void mpc_i2c_do_intr(struct mpc_i2c *i2c, u8 status)
{
- unsigned timeout = i2c->adap.timeout;
- int i, result;
- u32 flags = restart ? CCR_RSTA : 0;
+ spin_lock(&i2c->lock);
- /* Switch to read - restart */
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
- /* Write target address byte - this time with the read flag set */
- writeb((target << 1) | 1, i2c->base + MPC_I2C_DR);
+ if (!(status & CSR_MCF)) {
+ dev_dbg(i2c->dev, "unfinished\n");
+ mpc_i2c_finish(i2c, -EIO);
+ goto out;
+ }
- result = i2c_wait(i2c, timeout, 1);
- if (result < 0)
- return result;
+ if (status & CSR_MAL) {
+ dev_dbg(i2c->dev, "arbitration lost\n");
+ mpc_i2c_finish(i2c, -EAGAIN);
+ goto out;
+ }
- if (length) {
- if (length == 1 && !recv_len)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK);
- else
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA);
- /* Dummy read */
- readb(i2c->base + MPC_I2C_DR);
+ if (i2c->expect_rxack && (status & CSR_RXAK)) {
+ dev_dbg(i2c->dev, "no Rx ACK\n");
+ mpc_i2c_finish(i2c, -ENXIO);
+ goto out;
}
+ i2c->expect_rxack = 0;
- for (i = 0; i < length; i++) {
- u8 byte;
-
- result = i2c_wait(i2c, timeout, 0);
- if (result < 0)
- return result;
-
- /*
- * For block reads, we have to know the total length (1st byte)
- * before we can determine if we are done.
- */
- if (i || !recv_len) {
- /* Generate txack on next to last byte */
- if (i == length - 2)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
- | CCR_TXAK);
- /* Do not generate stop on last byte */
- if (i == length - 1)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
- | CCR_MTX);
- }
+ mpc_i2c_do_action(i2c);
- byte = readb(i2c->base + MPC_I2C_DR);
+out:
+ spin_unlock(&i2c->lock);
+}
- /*
- * Adjust length if first received byte is length.
- * The length is 1 length byte plus actually data length
- */
- if (i == 0 && recv_len) {
- if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX)
- return -EPROTO;
- length += byte;
- /*
- * For block reads, generate txack here if data length
- * is 1 byte (total length is 2 bytes).
- */
- if (length == 2)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
- | CCR_TXAK);
- }
- data[i] = byte;
+static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
+{
+ struct mpc_i2c *i2c = dev_id;
+ u8 status;
+
+ status = readb(i2c->base + MPC_I2C_SR);
+ if (status & CSR_MIF) {
+ writeb(0, i2c->base + MPC_I2C_SR);
+ mpc_i2c_do_intr(i2c, status);
+ return IRQ_HANDLED;
}
+ return IRQ_NONE;
+}
- return length;
+static int mpc_i2c_wait_for_completion(struct mpc_i2c *i2c)
+{
+ long time_left;
+
+ time_left = wait_event_timeout(i2c->waitq, !i2c->block, i2c->adap.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+ if (time_left < 0)
+ return time_left;
+
+ return 0;
}
-static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+static int mpc_i2c_execute_msg(struct mpc_i2c *i2c)
{
- struct i2c_msg *pmsg;
- int i;
- int ret = 0;
- unsigned long orig_jiffies = jiffies;
- struct mpc_i2c *i2c = i2c_get_adapdata(adap);
+ unsigned long orig_jiffies;
+ unsigned long flags;
+ int ret;
- mpc_i2c_start(i2c);
+ spin_lock_irqsave(&i2c->lock, flags);
- /* Allow bus up to 1s to become not busy */
- while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
- if (signal_pending(current)) {
- dev_dbg(i2c->dev, "Interrupted\n");
- writeccr(i2c, 0);
- return -EINTR;
- }
- if (time_after(jiffies, orig_jiffies + HZ)) {
- u8 status = readb(i2c->base + MPC_I2C_SR);
+ i2c->curr_msg = 0;
+ i2c->rc = 0;
+ i2c->byte_posn = 0;
+ i2c->block = 1;
+ i2c->action = MPC_I2C_ACTION_START;
- dev_dbg(i2c->dev, "timeout\n");
- if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
- writeb(status & ~CSR_MAL,
- i2c->base + MPC_I2C_SR);
- mpc_i2c_fixup(i2c);
- }
- return -EIO;
- }
- schedule();
- }
+ i2c->cntl_bits = CCR_MEN | CCR_MIEN;
+ writeb(0, i2c->base + MPC_I2C_SR);
+ writeccr(i2c, i2c->cntl_bits);
+
+ mpc_i2c_do_action(i2c);
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ ret = mpc_i2c_wait_for_completion(i2c);
+ if (ret)
+ i2c->rc = ret;
+
+ if (i2c->rc == -EIO || i2c->rc == -EAGAIN || i2c->rc == -ETIMEDOUT)
+ i2c_recover_bus(&i2c->adap);
- for (i = 0; ret >= 0 && i < num; i++) {
- pmsg = &msgs[i];
- dev_dbg(i2c->dev,
- "Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
- pmsg->len, pmsg->addr, i + 1, num);
- if (pmsg->flags & I2C_M_RD) {
- bool recv_len = pmsg->flags & I2C_M_RECV_LEN;
-
- ret = mpc_read(i2c, pmsg->addr, pmsg->buf, pmsg->len, i,
- recv_len);
- if (recv_len && ret > 0)
- pmsg->len = ret;
- } else {
- ret =
- mpc_write(i2c, pmsg->addr, pmsg->buf, pmsg->len, i);
- }
- }
- mpc_i2c_stop(i2c); /* Initiate STOP */
orig_jiffies = jiffies;
/* Wait until STOP is seen, allow up to 1 s */
while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
@@ -622,13 +623,41 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
writeb(status & ~CSR_MAL,
i2c->base + MPC_I2C_SR);
- mpc_i2c_fixup(i2c);
+ i2c_recover_bus(&i2c->adap);
}
return -EIO;
}
cond_resched();
}
- return (ret < 0) ? ret : num;
+
+ return i2c->rc;
+}
+
+static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ int rc, ret = num;
+ struct mpc_i2c *i2c = i2c_get_adapdata(adap);
+ int i;
+
+ dev_dbg(i2c->dev, "num = %d\n", num);
+ for (i = 0; i < num; i++)
+ dev_dbg(i2c->dev, " addr = %02x, flags = %02x, len = %d, %*ph\n",
+ msgs[i].addr, msgs[i].flags, msgs[i].len,
+ msgs[i].flags & I2C_M_RD ? 0 : msgs[i].len,
+ msgs[i].buf);
+
+ WARN_ON(i2c->msgs != NULL);
+ i2c->msgs = msgs;
+ i2c->num_msgs = num;
+
+ rc = mpc_i2c_execute_msg(i2c);
+ if (rc < 0)
+ ret = rc;
+
+ i2c->num_msgs = 0;
+ i2c->msgs = NULL;
+
+ return ret;
}
static u32 mpc_functionality(struct i2c_adapter *adap)
@@ -637,6 +666,15 @@ static u32 mpc_functionality(struct i2c_adapter *adap)
| I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
}
+static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
+{
+ struct mpc_i2c *i2c = i2c_get_adapdata(adap);
+
+ mpc_i2c_fixup(i2c);
+
+ return 0;
+}
+
static const struct i2c_algorithm mpc_algo = {
.master_xfer = mpc_xfer,
.functionality = mpc_functionality,
@@ -648,63 +686,61 @@ static struct i2c_adapter mpc_ops = {
.timeout = HZ,
};
-static const struct of_device_id mpc_i2c_of_match[];
+static struct i2c_bus_recovery_info fsl_i2c_recovery_info = {
+ .recover_bus = fsl_i2c_bus_recovery,
+};
+
static int fsl_i2c_probe(struct platform_device *op)
{
- const struct of_device_id *match;
+ const struct mpc_i2c_data *data;
struct mpc_i2c *i2c;
const u32 *prop;
u32 clock = MPC_I2C_CLOCK_LEGACY;
int result = 0;
int plen;
- struct resource res;
struct clk *clk;
int err;
- match = of_match_device(mpc_i2c_of_match, &op->dev);
- if (!match)
- return -EINVAL;
-
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ i2c = devm_kzalloc(&op->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
i2c->dev = &op->dev; /* for debug and error output */
- init_waitqueue_head(&i2c->queue);
+ init_waitqueue_head(&i2c->waitq);
+ spin_lock_init(&i2c->lock);
- i2c->base = of_iomap(op->dev.of_node, 0);
- if (!i2c->base) {
- dev_err(i2c->dev, "failed to map controller\n");
- result = -ENOMEM;
- goto fail_map;
- }
+ i2c->base = devm_platform_ioremap_resource(op, 0);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
- i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
- if (i2c->irq) { /* no i2c->irq implies polling */
- result = request_irq(i2c->irq, mpc_i2c_isr,
- IRQF_SHARED, "i2c-mpc", i2c);
- if (result < 0) {
- dev_err(i2c->dev, "failed to attach interrupt\n");
- goto fail_request;
- }
+ i2c->irq = platform_get_irq(op, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ result = devm_request_irq(&op->dev, i2c->irq, mpc_i2c_isr,
+ IRQF_SHARED, "i2c-mpc", i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ return result;
}
/*
* enable clock for the I2C peripheral (non fatal),
* keep a reference upon successful allocation
*/
- clk = devm_clk_get(&op->dev, NULL);
- if (!IS_ERR(clk)) {
- err = clk_prepare_enable(clk);
- if (err) {
- dev_err(&op->dev, "failed to enable clock\n");
- goto fail_request;
- } else {
- i2c->clk_per = clk;
- }
+ clk = devm_clk_get_optional(&op->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(&op->dev, "failed to enable clock\n");
+ return err;
}
+ i2c->clk_per = clk;
+
if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
@@ -714,8 +750,8 @@ static int fsl_i2c_probe(struct platform_device *op)
clock = *prop;
}
- if (match->data) {
- const struct mpc_i2c_data *data = match->data;
+ data = device_get_match_data(&op->dev);
+ if (data) {
data->setup(op->dev.of_node, i2c, clock);
} else {
/* Backwards compatibility */
@@ -731,31 +767,25 @@ static int fsl_i2c_probe(struct platform_device *op)
}
dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
- platform_set_drvdata(op, i2c);
-
i2c->adap = mpc_ops;
- of_address_to_resource(op->dev.of_node, 0, &res);
scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
- "MPC adapter at 0x%llx", (unsigned long long)res.start);
- i2c_set_adapdata(&i2c->adap, i2c);
+ "MPC adapter (%s)", of_node_full_name(op->dev.of_node));
i2c->adap.dev.parent = &op->dev;
+ i2c->adap.nr = op->id;
i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
+ i2c->adap.bus_recovery_info = &fsl_i2c_recovery_info;
+ platform_set_drvdata(op, i2c);
+ i2c_set_adapdata(&i2c->adap, i2c);
- result = i2c_add_adapter(&i2c->adap);
- if (result < 0)
+ result = i2c_add_numbered_adapter(&i2c->adap);
+ if (result)
goto fail_add;
- return result;
+ return 0;
fail_add:
- if (i2c->clk_per)
- clk_disable_unprepare(i2c->clk_per);
- free_irq(i2c->irq, i2c);
- fail_request:
- irq_dispose_mapping(i2c->irq);
- iounmap(i2c->base);
- fail_map:
- kfree(i2c);
+ clk_disable_unprepare(i2c->clk_per);
+
return result;
};
@@ -765,20 +795,12 @@ static int fsl_i2c_remove(struct platform_device *op)
i2c_del_adapter(&i2c->adap);
- if (i2c->clk_per)
- clk_disable_unprepare(i2c->clk_per);
+ clk_disable_unprepare(i2c->clk_per);
- if (i2c->irq)
- free_irq(i2c->irq, i2c);
-
- irq_dispose_mapping(i2c->irq);
- iounmap(i2c->base);
- kfree(i2c);
return 0;
};
-#ifdef CONFIG_PM_SLEEP
-static int mpc_i2c_suspend(struct device *dev)
+static int __maybe_unused mpc_i2c_suspend(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
@@ -788,7 +810,7 @@ static int mpc_i2c_suspend(struct device *dev)
return 0;
}
-static int mpc_i2c_resume(struct device *dev)
+static int __maybe_unused mpc_i2c_resume(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
@@ -797,12 +819,7 @@ static int mpc_i2c_resume(struct device *dev)
return 0;
}
-
static SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
-#define MPC_I2C_PM_OPS (&mpc_i2c_pm_ops)
-#else
-#define MPC_I2C_PM_OPS NULL
-#endif
static const struct mpc_i2c_data mpc_i2c_data_512x = {
.setup = mpc_i2c_setup_512x,
@@ -845,7 +862,7 @@ static struct platform_driver mpc_i2c_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
- .pm = MPC_I2C_PM_OPS,
+ .pm = &mpc_i2c_pm_ops,
},
};
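
The i2c-mpc hunks above are one instance of a broader conversion in this series: letting devres unwind probe failures instead of hand-rolled goto ladders. A minimal sketch of the pattern, with hypothetical my_* names and a bare-bones private struct (not the driver's actual code):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct my_priv {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t my_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Unmapped automatically on probe failure or driver unbind. */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Returns a negative errno on failure, never 0. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, my_isr, IRQF_SHARED,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	/* A missing optional clock yields NULL; clk_*() calls accept NULL. */
	priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	return clk_prepare_enable(priv->clk);
}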
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 2ffd2f354d0a..5ddfa4e56ee2 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -231,6 +231,7 @@ struct mtk_i2c {
struct i2c_adapter adap; /* i2c host adapter */
struct device *dev;
struct completion msg_complete;
+ struct i2c_timings timing_info;
/* set in i2c probe */
void __iomem *base; /* i2c base addr */
@@ -479,7 +480,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- if (i2c->dev_comp->dma_sync) {
+ if (i2c->dev_comp->apdma_sync) {
writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
udelay(10);
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
@@ -564,7 +565,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
static int mtk_i2c_max_step_cnt(unsigned int target_speed)
{
- if (target_speed > I2C_MAX_FAST_MODE_FREQ)
+ if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
return MAX_HS_STEP_CNT_DIV;
else
return MAX_STEP_CNT_DIV;
@@ -607,7 +608,8 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
else
clk_ns = sample_ns / 2;
- su_sta_cnt = DIV_ROUND_UP(spec->min_su_sta_ns, clk_ns);
+ su_sta_cnt = DIV_ROUND_UP(spec->min_su_sta_ns +
+ i2c->timing_info.scl_int_delay_ns, clk_ns);
if (su_sta_cnt > max_sta_cnt)
return -1;
@@ -635,7 +637,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
if (sda_min > sda_max)
return -3;
- if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
+ if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
if (i2c->dev_comp->ltiming_adjust) {
i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
(sample_cnt << 12) | (high_cnt << 8);
@@ -850,7 +852,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
- if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
+ if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;
if (i2c->op == I2C_MASTER_WRRD)
@@ -1067,7 +1069,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
}
}
- if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
+ if (i2c->auto_restart && num >= 2 &&
+ i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
/* ignore the first restart irq after the master code,
* otherwise the first transfer will be discarded.
*/
@@ -1175,6 +1178,8 @@ static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c)
i2c->use_push_pull =
of_property_read_bool(np, "mediatek,use-push-pull");
+ i2c_parse_fw_timings(i2c->dev, &i2c->timing_info, true);
+
return 0;
}
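
The new timing_info field is filled by the core helper i2c_parse_fw_timings(), which reads the generic timing bindings (including i2c-scl-internal-delay-ns, consumed in the su_sta_cnt hunk above). A sketch of how a bus driver pulls these properties, with an illustrative example_ name:

#include <linux/i2c.h>

static void example_read_timings(struct device *dev)
{
	struct i2c_timings t;

	/* true: properties absent from firmware get mode defaults */
	i2c_parse_fw_timings(dev, &t, true);

	dev_dbg(dev, "bus %u Hz, scl internal delay %u ns\n",
		t.bus_freq_hz, t.scl_int_delay_ns);
}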
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a3363b20f168..dc77e1c4e80f 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -277,7 +277,7 @@ static int init_hw(struct nmk_i2c_dev *dev)
goto exit;
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR , I2C_CR_PE);
+ i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
disable_all_interrupts(dev);
@@ -525,7 +525,7 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
dev->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR , I2C_CR_PE);
+ i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
init_completion(&dev->xfer_complete);
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 6b20601ffb13..b5055a3cbd93 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -262,6 +262,10 @@ static const struct property_entry ccgx_props[] = {
{ }
};
+static const struct software_node ccgx_node = {
+ .properties = ccgx_props,
+};
+
static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
{
i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
@@ -274,7 +278,7 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
sizeof(i2cd->gpu_ccgx_ucsi->type));
i2cd->gpu_ccgx_ucsi->addr = 0x8;
i2cd->gpu_ccgx_ucsi->irq = irq;
- i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
+ i2cd->gpu_ccgx_ucsi->swnode = &ccgx_node;
i2cd->ccgx_client = i2c_new_client_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
return PTR_ERR_OR_ZERO(i2cd->ccgx_client);
}
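
Together with the i2c-boardinfo and i2c-core-base hunks further down, this moves extra device properties from raw property_entry arrays to software nodes, which the driver core can attach and remove symmetrically. The shape of the pattern, with hypothetical names:

#include <linux/property.h>

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U16("example-port-number", 0),	/* illustrative */
	{ }
};

static const struct software_node example_swnode = {
	.properties = example_props,
};

/*
 * Instead of board_info.properties = example_props, the client now
 * carries board_info.swnode = &example_swnode, and the core attaches
 * it with device_add_software_node() and removes it on unregister.
 */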
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 12ac4212aded..d4f6c6d60683 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(omap->dev);
- r = pm_runtime_get_sync(omap->dev);
+ r = pm_runtime_resume_and_get(omap->dev);
if (r < 0)
- goto err_free_mem;
+ goto err_disable_pm;
/*
* Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
@@ -1513,8 +1513,8 @@ err_unuse_clocks:
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_put_sync(omap->dev);
+err_disable_pm:
pm_runtime_disable(&pdev->dev);
-err_free_mem:
return r;
}
@@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
int ret;
i2c_del_adapter(&omap->adapter);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
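
pm_runtime_get_sync() raises the usage count even when the resume fails, so every error path needed a balancing pm_runtime_put_noidle(). pm_runtime_resume_and_get() drops the reference itself on failure, which is what lets the hunks above (and the sprd, stm32f7, and xiic ones below) return directly. The resulting idiom, with illustrative example_ naming:

#include <linux/pm_runtime.h>

static int example_hw_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced */

	/* ... access the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}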
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 3e38e114948b..5241e6f414e9 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -76,11 +76,6 @@ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap,
* but I think the current API makes no sense and I don't want
* any driver that I haven't verified for correctness to go
* anywhere near a pmac i2c bus anyway ...
- *
- * I'm also not completely sure what kind of phases to do between
- * the actual command and the data (what I am _supposed_ to do that
- * is). For now, I assume writes are a single stream and reads have
- * a repeat start/addr phase (but not stop in between)
*/
case I2C_SMBUS_BLOCK_DATA:
buf = data->block;
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 1c259b5188de..c63d5545fc2a 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -569,9 +569,9 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].mode = I2C_MODE_STANDARD;
ret = of_property_read_u32(child, "clock-frequency", &val);
if (!ret) {
- if (val == 400000)
+ if (val == I2C_MAX_FAST_MODE_FREQ)
cci->master[idx].mode = I2C_MODE_FAST;
- else if (val == 1000000)
+ else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
cci->master[idx].mode = I2C_MODE_FAST_PLUS;
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 12f6d452c0f7..327c092a4130 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -141,6 +141,7 @@ struct rcar_i2c_priv {
enum dma_data_direction dma_direction;
struct reset_control *rstc;
+ bool atomic_xfer;
int irq;
struct i2c_client *host_notify_client;
@@ -353,7 +354,9 @@ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
rcar_i2c_write(priv, ICMSR, 0);
}
- rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
+
+ if (!priv->atomic_xfer)
+ rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
}
static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
@@ -418,7 +421,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
int len;
/* Do various checks to see if DMA is feasible at all */
- if (IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
+ if (priv->atomic_xfer || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
!(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA))
return false;
@@ -646,7 +649,8 @@ static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
/* Nack */
if (msr & MNR) {
/* HW automatically sends STOP after received NACK */
- rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
+ if (!priv->atomic_xfer)
+ rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
priv->flags |= ID_NACK;
goto out;
}
@@ -667,7 +671,8 @@ out:
if (priv->flags & ID_DONE) {
rcar_i2c_write(priv, ICMIER, 0);
rcar_i2c_write(priv, ICMSR, 0);
- wake_up(&priv->wait);
+ if (!priv->atomic_xfer)
+ wake_up(&priv->wait);
}
return IRQ_HANDLED;
@@ -684,7 +689,8 @@ static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
/* Only handle interrupts that are currently enabled */
msr = rcar_i2c_read(priv, ICMSR);
- msr &= rcar_i2c_read(priv, ICMIER);
+ if (!priv->atomic_xfer)
+ msr &= rcar_i2c_read(priv, ICMIER);
return rcar_i2c_irq(irq, priv, msr);
}
@@ -696,7 +702,8 @@ static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
/* Only handle interrupts that are currently enabled */
msr = rcar_i2c_read(priv, ICMSR);
- msr &= rcar_i2c_read(priv, ICMIER);
+ if (!priv->atomic_xfer)
+ msr &= rcar_i2c_read(priv, ICMIER);
/*
* Clear START or STOP immediately, except for REPSTART after read or
@@ -804,6 +811,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
int i, ret;
long time_left;
+ priv->atomic_xfer = false;
+
pm_runtime_get_sync(dev);
/* Check bus state before init otherwise bus busy info will be lost */
@@ -858,6 +867,68 @@ out:
return ret;
}
+static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
+ struct device *dev = rcar_i2c_priv_to_dev(priv);
+ unsigned long j;
+ bool time_left;
+ int ret;
+
+ priv->atomic_xfer = true;
+
+ pm_runtime_get_sync(dev);
+
+ /* Check bus state before init otherwise bus busy info will be lost */
+ ret = rcar_i2c_bus_barrier(priv);
+ if (ret < 0)
+ goto out;
+
+ rcar_i2c_init(priv);
+
+ /* init first message */
+ priv->msg = msgs;
+ priv->msgs_left = num;
+ priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
+ rcar_i2c_prepare_msg(priv);
+
+ j = jiffies + num * adap->timeout;
+ do {
+ u32 msr = rcar_i2c_read(priv, ICMSR);
+
+ msr &= (rcar_i2c_is_recv(priv) ? RCAR_IRQ_RECV : RCAR_IRQ_SEND) | RCAR_IRQ_STOP;
+
+ if (msr) {
+ if (priv->devtype < I2C_RCAR_GEN3)
+ rcar_i2c_gen2_irq(0, priv);
+ else
+ rcar_i2c_gen3_irq(0, priv);
+ }
+
+ time_left = time_before_eq(jiffies, j);
+ } while (!(priv->flags & ID_DONE) && time_left);
+
+ if (!time_left) {
+ rcar_i2c_init(priv);
+ ret = -ETIMEDOUT;
+ } else if (priv->flags & ID_NACK) {
+ ret = -ENXIO;
+ } else if (priv->flags & ID_ARBLOST) {
+ ret = -EAGAIN;
+ } else {
+ ret = num - priv->msgs_left; /* The number of transfer */
+ ret = num - priv->msgs_left; /* The number of transfers */
+ }
+out:
+ pm_runtime_put(dev);
+
+ if (ret < 0 && ret != -ENXIO)
+ dev_err(dev, "error %d : %x\n", ret, priv->flags);
+
+ return ret;
+}
+
static int rcar_reg_slave(struct i2c_client *slave)
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
@@ -922,6 +993,7 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap)
static const struct i2c_algorithm rcar_i2c_algo = {
.master_xfer = rcar_i2c_master_xfer,
+ .master_xfer_atomic = rcar_i2c_master_xfer_atomic,
.functionality = rcar_i2c_func,
.reg_slave = rcar_reg_slave,
.unreg_slave = rcar_unreg_slave,
@@ -1027,7 +1099,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
- priv->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto out_pm_disable;
+ priv->irq = ret;
ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
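
master_xfer_atomic exists for contexts where sleeping is impossible, such as sending a PMIC shutdown command with interrupts already disabled; that is why the driver masks its interrupt enables, skips DMA and wake_up(), and polls the status register instead. The core shape of such a polled transfer, assuming hypothetical example_* helpers and a driver-private struct example_priv:

#include <linux/i2c.h>
#include <linux/jiffies.h>

static int example_xfer_atomic(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	struct example_priv *priv = i2c_get_adapdata(adap);
	unsigned long deadline = jiffies + num * adap->timeout;

	example_start_transfer(priv, msgs, num);

	/* Poll the status bits the interrupt handler would consume. */
	do {
		u32 status = example_read_status(priv);

		if (status)
			example_handle_events(priv, status);
	} while (!example_done(priv) && time_before_eq(jiffies, deadline));

	return example_done(priv) ? num : -ETIMEDOUT;
}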
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 62a903fbe912..ab928613afba 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
@@ -156,12 +157,8 @@ MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
*/
static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *pdev)
{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
- return (kernel_ulong_t)match->data;
- }
+ if (pdev->dev.of_node)
+ return (kernel_ulong_t)of_device_get_match_data(&pdev->dev);
return platform_get_device_id(pdev)->driver_data;
}
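
of_device_get_match_data() (and its bus-agnostic sibling device_get_match_data(), used in the i2c-mpc hunk earlier) folds the of_match_node() boilerplate into a single call. A sketch with a hypothetical example_data type:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_data {
	kernel_ulong_t quirks;
};

static kernel_ulong_t example_get_quirks(struct platform_device *pdev)
{
	const struct example_data *data;

	/* NULL when the device was not matched via an OF table. */
	data = of_device_get_match_data(&pdev->dev);
	if (data)
		return data->quirks;

	return platform_get_device_id(pdev)->driver_data;
}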
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 1dc387392e74..6746aa46d96c 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -18,8 +18,6 @@
/* SMBUS HID definition as supported by Microsoft Windows */
#define ACPI_SMBUS_MS_HID "SMB0001"
-ACPI_MODULE_NAME("smbus_cmi");
-
struct smbus_methods_t {
char *mt_info;
char *mt_sbr;
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index c2005c789d2b..319d1fa617c8 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
goto out2;
}
- id->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto out3;
+ id->irq = ret;
id->adap.nr = pdev->id;
id->adap.algo = &sh7760_i2c_algo;
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 2917fecf6c80..4fe15cd78907 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
int im, ret;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -640,6 +640,7 @@ static const struct of_device_id sprd_i2c_of_match[] = {
{ .compatible = "sprd,sc9860-i2c", },
{},
};
+MODULE_DEVICE_TABLE(of, sprd_i2c_of_match);
static struct platform_driver sprd_i2c_driver = {
.probe = sprd_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index c62c815b88eb..0138317ea600 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -164,7 +164,6 @@ enum {
#define STM32F7_I2C_DNF_DEFAULT 0
#define STM32F7_I2C_DNF_MAX 15
-#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */
@@ -223,8 +222,6 @@ struct stm32f7_i2c_spec {
* @clock_src: I2C clock source frequency (Hz)
* @rise_time: Rise time (ns)
* @fall_time: Fall time (ns)
- * @dnf: Digital filter coefficient (0-16)
- * @analog_filter: Analog filter delay (On/Off)
* @fmp_clr_offset: Fast Mode Plus clear register offset from set register
*/
struct stm32f7_i2c_setup {
@@ -232,8 +229,6 @@ struct stm32f7_i2c_setup {
u32 clock_src;
u32 rise_time;
u32 fall_time;
- u8 dnf;
- bool analog_filter;
u32 fmp_clr_offset;
};
@@ -312,6 +307,9 @@ struct stm32f7_i2c_msg {
* @wakeup_src: boolean to know if the device is a wakeup source
* @smbus_mode: states that the controller is configured in SMBus mode
* @host_notify_client: SMBus host-notify client
+ * @analog_filter: boolean to indicate enabling of the analog filter
+ * @dnf_dt: value of digital filter requested via dt
+ * @dnf: value of digital filter to apply
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -340,6 +338,9 @@ struct stm32f7_i2c_dev {
bool wakeup_src;
bool smbus_mode;
struct i2c_client *host_notify_client;
+ bool analog_filter;
+ u32 dnf_dt;
+ u32 dnf;
};
/*
@@ -385,15 +386,11 @@ static struct stm32f7_i2c_spec stm32f7_i2c_specs[] = {
static const struct stm32f7_i2c_setup stm32f7_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
- .dnf = STM32F7_I2C_DNF_DEFAULT,
- .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
};
static const struct stm32f7_i2c_setup stm32mp15_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
- .dnf = STM32F7_I2C_DNF_DEFAULT,
- .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
.fmp_clr_offset = 0x40,
};
@@ -462,27 +459,28 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
return -EINVAL;
}
- if (setup->dnf > STM32F7_I2C_DNF_MAX) {
+ i2c_dev->dnf = DIV_ROUND_CLOSEST(i2c_dev->dnf_dt, i2cclk);
+ if (i2c_dev->dnf > STM32F7_I2C_DNF_MAX) {
dev_err(i2c_dev->dev,
"DNF out of bound %d/%d\n",
- setup->dnf, STM32F7_I2C_DNF_MAX);
+ i2c_dev->dnf * i2cclk, STM32F7_I2C_DNF_MAX * i2cclk);
return -EINVAL;
}
/* Analog and Digital Filters */
af_delay_min =
- (setup->analog_filter ?
+ (i2c_dev->analog_filter ?
STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0);
af_delay_max =
- (setup->analog_filter ?
+ (i2c_dev->analog_filter ?
STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
- dnf_delay = setup->dnf * i2cclk;
+ dnf_delay = i2c_dev->dnf * i2cclk;
sdadel_min = specs->hddat_min + setup->fall_time -
- af_delay_min - (setup->dnf + 3) * i2cclk;
+ af_delay_min - (i2c_dev->dnf + 3) * i2cclk;
sdadel_max = specs->vddat_max - setup->rise_time -
- af_delay_max - (setup->dnf + 4) * i2cclk;
+ af_delay_max - (i2c_dev->dnf + 4) * i2cclk;
scldel_min = setup->rise_time + specs->sudat_min;
@@ -648,6 +646,7 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
setup->speed_freq = t->bus_freq_hz;
i2c_dev->setup.rise_time = t->scl_rise_ns;
i2c_dev->setup.fall_time = t->scl_fall_ns;
+ i2c_dev->dnf_dt = t->digital_filter_width_ns;
setup->clock_src = clk_get_rate(i2c_dev->clk);
if (!setup->clock_src) {
@@ -655,6 +654,9 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
return -EINVAL;
}
+ if (!of_property_read_bool(i2c_dev->dev->of_node, "i2c-digital-filter"))
+ i2c_dev->dnf_dt = STM32F7_I2C_DNF_DEFAULT;
+
do {
ret = stm32f7_i2c_compute_timing(i2c_dev, setup,
&i2c_dev->timing);
@@ -676,12 +678,15 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
return ret;
}
+ i2c_dev->analog_filter = of_property_read_bool(i2c_dev->dev->of_node,
+ "i2c-analog-filter");
+
dev_dbg(i2c_dev->dev, "I2C Speed(%i), Clk Source(%i)\n",
setup->speed_freq, setup->clock_src);
dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n",
setup->rise_time, setup->fall_time);
dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n",
- (setup->analog_filter ? "On" : "Off"), setup->dnf);
+ (i2c_dev->analog_filter ? "On" : "Off"), i2c_dev->dnf);
i2c_dev->bus_rate = setup->speed_freq;
@@ -720,8 +725,8 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll);
writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
- /* Enable I2C */
- if (i2c_dev->setup.analog_filter)
+ /* Configure the Analog Filter */
+ if (i2c_dev->analog_filter)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
else
@@ -732,7 +737,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_DNF_MASK);
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
- STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
+ STM32F7_I2C_CR1_DNF(i2c_dev->dnf));
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
@@ -1597,7 +1602,8 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
/* Bus error */
if (status & STM32F7_I2C_ISR_BERR) {
- dev_err(dev, "<%s>: Bus error\n", __func__);
+ dev_err(dev, "<%s>: Bus error accessing addr 0x%x\n",
+ __func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
stm32f7_i2c_release_bus(&i2c_dev->adap);
f7_msg->result = -EIO;
@@ -1605,13 +1611,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
/* Arbitration loss */
if (status & STM32F7_I2C_ISR_ARLO) {
- dev_dbg(dev, "<%s>: Arbitration loss\n", __func__);
+ dev_dbg(dev, "<%s>: Arbitration loss accessing addr 0x%x\n",
+ __func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
f7_msg->result = -EAGAIN;
}
if (status & STM32F7_I2C_ISR_PECERR) {
- dev_err(dev, "<%s>: PEC error in reception\n", __func__);
+ dev_err(dev, "<%s>: PEC error in reception accessing addr 0x%x\n",
+ __func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
f7_msg->result = -EINVAL;
}
@@ -1652,7 +1660,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
i2c_dev->msg_id = 0;
f7_msg->smbus = false;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -1698,7 +1706,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
f7_msg->read_write = read_write;
f7_msg->smbus = true;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1799,7 +1807,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
if (ret)
return ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1880,7 +1888,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
WARN_ON(!i2c_dev->slave[id]);
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -2027,12 +2035,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
phy_addr = (dma_addr_t)res->start;
irq_event = platform_get_irq(pdev, 0);
- if (irq_event <= 0) {
- if (irq_event != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get IRQ event: %d\n",
- irq_event);
+ if (irq_event <= 0)
return irq_event ? : -ENOENT;
- }
irq_error = platform_get_irq(pdev, 1);
if (irq_error <= 0)
@@ -2267,13 +2271,12 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+static int __maybe_unused stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
{
int ret;
struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -2289,13 +2292,13 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
return ret;
}
-static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+static int __maybe_unused stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
{
u32 cr1;
int ret;
struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -2320,7 +2323,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
return ret;
}
-static int stm32f7_i2c_suspend(struct device *dev)
+static int __maybe_unused stm32f7_i2c_suspend(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
@@ -2341,7 +2344,7 @@ static int stm32f7_i2c_suspend(struct device *dev)
return 0;
}
-static int stm32f7_i2c_resume(struct device *dev)
+static int __maybe_unused stm32f7_i2c_resume(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
@@ -2361,7 +2364,6 @@ static int stm32f7_i2c_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
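
The digital-filter width now comes from the generic i2c-digital-filter-width-ns timing property (via i2c_parse_fw_timings()) rather than a per-SoC constant, so the driver converts nanoseconds into I2CCLK periods at setup time. The conversion, condensed from the hunks above into a standalone helper (the helper name is illustrative; the constant is the driver's):

#include <linux/math.h>

static int example_compute_dnf(u32 dnf_dt_ns, u32 i2cclk_ns, u32 *dnf_out)
{
	u32 dnf = DIV_ROUND_CLOSEST(dnf_dt_ns, i2cclk_ns);

	if (dnf > STM32F7_I2C_DNF_MAX)	/* at most 15 clock periods */
		return -EINVAL;

	*dnf_out = dnf;
	return 0;
}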
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index c0c7d01473f2..3680d608698b 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -38,49 +38,31 @@ struct tegra_bpmp_i2c {
* firmware I2C driver to avoid any issues in future if Linux I2C flags are
* changed.
*/
-static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
+static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
{
- if (flags & I2C_M_TEN) {
+ if (flags & I2C_M_TEN)
*out |= SERIALI2C_TEN;
- flags &= ~I2C_M_TEN;
- }
- if (flags & I2C_M_RD) {
+ if (flags & I2C_M_RD)
*out |= SERIALI2C_RD;
- flags &= ~I2C_M_RD;
- }
- if (flags & I2C_M_STOP) {
+ if (flags & I2C_M_STOP)
*out |= SERIALI2C_STOP;
- flags &= ~I2C_M_STOP;
- }
- if (flags & I2C_M_NOSTART) {
+ if (flags & I2C_M_NOSTART)
*out |= SERIALI2C_NOSTART;
- flags &= ~I2C_M_NOSTART;
- }
- if (flags & I2C_M_REV_DIR_ADDR) {
+ if (flags & I2C_M_REV_DIR_ADDR)
*out |= SERIALI2C_REV_DIR_ADDR;
- flags &= ~I2C_M_REV_DIR_ADDR;
- }
- if (flags & I2C_M_IGNORE_NAK) {
+ if (flags & I2C_M_IGNORE_NAK)
*out |= SERIALI2C_IGNORE_NAK;
- flags &= ~I2C_M_IGNORE_NAK;
- }
- if (flags & I2C_M_NO_RD_ACK) {
+ if (flags & I2C_M_NO_RD_ACK)
*out |= SERIALI2C_NO_RD_ACK;
- flags &= ~I2C_M_NO_RD_ACK;
- }
- if (flags & I2C_M_RECV_LEN) {
+ if (flags & I2C_M_RECV_LEN)
*out |= SERIALI2C_RECV_LEN;
- flags &= ~I2C_M_RECV_LEN;
- }
-
- return 0;
}
/**
@@ -97,22 +79,19 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
*
* See deserialize_i2c documentation for the data format in the other direction.
*/
-static int tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
+static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
struct mrq_i2c_request *request,
struct i2c_msg *msgs,
unsigned int num)
{
char *buf = request->xfer.data_buf;
unsigned int i, j, pos = 0;
- int err;
for (i = 0; i < num; i++) {
struct i2c_msg *msg = &msgs[i];
u16 flags = 0;
- err = tegra_bpmp_xlate_flags(msg->flags, &flags);
- if (err < 0)
- return err;
+ tegra_bpmp_xlate_flags(msg->flags, &flags);
buf[pos++] = msg->addr & 0xff;
buf[pos++] = (msg->addr & 0xff00) >> 8;
@@ -128,8 +107,6 @@ static int tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
}
request->xfer.data_size = pos;
-
- return 0;
}
/**
@@ -217,7 +194,32 @@ static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
else
err = tegra_bpmp_transfer(i2c->bpmp, &msg);
- return err;
+ if (err < 0) {
+ dev_err(i2c->dev, "failed to transfer message: %d\n", err);
+ return err;
+ }
+
+ if (msg.rx.ret != 0) {
+ if (msg.rx.ret == -BPMP_EAGAIN) {
+ dev_dbg(i2c->dev, "arbitration lost\n");
+ return -EAGAIN;
+ }
+
+ if (msg.rx.ret == -BPMP_ETIMEDOUT) {
+ dev_dbg(i2c->dev, "timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (msg.rx.ret == -BPMP_ENXIO) {
+ dev_dbg(i2c->dev, "NAK\n");
+ return -ENXIO;
+ }
+
+ dev_err(i2c->dev, "transaction failed: %d\n", msg.rx.ret);
+ return -EIO;
+ }
+
+ return 0;
}
static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter,
@@ -238,12 +240,7 @@ static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter,
memset(&request, 0, sizeof(request));
memset(&response, 0, sizeof(response));
- err = tegra_bpmp_serialize_i2c_msg(i2c, &request, msgs, num);
- if (err < 0) {
- dev_err(i2c->dev, "failed to serialize message: %d\n", err);
- return err;
- }
-
+ tegra_bpmp_serialize_i2c_msg(i2c, &request, msgs, num);
err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response, atomic);
if (err < 0) {
dev_err(i2c->dev, "failed to transfer message: %d\n", err);
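
The new error handling translates firmware status codes carried in msg.rx.ret into ordinary errnos, so the I2C core can retry (-EAGAIN), report a NAK (-ENXIO), or time out (-ETIMEDOUT) appropriately. The same mapping, restated as a helper for clarity (a condensed sketch, not the patch's literal structure):

#include <soc/tegra/bpmp-abi.h>

static int example_bpmp_status_to_errno(int rx_ret)
{
	switch (rx_ret) {
	case 0:
		return 0;
	case -BPMP_EAGAIN:	/* arbitration lost */
		return -EAGAIN;
	case -BPMP_ETIMEDOUT:
		return -ETIMEDOUT;
	case -BPMP_ENXIO:	/* NAK from the slave */
		return -ENXIO;
	default:
		return -EIO;
	}
}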
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 63cbb9c7c1b0..bba08cbce6e1 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/version.h>
#define MAILBOX_OP_TIMEOUT 1000 /* Operation time out in ms */
#define MAILBOX_I2C_INDEX 0
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 087b2951942e..2a8568b97c14 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
- err = pm_runtime_get_sync(i2c->dev);
+ err = pm_runtime_resume_and_get(i2c->dev);
if (err < 0)
return err;
@@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- ret = pm_runtime_get_sync(i2c->dev);
+ ret = pm_runtime_resume_and_get(i2c->dev);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 8bc51d4e69df..4df8ad092df3 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -47,7 +47,6 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
*
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
- * Device properties are deep-copied though.
*/
int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
@@ -72,16 +71,6 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
devinfo->busnum = busnum;
devinfo->board_info = *info;
- if (info->properties) {
- devinfo->board_info.properties =
- property_entries_dup(info->properties);
- if (IS_ERR(devinfo->board_info.properties)) {
- status = PTR_ERR(devinfo->board_info.properties);
- kfree(devinfo);
- break;
- }
- }
-
if (info->resources) {
devinfo->board_info.resources =
kmemdup(info->resources,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f21362355973..5a97e4a02fa2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -76,6 +76,27 @@ void i2c_transfer_trace_unreg(void)
static_branch_dec(&i2c_trace_msg_key);
}
+const char *i2c_freq_mode_string(u32 bus_freq_hz)
+{
+ switch (bus_freq_hz) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ return "Standard Mode (100 kHz)";
+ case I2C_MAX_FAST_MODE_FREQ:
+ return "Fast Mode (400 kHz)";
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ return "Fast Mode Plus (1.0 MHz)";
+ case I2C_MAX_TURBO_MODE_FREQ:
+ return "Turbo Mode (1.4 MHz)";
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
+ return "High Speed Mode (3.4 MHz)";
+ case I2C_MAX_ULTRA_FAST_MODE_FREQ:
+ return "Ultra Fast Mode (5.0 MHz)";
+ default:
+ return "Unknown Mode";
+ }
+}
+EXPORT_SYMBOL_GPL(i2c_freq_mode_string);
+
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client)
{
@@ -249,7 +270,7 @@ EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
int i2c_recover_bus(struct i2c_adapter *adap)
{
if (!adap->bus_recovery_info)
- return -EOPNOTSUPP;
+ return -EBUSY;
dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
return adap->bus_recovery_info->recover_bus(adap);
@@ -519,6 +540,13 @@ static int i2c_device_probe(struct device *dev)
if (status)
goto err_clear_wakeup_irq;
+ client->devres_group_id = devres_open_group(&client->dev, NULL,
+ GFP_KERNEL);
+ if (!client->devres_group_id) {
+ status = -ENOMEM;
+ goto err_detach_pm_domain;
+ }
+
/*
* When there are no more users of probe(),
* rename probe_new to probe.
@@ -531,11 +559,21 @@ static int i2c_device_probe(struct device *dev)
else
status = -EINVAL;
+ /*
+ * Note that we are not closing the devres group opened above so
+ * even resources that were attached to the device after probe is
+ * run are released when i2c_device_remove() is executed. This is
+ * needed as some drivers would allocate additional resources,
+ * for example when updating firmware.
+ */
+
if (status)
- goto err_detach_pm_domain;
+ goto err_release_driver_resources;
return 0;
+err_release_driver_resources:
+ devres_release_group(&client->dev, client->devres_group_id);
err_detach_pm_domain:
dev_pm_domain_detach(&client->dev, true);
err_clear_wakeup_irq:
@@ -564,6 +602,8 @@ static int i2c_device_remove(struct device *dev)
dev_warn(dev, "remove failed (%pe), will be ignored\n", ERR_PTR(status));
}
+ devres_release_group(&client->dev, client->devres_group_id);
+
dev_pm_domain_detach(&client->dev, true);
dev_pm_clear_wake_irq(&client->dev);
@@ -612,7 +652,7 @@ modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
if (len != -ENODEV)
return len;
- len = acpi_device_modalias(dev, buf, PAGE_SIZE -1);
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
@@ -910,11 +950,11 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
i2c_dev_set_name(adap, client, info);
- if (info->properties) {
- status = device_add_properties(&client->dev, info->properties);
+ if (info->swnode) {
+ status = device_add_software_node(&client->dev, info->swnode);
if (status) {
dev_err(&adap->dev,
- "Failed to add properties to client %s: %d\n",
+ "Failed to add software node to client %s: %d\n",
client->name, status);
goto out_err_put_of_node;
}
@@ -922,16 +962,15 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
status = device_register(&client->dev);
if (status)
- goto out_free_props;
+ goto out_remove_swnode;
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
return client;
-out_free_props:
- if (info->properties)
- device_remove_properties(&client->dev);
+out_remove_swnode:
+ device_remove_software_node(&client->dev);
out_err_put_of_node:
of_node_put(info->of_node);
out_err:
@@ -961,6 +1000,7 @@ void i2c_unregister_device(struct i2c_client *client)
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+ device_remove_software_node(&client->dev);
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1017,15 +1057,9 @@ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address
}
EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
-struct i2c_dummy_devres {
- struct i2c_client *client;
-};
-
-static void devm_i2c_release_dummy(struct device *dev, void *res)
+static void devm_i2c_release_dummy(void *client)
{
- struct i2c_dummy_devres *this = res;
-
- i2c_unregister_device(this->client);
+ i2c_unregister_device(client);
}
/**
@@ -1042,20 +1076,16 @@ struct i2c_client *devm_i2c_new_dummy_device(struct device *dev,
struct i2c_adapter *adapter,
u16 address)
{
- struct i2c_dummy_devres *dr;
struct i2c_client *client;
-
- dr = devres_alloc(devm_i2c_release_dummy, sizeof(*dr), GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
client = i2c_new_dummy_device(adapter, address);
- if (IS_ERR(client)) {
- devres_free(dr);
- } else {
- dr->client = client;
- devres_add(dev, dr);
- }
+ if (IS_ERR(client))
+ return client;
+
+ ret = devm_add_action_or_reset(dev, devm_i2c_release_dummy, client);
+ if (ret)
+ return ERR_PTR(ret);
return client;
}
@@ -1704,6 +1734,32 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL(i2c_del_adapter);
+static void devm_i2c_del_adapter(void *adapter)
+{
+ i2c_del_adapter(adapter);
+}
+
+/**
+ * devm_i2c_add_adapter - device-managed variant of i2c_add_adapter()
+ * @dev: managing device for adding this I2C adapter
+ * @adapter: the adapter to add
+ * Context: can sleep
+ *
+ * Add adapter with dynamic bus number, same as i2c_add_adapter(),
+ * but the adapter will be automatically deleted on driver detach.
+ */
+int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter)
+{
+ int ret;
+
+ ret = i2c_add_adapter(adapter);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_i2c_del_adapter, adapter);
+}
+EXPORT_SYMBOL_GPL(devm_i2c_add_adapter);
+
static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p,
u32 def_val, bool use_def)
{
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6ceb11cc4be1..6ef38a8ee95c 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(rdwr_arg)))
return -EFAULT;
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
+ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+ return -EINVAL;
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
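
From userspace, the added check means an I2C_RDWR request with a NULL message array or zero messages now fails with EINVAL up front rather than proceeding toward the copy path. An illustrative caller:

#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <sys/ioctl.h>

static int example_empty_rdwr(int fd)
{
	struct i2c_rdwr_ioctl_data rdwr = { .msgs = NULL, .nmsgs = 0 };

	/* Rejected immediately: returns -1 with errno set to EINVAL. */
	return ioctl(fd, I2C_RDWR, &rdwr);
}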
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 5c9fac7cf420..3b0991fedd81 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -121,7 +121,7 @@ struct ib_gid_table {
u32 default_gid_indices;
};
-static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
+static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
struct ib_event event;
@@ -197,7 +197,7 @@ int ib_cache_gid_parse_type_str(const char *buf)
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
-static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
return device->port_data[port].cache.gid;
}
@@ -237,10 +237,10 @@ static void put_gid_ndev(struct rcu_head *head)
static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
struct ib_device *device = entry->attr.device;
- u8 port_num = entry->attr.port_num;
+ u32 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
- dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
+ dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__,
port_num, entry->attr.index, entry->attr.gid.raw);
write_lock_irq(&table->rwlock);
@@ -282,7 +282,7 @@ static void free_gid_work(struct work_struct *work)
struct ib_gid_table_entry *entry =
container_of(work, struct ib_gid_table_entry, del_work);
struct ib_device *device = entry->attr.device;
- u8 port_num = entry->attr.port_num;
+ u32 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
mutex_lock(&table->lock);
@@ -379,7 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
* @ix: GID entry index to delete
*
*/
-static void del_gid(struct ib_device *ib_dev, u8 port,
+static void del_gid(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table, int ix)
{
struct roce_gid_ndev_storage *ndev_storage;
@@ -387,7 +387,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
lockdep_assert_held(&table->lock);
- dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
+ dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
ix, table->data_vec[ix]->attr.gid.raw);
write_lock_irq(&table->rwlock);
@@ -543,7 +543,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
addrconf_ifid_eui48(&gid->raw[8], dev);
}
-static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr,
unsigned long mask, bool default_gid)
{
@@ -587,7 +587,7 @@ out_unlock:
return ret;
}
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -598,7 +598,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
}
static int
-_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr,
unsigned long mask, bool default_gid)
{
@@ -627,7 +627,7 @@ out_unlock:
return ret;
}
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -638,7 +638,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *ndev)
{
struct ib_gid_table *table;
@@ -683,7 +683,7 @@ const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
const union ib_gid *gid,
enum ib_gid_type gid_type,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
int local_index;
struct ib_gid_table *table;
@@ -734,7 +734,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
*
*/
const struct ib_gid_attr *rdma_find_gid_by_filter(
- struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
+ struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
void *),
void *context)
@@ -818,7 +818,7 @@ static void release_gid_table(struct ib_device *device,
kfree(table);
}
-static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
+static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table)
{
int i;
@@ -834,7 +834,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
mutex_unlock(&table->lock);
}
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
struct net_device *ndev,
unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode)
@@ -867,7 +867,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
}
}
-static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
+static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table)
{
unsigned int i;
@@ -884,7 +884,7 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
static void gid_table_release_one(struct ib_device *ib_dev)
{
- unsigned int p;
+ u32 p;
rdma_for_each_port (ib_dev, p) {
release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
@@ -895,7 +895,7 @@ static void gid_table_release_one(struct ib_device *ib_dev)
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
struct ib_gid_table *table;
- unsigned int rdma_port;
+ u32 rdma_port;
rdma_for_each_port (ib_dev, rdma_port) {
table = alloc_gid_table(
@@ -915,7 +915,7 @@ rollback_table_setup:
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
- unsigned int p;
+ u32 p;
rdma_for_each_port (ib_dev, p)
cleanup_gid_table_port(ib_dev, p,
@@ -950,7 +950,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
* Returns 0 on success or appropriate error code.
*
*/
-int rdma_query_gid(struct ib_device *device, u8 port_num,
+int rdma_query_gid(struct ib_device *device, u32 port_num,
int index, union ib_gid *gid)
{
struct ib_gid_table *table;
@@ -1014,7 +1014,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
unsigned long mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE;
struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
- unsigned int p;
+ u32 p;
if (ndev)
mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -1043,7 +1043,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
EXPORT_SYMBOL(rdma_find_gid);
int ib_get_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
int index,
u16 *pkey)
{
@@ -1069,9 +1069,8 @@ int ib_get_cached_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_pkey);
-int ib_get_cached_subnet_prefix(struct ib_device *device,
- u8 port_num,
- u64 *sn_pfx)
+int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
+ u64 *sn_pfx)
{
unsigned long flags;
@@ -1086,10 +1085,8 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
-int ib_find_cached_pkey(struct ib_device *device,
- u8 port_num,
- u16 pkey,
- u16 *index)
+int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
+ u16 pkey, u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
@@ -1116,8 +1113,9 @@ int ib_find_cached_pkey(struct ib_device *device,
*index = i;
ret = 0;
break;
- } else
+ } else {
partial_ix = i;
+ }
}
if (ret && partial_ix >= 0) {
@@ -1132,10 +1130,8 @@ err:
}
EXPORT_SYMBOL(ib_find_cached_pkey);
-int ib_find_exact_cached_pkey(struct ib_device *device,
- u8 port_num,
- u16 pkey,
- u16 *index)
+int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
+ u16 pkey, u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
@@ -1169,9 +1165,7 @@ err:
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
-int ib_get_cached_lmc(struct ib_device *device,
- u8 port_num,
- u8 *lmc)
+int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
unsigned long flags;
int ret = 0;
@@ -1187,8 +1181,7 @@ int ib_get_cached_lmc(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_lmc);
-int ib_get_cached_port_state(struct ib_device *device,
- u8 port_num,
+int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
enum ib_port_state *port_state)
{
unsigned long flags;
@@ -1222,7 +1215,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state);
* code.
*/
const struct ib_gid_attr *
-rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
+rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
struct ib_gid_table *table;
@@ -1263,7 +1256,7 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
const struct ib_gid_attr *gid_attr;
ssize_t num_entries = 0, ret;
struct ib_gid_table *table;
- unsigned int port_num, i;
+ u32 port_num, i;
struct net_device *ndev;
unsigned long flags;
@@ -1361,7 +1354,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
container_of(attr, struct ib_gid_table_entry, attr);
struct ib_device *device = entry->attr.device;
struct net_device *ndev = ERR_PTR(-EINVAL);
- u8 port_num = entry->attr.port_num;
+ u32 port_num = entry->attr.port_num;
struct ib_gid_table *table;
unsigned long flags;
bool valid;
@@ -1441,7 +1434,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
static int config_non_roce_gid_cache(struct ib_device *device,
- u8 port, int gid_tbl_len)
+ u32 port, int gid_tbl_len)
{
struct ib_gid_attr gid_attr = {};
struct ib_gid_table *table;
@@ -1472,7 +1465,7 @@ err:
}
static int
-ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
+ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
{
struct ib_port_attr *tprops = NULL;
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
@@ -1621,7 +1614,7 @@ EXPORT_SYMBOL(ib_dispatch_event);
int ib_cache_setup_one(struct ib_device *device)
{
- unsigned int p;
+ u32 p;
int err;
rwlock_init(&device->cache_lock);
@@ -1641,7 +1634,7 @@ int ib_cache_setup_one(struct ib_device *device)
void ib_cache_release_one(struct ib_device *device)
{
- unsigned int p;
+ u32 p;
/*
* The release function frees all the cache elements.
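
The cache.c churn above is one piece of a tree-wide change: RDMA port numbers widen from u8 to u32 so a device can expose more than 255 ports, and every helper signature, iterator variable, and format string ("%d" becoming "%u") follows suit. The pattern in miniature (example_ name is illustrative):

#include <rdma/ib_verbs.h>

static void example_setup_ports(struct ib_device *ib_dev)
{
	u32 port;	/* previously u8, capping iteration at 255 */

	rdma_for_each_port(ib_dev, port)
		pr_debug("configuring port %u\n", port);
}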
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 3d194bb60840..0ead0d223154 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -202,7 +202,7 @@ static struct attribute *cm_counter_default_attrs[] = {
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
- u8 port_num;
+ u32 port_num;
struct list_head cm_priv_prim_list;
struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
@@ -255,7 +255,8 @@ struct cm_id_private {
struct completion comp;
refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
- * Protected by the cm.lock spinlock. */
+ * Protected by the cm.lock spinlock.
+ */
int listen_sharecount;
struct rcu_head rcu;
@@ -420,8 +421,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
return 0;
}
-static void * cm_copy_private_data(const void *private_data,
- u8 private_data_len)
+static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
void *data;
@@ -680,8 +680,8 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
return cm_id_priv;
}
-static struct cm_id_private * cm_find_listen(struct ib_device *device,
- __be64 service_id)
+static struct cm_id_private *cm_find_listen(struct ib_device *device,
+ __be64 service_id)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
@@ -708,8 +708,8 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
return NULL;
}
-static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
- *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
struct rb_node **link = &cm.remote_id_table.rb_node;
struct rb_node *parent = NULL;
@@ -767,8 +767,8 @@ static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
return res;
}
-static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
- *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
struct rb_node **link = &cm.remote_qp_table.rb_node;
struct rb_node *parent = NULL;
@@ -797,8 +797,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
return NULL;
}
-static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
- *cm_id_priv)
+static struct cm_id_private *
+cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
struct rb_node **link = &cm.remote_sidr_table.rb_node;
struct rb_node *parent = NULL;
@@ -897,7 +897,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
}
EXPORT_SYMBOL(ib_create_cm_id);
-static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
+static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
struct cm_work *work;
@@ -986,7 +986,7 @@ static void cm_remove_remote(struct cm_id_private *cm_id_priv)
}
}
-static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
+static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
struct cm_timewait_info *timewait_info;
@@ -1631,7 +1631,7 @@ static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
req_msg))));
}
-static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
+static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
struct sa_path_rec *path, union ib_gid *gid)
{
if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
@@ -1750,7 +1750,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
static u16 cm_get_bth_pkey(struct cm_work *work)
{
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
- u8 port_num = work->port->port_num;
+ u32 port_num = work->port->port_num;
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
u16 pkey;
int ret;
@@ -1778,7 +1778,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work,
struct sa_path_rec *path)
{
struct ib_device *dev = work->port->cm_dev->ib_device;
- u8 port_num = work->port->port_num;
+ u32 port_num = work->port->port_num;
if (rdma_cap_opa_ah(dev, port_num) &&
(ib_is_opa_gid(&path->sgid))) {
@@ -1977,8 +1977,8 @@ unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
}
-static struct cm_id_private * cm_match_req(struct cm_work *work,
- struct cm_id_private *cm_id_priv)
+static struct cm_id_private *cm_match_req(struct cm_work *work,
+ struct cm_id_private *cm_id_priv)
{
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
@@ -2138,20 +2138,17 @@ static int cm_req_handler(struct cm_work *work)
goto destroy;
}
- cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
-
memset(&work->path[0], 0, sizeof(work->path[0]));
if (cm_req_has_alt_path(req_msg))
memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
gid_attr = grh->sgid_attr;
- if (gid_attr &&
- rdma_protocol_roce(work->port->cm_dev->ib_device,
- work->port->port_num)) {
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
work->path[0].rec_type =
sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
} else {
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_path_set_rec_type(
work->port->cm_dev->ib_device, work->port->port_num,
&work->path[0],
@@ -2993,7 +2990,7 @@ static void cm_format_rej_event(struct cm_work *work)
IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
-static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
+static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
struct cm_id_private *cm_id_priv;
__be32 remote_id;
@@ -3098,7 +3095,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch(cm_id_priv->id.state) {
+ switch (cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
@@ -3155,7 +3152,7 @@ error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_mra);
-static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
+static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
case CM_MSG_RESPONSE_REQ:
@@ -3917,8 +3914,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state)
- {
+ switch (cm_id->state) {
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
cm_id->state = IB_CM_ESTABLISHED;
@@ -4334,7 +4330,7 @@ static int cm_add_one(struct ib_device *ib_device)
unsigned long flags;
int ret;
int count = 0;
- unsigned int i;
+ u32 i;
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
@@ -4432,7 +4428,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
.clr_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
- unsigned int i;
+ u32 i;
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 0cc40656b5c5..8462de7ca26e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -22,7 +22,7 @@
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg);
- switch(transport_type) {
+ switch (transport_type) {
case 0: return IB_QPT_RC;
case 1: return IB_QPT_UC;
case 3:
@@ -37,7 +37,7 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
enum ib_qp_type qp_type)
{
- switch(qp_type) {
+ switch (qp_type) {
case IB_QPT_UC:
IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1);
break;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 94096511599f..2b9ffc21cbc4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -43,7 +43,6 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
-#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
@@ -219,14 +218,6 @@ struct rdma_bind_list {
unsigned short port;
};
-struct class_port_info_context {
- struct ib_class_port_info *class_port_info;
- struct ib_device *device;
- struct completion done;
- struct ib_sa_query *sa_query;
- u8 port_num;
-};
-
static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
@@ -287,7 +278,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
- unsigned int port)
+ u32 port)
{
if (!rdma_is_port_valid(cma_dev->device, port))
return -EINVAL;
@@ -296,7 +287,7 @@ int cma_get_default_gid_type(struct cma_device *cma_dev,
}
int cma_set_default_gid_type(struct cma_device *cma_dev,
- unsigned int port,
+ u32 port,
enum ib_gid_type default_gid_type)
{
unsigned long supported_gids;
@@ -319,7 +310,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
return 0;
}
-int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
+int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
if (!rdma_is_port_valid(cma_dev->device, port))
return -EINVAL;
@@ -327,7 +318,7 @@ int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}
-int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
+int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
u8 default_roce_tos)
{
if (!rdma_is_port_valid(cma_dev->device, port))
@@ -463,7 +454,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- rdma_restrack_add(&id_priv->res);
trace_cm_id_attach(id_priv, cma_dev->device);
}
@@ -562,7 +552,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
}
static const struct ib_gid_attr *
-cma_validate_port(struct ib_device *device, u8 port,
+cma_validate_port(struct ib_device *device, u32 port,
enum ib_gid_type gid_type,
union ib_gid *gid,
struct rdma_id_private *id_priv)
@@ -620,7 +610,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
- unsigned int port;
+ u32 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -700,6 +690,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
mutex_lock(&lock);
cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
mutex_unlock(&lock);
+ rdma_restrack_add(&id_priv->res);
return 0;
}
@@ -711,8 +702,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
- unsigned int port;
union ib_gid gid;
+ u32 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -754,8 +745,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
}
out:
- if (!ret)
+ if (!ret) {
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
+ }
mutex_unlock(&lock);
return ret;
@@ -816,6 +809,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
found:
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
mutex_unlock(&lock);
addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
@@ -852,6 +846,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
id_priv->id.qp_type = qp_type;
id_priv->tos_set = false;
id_priv->timeout_set = false;
+ id_priv->min_rnr_timer_set = false;
id_priv->gid_type = IB_GID_TYPE_IB;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
@@ -1135,12 +1130,16 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
qp_attr_mask);
qp_attr->port_num = id_priv->id.port_num;
*qp_attr_mask |= IB_QP_PORT;
- } else
+ } else {
ret = -ENOSYS;
+ }
if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
qp_attr->timeout = id_priv->timeout;
+ if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
+ qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
+
return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
@@ -1581,7 +1580,7 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
struct ib_device *device = id->device;
- const int port_num = id->port_num ?: rdma_start_port(device);
+ const u32 port_num = id->port_num ?: rdma_start_port(device);
return rdma_protocol_roce(device, port_num);
}
@@ -2474,6 +2473,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
id->tos = id_priv->tos;
id->tos_set = id_priv->tos_set;
+ id->afonly = id_priv->afonly;
id_priv->cm_id.iw = id;
memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -2529,6 +2529,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
rdma_addr_size(cma_src_addr(id_priv)));
_cma_attach_to_dev(dev_id_priv, cma_dev);
+ rdma_restrack_add(&dev_id_priv->res);
cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
@@ -2615,6 +2616,43 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
}
EXPORT_SYMBOL(rdma_set_ack_timeout);
+/**
+ * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
+ * QP associated with a connection identifier.
+ * @id: Communication identifier associated with the QP.
+ * @min_rnr_timer: 5-bit value encoded per Table 45 "Encoding for RNR NAK
+ * Timer Field" in the IBTA specification.
+ *
+ * This function should be called on the active side before rdma_connect()
+ * and on the passive side before rdma_accept(). The timer value will be
+ * associated with the local QP. When the QP receives a send it is not
+ * ready to handle, typically because the receive queue is empty, an RNR
+ * NAK is returned to the requester with the min_rnr_timer value encoded.
+ * The requester will then wait at least the time specified in the NAK
+ * before retrying. The default is zero, which translates to a minimum
+ * RNR NAK Timer value of 655 ms.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
+{
+ struct rdma_id_private *id_priv;
+
+ /* It is a five-bit value */
+ if (min_rnr_timer & 0xe0)
+ return -EINVAL;
+
+ if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
+ return -EINVAL;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ id_priv->min_rnr_timer = min_rnr_timer;
+ id_priv->min_rnr_timer_set = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_set_min_rnr_timer);
+
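For illustration only (not part of this patch): a passive-side ULP would call the new helper while handling a connect request, before rdma_accept(); the stored value is later applied to the QP through rdma_init_qp_attr(). The handler below is a hedged sketch, and IB_RNR_TIMER_010_24 (encoding 20, roughly 10 ms) is an arbitrary example value.

	/* Hedged sketch of a passive-side caller; not from this patch. */
	#include <rdma/ib_verbs.h>
	#include <rdma/rdma_cm.h>

	static int example_on_connect_request(struct rdma_cm_id *id,
					      struct rdma_cm_event *ev)
	{
		int ret;

		/* Must be a 5-bit IBTA Table 45 encoding; bits 0xe0 are rejected. */
		ret = rdma_set_min_rnr_timer(id, IB_RNR_TIMER_010_24);
		if (ret)
			return ret;

		/* ... create the QP on @id here; conn_param setup elided ... */
		return rdma_accept(id, NULL);
	}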
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
void *context)
{
@@ -3169,6 +3207,7 @@ port_found:
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
cma_set_loopback(cma_src_addr(id_priv));
out:
mutex_unlock(&lock);
@@ -3201,6 +3240,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (status)
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
status);
+ rdma_restrack_add(&id_priv->res);
} else if (status) {
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
}
@@ -3812,6 +3852,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err2;
+ if (!cma_any_addr(addr))
+ rdma_restrack_add(&id_priv->res);
return 0;
err2:
if (id_priv->cma_dev)
@@ -4124,10 +4166,11 @@ int rdma_connect_locked(struct rdma_cm_id *id,
ret = cma_resolve_ib_udp(id_priv, conn_param);
else
ret = cma_connect_ib(id_priv, conn_param);
- } else if (rdma_cap_iw_cm(id->device, id->port_num))
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = cma_connect_iw(id_priv, conn_param);
- else
+ } else {
ret = -ENOSYS;
+ }
if (ret)
goto err_state;
return 0;
@@ -4234,9 +4277,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
iw_param.ird = conn_param->responder_resources;
iw_param.private_data = conn_param->private_data;
iw_param.private_data_len = conn_param->private_data_len;
- if (id_priv->id.qp) {
+ if (id_priv->id.qp)
iw_param.qpn = id_priv->qp_num;
- } else
+ else
iw_param.qpn = conn_param->qp_num;
return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
@@ -4319,11 +4362,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
else
ret = cma_rep_recv(id_priv);
}
- } else if (rdma_cap_iw_cm(id->device, id->port_num))
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = cma_accept_iw(id_priv, conn_param);
- else
+ } else {
ret = -ENOSYS;
-
+ }
if (ret)
goto reject;
@@ -4409,8 +4452,9 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
private_data, private_data_len);
- } else
+ } else {
ret = -ENOSYS;
+ }
return ret;
}
@@ -4864,14 +4908,28 @@ static void cma_process_remove(struct cma_device *cma_dev)
wait_for_completion(&cma_dev->comp);
}
+static bool cma_supported(struct ib_device *device)
+{
+ u32 i;
+
+ rdma_for_each_port(device, i) {
+ if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
+ return true;
+ }
+ return false;
+}
+
static int cma_add_one(struct ib_device *device)
{
struct rdma_id_private *to_destroy;
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
- unsigned int i;
unsigned long supported_gids = 0;
int ret;
+ u32 i;
+
+ if (!cma_supported(device))
+ return -EOPNOTSUPP;
cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
if (!cma_dev)
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index e0d5e3bae458..9ac16e0db761 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -43,7 +43,7 @@ struct cma_device;
struct cma_dev_group;
struct cma_dev_port_group {
- unsigned int port_num;
+ u32 port_num;
struct cma_dev_group *cma_dev_group;
struct config_group group;
};
@@ -200,10 +200,10 @@ static const struct config_item_type cma_port_group_type = {
static int make_cma_ports(struct cma_dev_group *cma_dev_group,
struct cma_device *cma_dev)
{
- struct ib_device *ibdev;
- unsigned int i;
- unsigned int ports_num;
struct cma_dev_port_group *ports;
+ struct ib_device *ibdev;
+ u32 ports_num;
+ u32 i;
ibdev = cma_get_ib_dev(cma_dev);
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index caece96ebcf5..5c463da99845 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -86,9 +86,11 @@ struct rdma_id_private {
u8 tos;
u8 tos_set:1;
u8 timeout_set:1;
+ u8 min_rnr_timer_set:1;
u8 reuseaddr;
u8 afonly;
u8 timeout;
+ u8 min_rnr_timer;
enum ib_gid_type gid_type;
/*
@@ -117,11 +119,11 @@ void cma_dev_put(struct cma_device *dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
void *cookie);
-int cma_get_default_gid_type(struct cma_device *dev, unsigned int port);
-int cma_set_default_gid_type(struct cma_device *dev, unsigned int port,
+int cma_get_default_gid_type(struct cma_device *dev, u32 port);
+int cma_set_default_gid_type(struct cma_device *dev, u32 port,
enum ib_gid_type default_gid_type);
-int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port);
-int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port,
+int cma_get_default_roce_tos(struct cma_device *dev, u32 port);
+int cma_set_default_roce_tos(struct cma_device *dev, u32 port,
u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *dev);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 315f7a297eee..29809dd30041 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -83,14 +83,14 @@ void ib_device_unregister_sysfs(struct ib_device *device);
int ib_device_rename(struct ib_device *ibdev, const char *name);
int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);
-typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
+typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port,
struct net_device *idev, void *cookie);
-typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
+typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port,
struct net_device *idev, void *cookie);
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
- unsigned int port);
+ u32 port);
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
@@ -113,7 +113,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
struct ib_client_nl_info {
struct sk_buff *nl_msg;
struct device *cdev;
- unsigned int port;
+ u32 port;
u64 abi;
};
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
@@ -128,24 +128,24 @@ int ib_cache_gid_parse_type_str(const char *buf);
const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
struct net_device *ndev,
unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode);
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr);
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr);
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *ndev);
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
@@ -215,14 +215,14 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
struct netlink_ext_ack *extack);
int ib_get_cached_subnet_prefix(struct ib_device *device,
- u8 port_num,
- u64 *sn_pfx);
+ u32 port_num,
+ u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_release_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix);
int ib_security_modify_qp(struct ib_qp *qp,
@@ -247,7 +247,7 @@ static inline void ib_security_release_port_pkey_list(struct ib_device *device)
}
static inline void ib_security_cache_change(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix)
{
}
@@ -381,7 +381,7 @@ int ib_setup_port_attrs(struct ib_core_device *coredev);
int rdma_compatdev_set(u8 enable);
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
struct kobject *kobj, struct kobj_type *ktype,
const char *name);
void ib_port_unregister_module_stat(struct kobject *kobj);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index f3a7c1f404af..15493357cfef 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -14,10 +14,12 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
enum rdma_nl_counter_mode new_mode,
enum rdma_nl_counter_mask new_mask)
{
- if (new_mode == RDMA_COUNTER_MODE_AUTO && port_counter->num_counters)
- if (new_mask & ~ALL_AUTO_MODE_MASKS ||
- port_counter->mode.mode != RDMA_COUNTER_MODE_NONE)
+ if (new_mode == RDMA_COUNTER_MODE_AUTO) {
+ if (new_mask & (~ALL_AUTO_MODE_MASKS))
return -EINVAL;
+ if (port_counter->num_counters)
+ return -EBUSY;
+ }
port_counter->mode.mode = new_mode;
port_counter->mode.mask = new_mask;
@@ -32,14 +34,17 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
* @mask: Mask to configure
* @extack: Message to the user
*
- * Return 0 on success.
+ * Return 0 on success. If the counter mode is unchanged, that is treated
+ * as success as well.
+ * Return -EBUSY when enabling or modifying auto mode while counters are bound.
+ *
*/
-int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mask mask,
struct netlink_ext_ack *extack)
{
- enum rdma_nl_counter_mode mode = RDMA_COUNTER_MODE_AUTO;
struct rdma_port_counter *port_counter;
+ enum rdma_nl_counter_mode mode;
int ret;
port_counter = &dev->port_data[port].port_counter;
@@ -47,25 +52,26 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
return -EOPNOTSUPP;
mutex_lock(&port_counter->lock);
- if (mask) {
- ret = __counter_set_mode(port_counter, mode, mask);
- if (ret)
- NL_SET_ERR_MSG(
- extack,
- "Turning on auto mode is not allowed when there is bound QP");
+ if (mask)
+ mode = RDMA_COUNTER_MODE_AUTO;
+ else
+ mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
+ RDMA_COUNTER_MODE_NONE;
+
+ if (port_counter->mode.mode == mode &&
+ port_counter->mode.mask == mask) {
+ ret = 0;
goto out;
}
- if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
- ret = -EINVAL;
- goto out;
- }
+ ret = __counter_set_mode(port_counter, mode, mask);
- mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
- RDMA_COUNTER_MODE_NONE;
- ret = __counter_set_mode(port_counter, mode, 0);
out:
mutex_unlock(&port_counter->lock);
+ if (ret == -EBUSY)
+ NL_SET_ERR_MSG(
+ extack,
+ "Modifying auto mode is not allowed when there is a bound QP");
return ret;
}
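For illustration (not kernel code): the reworked selection logic modeled as standalone C. A non-zero mask requests auto mode; a zero mask falls back to manual when counters are bound, otherwise none; an unchanged mode/mask pair short-circuits to success; and -EBUSY is reported only when auto mode is requested while counters remain bound.

	/* Standalone model of the new mode-selection flow; names mirror the
	 * kernel code but this is not the kernel implementation.
	 */
	#include <errno.h>
	#include <stdio.h>

	enum mode { MODE_NONE, MODE_MANUAL, MODE_AUTO };

	static int set_auto_mode(enum mode *cur, unsigned int *cur_mask,
				 unsigned int num_counters, unsigned int new_mask)
	{
		enum mode new_mode = new_mask ? MODE_AUTO :
				     (num_counters ? MODE_MANUAL : MODE_NONE);

		if (*cur == new_mode && *cur_mask == new_mask)
			return 0;	/* unchanged: success */
		if (new_mode == MODE_AUTO && num_counters)
			return -EBUSY;	/* bound counters block auto mode */
		*cur = new_mode;
		*cur_mask = new_mask;
		return 0;
	}

	int main(void)
	{
		enum mode m = MODE_NONE;
		unsigned int mask = 0;

		printf("%d\n", set_auto_mode(&m, &mask, 0, 0x1)); /* 0: now auto */
		printf("%d\n", set_auto_mode(&m, &mask, 2, 0x2)); /* -EBUSY */
		printf("%d\n", set_auto_mode(&m, &mask, 2, 0x1)); /* 0: unchanged */
		return 0;
	}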
@@ -100,7 +106,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
return ret;
}
-static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port,
+static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
struct ib_qp *qp,
enum rdma_nl_counter_mode mode)
{
@@ -238,7 +244,7 @@ static void counter_history_stat_update(struct rdma_counter *counter)
* Return: The counter (with ref-count increased) if found
*/
static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
- u8 port)
+ u32 port)
{
struct rdma_port_counter *port_counter;
struct rdma_counter *counter = NULL;
@@ -282,7 +288,7 @@ static void counter_release(struct kref *kref)
* rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on
* the auto-mode rule
*/
-int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
{
struct rdma_port_counter *port_counter;
struct ib_device *dev = qp->device;
@@ -352,7 +358,7 @@ int rdma_counter_query_stats(struct rdma_counter *counter)
}
static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
- u8 port, u32 index)
+ u32 port, u32 index)
{
struct rdma_restrack_entry *res;
struct rdma_restrack_root *rt;
@@ -388,7 +394,7 @@ next:
* rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
* specific port, including the running ones and history data
*/
-u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index)
{
struct rdma_port_counter *port_counter;
u64 sum;
@@ -443,7 +449,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
/*
* rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
*/
-int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id)
{
struct rdma_port_counter *port_counter;
@@ -493,7 +499,7 @@ err:
* rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it
* The id of new counter is returned in @counter_id
*/
-int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
u32 qp_num, u32 *counter_id)
{
struct rdma_port_counter *port_counter;
@@ -540,7 +546,7 @@ err:
/*
* rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
*/
-int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id)
{
struct rdma_port_counter *port_counter;
@@ -573,7 +579,7 @@ out:
return ret;
}
-int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
enum rdma_nl_counter_mask *mask)
{
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index aac0fe14e1d9..c660cef66ac6 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -779,7 +779,7 @@ static void remove_client_context(struct ib_device *device,
static int alloc_port_data(struct ib_device *device)
{
struct ib_port_data_rcu *pdata_rcu;
- unsigned int port;
+ u32 port;
if (device->port_data)
return 0;
@@ -788,6 +788,10 @@ static int alloc_port_data(struct ib_device *device)
if (WARN_ON(!device->phys_port_cnt))
return -EINVAL;
+ /* Reserve U32_MAX so the logic to go over all the ports is sane */
+ if (WARN_ON(device->phys_port_cnt == U32_MAX))
+ return -EINVAL;
+
/*
* device->port_data is indexed directly by the port number to make
* access to this data as efficient as possible.
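Why U32_MAX is reserved here: rdma_for_each_port() iterates with an inclusive upper bound, and a u32 loop variable can never exceed U32_MAX, so the bound check would never fail. A minimal userspace demonstration of the hazard (model only):

	/* "port <= UINT32_MAX" is always true for a 32-bit port variable. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t end = UINT32_MAX;
		uint32_t iterations = 0;

		for (uint32_t port = 1; port <= end; port++) {
			if (++iterations > 5) {
				puts("no natural exit: the bound check never fails");
				break;	/* a real loop has no such escape */
			}
		}
		return 0;
	}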
@@ -819,7 +823,7 @@ static int alloc_port_data(struct ib_device *device)
return 0;
}
-static int verify_immutable(const struct ib_device *dev, u8 port)
+static int verify_immutable(const struct ib_device *dev, u32 port)
{
return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
rdma_max_mad_size(dev, port) != 0);
@@ -827,7 +831,7 @@ static int verify_immutable(const struct ib_device *dev, u8 port)
static int setup_port_data(struct ib_device *device)
{
- unsigned int port;
+ u32 port;
int ret;
ret = alloc_port_data(device);
@@ -2005,7 +2009,7 @@ void ib_dispatch_event_clients(struct ib_event *event)
}
static int iw_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
struct in_device *inetdev;
@@ -2044,7 +2048,7 @@ static int iw_query_port(struct ib_device *device,
}
static int __ib_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
union ib_gid gid = {};
@@ -2078,7 +2082,7 @@ static int __ib_query_port(struct ib_device *device,
* @port_attr pointer.
*/
int ib_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
if (!rdma_is_port_valid(device, port_num))
@@ -2130,7 +2134,7 @@ static void add_ndev_hash(struct ib_port_data *pdata)
* NETDEV_UNREGISTER event.
*/
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
- unsigned int port)
+ u32 port)
{
struct net_device *old_ndev;
struct ib_port_data *pdata;
@@ -2173,7 +2177,7 @@ EXPORT_SYMBOL(ib_device_set_netdev);
static void free_netdevs(struct ib_device *ib_dev)
{
unsigned long flags;
- unsigned int port;
+ u32 port;
if (!ib_dev->port_data)
return;
@@ -2204,7 +2208,7 @@ static void free_netdevs(struct ib_device *ib_dev)
}
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
- unsigned int port)
+ u32 port)
{
struct ib_port_data *pdata;
struct net_device *res;
@@ -2291,7 +2295,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_callback cb,
void *cookie)
{
- unsigned int port;
+ u32 port;
rdma_for_each_port (ib_dev, port)
if (rdma_protocol_roce(ib_dev, port)) {
@@ -2369,7 +2373,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
* ib_query_pkey() fetches the specified P_Key table entry.
*/
int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
+ u32 port_num, u16 index, u16 *pkey)
{
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
@@ -2414,7 +2418,7 @@ EXPORT_SYMBOL(ib_modify_device);
* @port_modify_mask and @port_modify structure.
*/
int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
+ u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
int rc;
@@ -2446,10 +2450,10 @@ EXPORT_SYMBOL(ib_modify_port);
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index)
+ u32 *port_num, u16 *index)
{
union ib_gid tmp_gid;
- unsigned int port;
+ u32 port;
int ret, i;
rdma_for_each_port (device, port) {
@@ -2483,7 +2487,7 @@ EXPORT_SYMBOL(ib_find_gid);
* @index: The index into the PKey table where the PKey was found.
*/
int ib_find_pkey(struct ib_device *device,
- u8 port_num, u16 pkey, u16 *index)
+ u32 port_num, u16 pkey, u16 *index)
{
int ret, i;
u16 tmp_pkey;
@@ -2526,7 +2530,7 @@ EXPORT_SYMBOL(ib_find_pkey);
*
*/
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
- u8 port,
+ u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr)
@@ -2696,7 +2700,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, reg_dm_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
- SET_DEVICE_OP(dev_ops, req_ncomp_notif);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
SET_DEVICE_OP(dev_ops, resize_cq);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30a0ff76b332..932b26f50d03 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -528,7 +528,8 @@ add_mapping_response_exit:
}
/* netlink attribute policy for the response to add and query mapping request
- * and response with remote address info */
+ * and response with remote address info
+ */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
[IWPM_NLA_RQUERY_MAPPING_SEQ] = { .type = NLA_U32 },
[IWPM_NLA_RQUERY_LOCAL_ADDR] = {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9355e521d9f4..2081e4854fb0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -61,7 +61,7 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
{
u16 pkey;
struct ib_device *dev = qp_info->port_priv->device;
- u8 pnum = qp_info->port_priv->port_num;
+ u32 pnum = qp_info->port_priv->port_num;
struct ib_ud_wr *wr = &mad_send_wr->send_wr;
struct rdma_ah_attr attr = {};
@@ -118,7 +118,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
* Assumes ib_mad_port_list_lock is being held
*/
static inline struct ib_mad_port_private *
-__ib_get_mad_port(struct ib_device *device, int port_num)
+__ib_get_mad_port(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *entry;
@@ -134,7 +134,7 @@ __ib_get_mad_port(struct ib_device *device, int port_num)
* for a device/port
*/
static inline struct ib_mad_port_private *
-ib_get_mad_port(struct ib_device *device, int port_num)
+ib_get_mad_port(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *entry;
unsigned long flags;
@@ -155,8 +155,7 @@ static inline u8 convert_mgmt_class(u8 mgmt_class)
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
- switch (qp_type)
- {
+ switch (qp_type) {
case IB_QPT_SMI:
return 0;
case IB_QPT_GSI:
@@ -222,7 +221,7 @@ EXPORT_SYMBOL(ib_response_mad);
* Context: Process context.
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_qp_type qp_type,
struct ib_mad_reg_req *mad_reg_req,
u8 rmpp_version,
@@ -549,7 +548,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
}
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
- u16 pkey_index, u8 port_num, struct ib_wc *wc)
+ u16 pkey_index, u32 port_num, struct ib_wc *wc)
{
memset(wc, 0, sizeof *wc);
wc->wr_cqe = cqe;
@@ -608,7 +607,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_port_private *port_priv;
struct ib_mad_agent_private *recv_mad_agent = NULL;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num;
+ u32 port_num;
struct ib_wc mad_wc;
struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
@@ -707,8 +706,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
(const struct ib_mad *)smp,
(struct ib_mad *)mad_priv->mad, &mad_size,
&out_mad_pkey_index);
- switch (ret)
- {
+ switch (ret) {
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
mad_agent_priv->agent.recv_handler) {
@@ -807,7 +805,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
/* Allocate data segments. */
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
- seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
+ seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
if (!seg) {
free_send_rmpp_list(send_wr);
return -ENOMEM;
@@ -837,12 +835,11 @@ int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
-struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
- u32 remote_qpn, u16 pkey_index,
- int rmpp_active,
- int hdr_len, int data_len,
- gfp_t gfp_mask,
- u8 base_version)
+struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
+ u32 remote_qpn, u16 pkey_index,
+ int rmpp_active, int hdr_len,
+ int data_len, gfp_t gfp_mask,
+ u8 base_version)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1275,11 +1272,9 @@ static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
int i;
/* Remove any methods for this mad agent */
- for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
- if (method->agent[i] == agent) {
+ for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
+ if (method->agent[i] == agent)
method->agent[i] = NULL;
- }
- }
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
@@ -1454,9 +1449,8 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
* Was MAD registration request supplied
* with original registration ?
*/
- if (!agent_priv->reg_req) {
+ if (!agent_priv->reg_req)
goto out;
- }
port_priv = agent_priv->qp_info->port_priv;
mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
@@ -1613,7 +1607,7 @@ out:
if (mad_agent && !mad_agent->agent.recv_handler) {
dev_notice(&port_priv->device->dev,
- "No receive handler for client %p on port %d\n",
+ "No receive handler for client %p on port %u\n",
&mad_agent->agent, port_priv->port_num);
deref_mad_agent(mad_agent);
mad_agent = NULL;
@@ -1677,15 +1671,16 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
-static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
- const struct ib_mad_send_wr_private *wr,
- const struct ib_mad_recv_wc *rwc )
+static inline int
+rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_send_wr_private *wr,
+ const struct ib_mad_recv_wc *rwc)
{
struct rdma_ah_attr attr;
u8 send_resp, rcv_resp;
union ib_gid sgid;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num = mad_agent_priv->agent.port_num;
+ u32 port_num = mad_agent_priv->agent.port_num;
u8 lmc;
bool has_grh;
@@ -1834,7 +1829,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
deref_mad_agent(mad_agent_priv);
} else {
/* not user rmpp, revert to normal behavior and
- * drop the mad */
+ * drop the mad
+ */
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv);
return;
@@ -1860,14 +1856,12 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
mad_recv_wc);
deref_mad_agent(mad_agent_priv);
}
-
- return;
}
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
const struct ib_mad_qp_info *qp_info,
const struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response)
{
@@ -1954,7 +1948,7 @@ static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_qp_info *qp_info,
struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response)
{
@@ -2010,7 +2004,7 @@ static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_qp_info *qp_info,
struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response,
bool opa)
@@ -2034,7 +2028,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_mad_private_header *mad_priv_hdr;
struct ib_mad_private *recv, *response = NULL;
struct ib_mad_agent_private *mad_agent;
- int port_num;
+ u32 port_num;
int ret = IB_MAD_RESULT_SUCCESS;
size_t mad_size;
u16 resp_mad_pkey_index = 0;
@@ -2202,9 +2196,10 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
temp_mad_send_wr->timeout))
break;
}
- }
- else
+ } else {
list_item = &mad_agent_priv->wait_list;
+ }
+
list_add(&mad_send_wr->agent_list, list_item);
/* Reschedule a work item if we have a shorter timeout */
@@ -2258,7 +2253,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
adjust_timeout(mad_agent_priv);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_send_wr->status != IB_WC_SUCCESS )
+ if (mad_send_wr->status != IB_WC_SUCCESS)
mad_send_wc->status = mad_send_wr->status;
if (ret == IB_RMPP_RESULT_INTERNAL)
ib_rmpp_send_handler(mad_send_wc);
@@ -2947,7 +2942,7 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
* Create the QP, PD, MR, and CQ if needed
*/
static int ib_mad_port_open(struct ib_device *device,
- int port_num)
+ u32 port_num)
{
int ret, cq_size;
struct ib_mad_port_private *port_priv;
@@ -3002,7 +2997,7 @@ static int ib_mad_port_open(struct ib_device *device,
if (ret)
goto error7;
- snprintf(name, sizeof name, "ib_mad%d", port_num);
+ snprintf(name, sizeof(name), "ib_mad%u", port_num);
port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!port_priv->wq) {
ret = -ENOMEM;
@@ -3048,7 +3043,7 @@ error3:
* If there are no classes using the port, free the port
* resources (CQ, MR, PD, QP) and remove the port's info structure
*/
-static int ib_mad_port_close(struct ib_device *device, int port_num)
+static int ib_mad_port_close(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *port_priv;
unsigned long flags;
@@ -3057,7 +3052,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
port_priv = __ib_get_mad_port(device, port_num);
if (port_priv == NULL) {
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- dev_err(&device->dev, "Port %d not found\n", port_num);
+ dev_err(&device->dev, "Port %u not found\n", port_num);
return -ENODEV;
}
list_del_init(&port_priv->port_list);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index e0573e4d0404..8af0619a39cd 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -382,8 +382,8 @@ static inline int get_seg_num(struct ib_mad_recv_buf *seg)
return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}
-static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
- struct ib_mad_recv_buf *seg)
+static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
+ struct ib_mad_recv_buf *seg)
{
if (seg->list.next == rmpp_list)
return NULL;
@@ -396,8 +396,8 @@ static inline int window_size(struct ib_mad_agent_private *agent)
return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}
-static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
- int seg_num)
+static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
+ int seg_num)
{
struct ib_mad_recv_buf *seg_buf;
int cur_seg_num;
@@ -449,7 +449,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
-static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
+static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
struct ib_mad_recv_wc *rmpp_wc;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 57519ca6cd2c..a5dd4b7a74bc 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -63,7 +63,7 @@ struct mcast_port {
struct rb_root table;
atomic_t refcount;
struct completion comp;
- u8 port_num;
+ u32 port_num;
};
struct mcast_device {
@@ -605,7 +605,7 @@ found:
*/
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
int (*callback)(int status,
@@ -690,7 +690,7 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
}
EXPORT_SYMBOL(ib_sa_free_multicast);
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
struct mcast_device *dev;
@@ -732,7 +732,7 @@ EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
* success or appropriate error code.
*
*/
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index d306049c22a2..34d0cc1a4147 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -92,7 +92,9 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED },
[RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED },
[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
.len = sizeof(struct __kernel_sockaddr_storage) },
[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
@@ -130,6 +132,11 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 },
@@ -146,6 +153,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 },
[RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
[RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -242,7 +250,7 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
char fw[IB_FW_VERSION_NAME_MAX];
int ret = 0;
- u8 port;
+ u32 port;
if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
@@ -385,6 +393,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
[RDMA_RESTRACK_CM_ID] = "cm_id",
[RDMA_RESTRACK_MR] = "mr",
[RDMA_RESTRACK_CTX] = "ctx",
+ [RDMA_RESTRACK_SRQ] = "srq",
};
struct nlattr *table_attr;
@@ -703,6 +712,135 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
err: return -EMSGSIZE;
}
+static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);
+
+ if (rdma_is_kernel_res(res))
+ return 0;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
+ return -EMSGSIZE;
+
+ return fill_res_name_pid(msg, res);
+}
+
+static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
+ uint32_t max_range)
+{
+ struct nlattr *entry_attr;
+
+ if (!min_range)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (min_range == max_range) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
+ goto err;
+ }
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
+{
+ uint32_t min_range = 0, prev = 0;
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ struct nlattr *table_attr;
+ struct ib_qp *qp = NULL;
+ unsigned long id = 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ rt = &srq->device->res[RDMA_RESTRACK_QP];
+ xa_lock(&rt->xa);
+ xa_for_each(&rt->xa, id, res) {
+ if (!rdma_restrack_get(res))
+ continue;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
+ rdma_restrack_put(res);
+ continue;
+ }
+
+ if (qp->qp_num < prev)
+ /* qp_num should be ascending */
+ goto err_loop;
+
+ if (min_range == 0) {
+ min_range = qp->qp_num;
+ } else if (qp->qp_num > (prev + 1)) {
+ if (fill_res_range_qp_entry(msg, min_range, prev))
+ goto err_loop;
+
+ min_range = qp->qp_num;
+ }
+ prev = qp->qp_num;
+ rdma_restrack_put(res);
+ }
+
+ xa_unlock(&rt->xa);
+
+ if (fill_res_range_qp_entry(msg, min_range, prev))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_loop:
+ rdma_restrack_put(res);
+ xa_unlock(&rt->xa);
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_srq *srq = container_of(res, struct ib_srq, res);
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
+ goto err;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
+ goto err;
+
+ if (ib_srq_has_cq(srq->srq_type)) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
+ srq->ext.cq->res.id))
+ goto err;
+ }
+
+ if (fill_res_srq_qps(msg, srq))
+ goto err;
+
+ return fill_res_name_pid(msg, res);
+
+err:
+ return -EMSGSIZE;
+}
+
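The helpers above amount to a run-length encoding over the SRQ's QP numbers, which the xarray walk yields in ascending order. A userspace model of the same compression (illustrative; input data assumed):

	/* Collapse ascending QP numbers into [min, prev] ranges, as
	 * fill_res_srq_qps()/fill_res_range_qp_entry() do. Like the kernel
	 * code, 0 doubles as the "no open range" sentinel.
	 */
	#include <stdio.h>

	static void emit(unsigned int min, unsigned int max)
	{
		if (min == max)
			printf("lqpn %u\n", min);
		else
			printf("lqpn %u-%u\n", min, max);
	}

	int main(void)
	{
		unsigned int qpn[] = { 4, 5, 6, 9, 12, 13 }; /* ascending */
		unsigned int min = 0, prev = 0;

		for (unsigned int i = 0; i < sizeof(qpn) / sizeof(qpn[0]); i++) {
			if (min == 0)
				min = qpn[i];
			else if (qpn[i] > prev + 1) {
				emit(min, prev);	/* close the open range */
				min = qpn[i];
			}
			prev = qpn[i];
		}
		if (min)
			emit(min, prev);	/* prints 4-6, 9, 12-13 */
		return 0;
	}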
static int fill_stat_counter_mode(struct sk_buff *msg,
struct rdma_counter *counter)
{
@@ -1236,6 +1374,19 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
},
+ [RDMA_RESTRACK_CTX] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CTXN,
+ },
+ [RDMA_RESTRACK_SRQ] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_SRQN,
+ },
+
};
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1476,6 +1627,8 @@ RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
+RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
+RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
@@ -1697,6 +1850,19 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_free(msg);
return err;
}
+
+ /*
+ * Copy-on-fork is supported.
+ * See commits:
+ * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
+ * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
+ * for more details. Don't backport this without them.
+ *
+ * The return value is ignored on purpose: on failure, userspace
+ * should assume copy-on-fork is not supported.
+ */
+ nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);
+
nlmsg_end(msg, nlh);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}
@@ -2139,6 +2305,14 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_res_get_pd_doit,
.dump = nldev_res_get_pd_dumpit,
},
+ [RDMA_NLDEV_CMD_RES_CTX_GET] = {
+ .doit = nldev_res_get_ctx_doit,
+ .dump = nldev_res_get_ctx_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_SRQ_GET] = {
+ .doit = nldev_res_get_srq_doit,
+ .dump = nldev_res_get_srq_dumpit,
+ },
[RDMA_NLDEV_CMD_SYS_GET] = {
.doit = nldev_sys_get_doit,
},
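With the doit/dumpit ops registered, a sufficiently new iproute2 rdma tool can query the new object types and the copy-on-fork attribute. Both the commands' availability and the output shape below are assumptions about userspace, shown only to sketch the intended consumer:

	$ rdma resource show ctx
	dev mlx5_0 ctxn 0 pid 1215 comm ibv_rc_pingpong
	$ rdma resource show srq
	dev mlx5_0 srqn 0 type BASIC lqpn 125-128 pdn 3 pid 1215 comm ibv_srq_pingpong
	$ rdma system show
	netns shared copy-on-fork on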
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index af4879bdf3d6..64e2822af70f 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -40,11 +40,11 @@
#include "smi.h"
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt);
+ u32 port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- bool is_switch, int port_num);
+ bool is_switch, u32 port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 75eafd9208aa..94d83b665a2f 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -112,7 +112,7 @@ static void assert_uverbs_usecnt(struct ib_uobject *uobj,
* however the type's allocat_commit function cannot have been called and the
* uobject cannot be on the uobjects_lists
*
- * For RDMA_REMOVE_DESTROY the caller shold be holding a kref (eg via
+ * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
* rdma_lookup_get_uobject) and the object is left in a state where the caller
* needs to call rdma_lookup_put_uobject.
*
@@ -916,7 +916,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
}
/*
- * Destroy the uncontext and every uobject associated with it.
+ * Destroy the ucontext and every uobject associated with it.
*
* This is internally locked and can be called in parallel from multiple
* contexts.
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index ffabaf327242..033207882c82 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -47,6 +47,7 @@ static const char *type2str(enum rdma_restrack_type type)
[RDMA_RESTRACK_MR] = "MR",
[RDMA_RESTRACK_CTX] = "CTX",
[RDMA_RESTRACK_COUNTER] = "COUNTER",
+ [RDMA_RESTRACK_SRQ] = "SRQ",
};
return names[type];
@@ -141,6 +142,8 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
return container_of(res, struct ib_ucontext, res)->device;
case RDMA_RESTRACK_COUNTER:
return container_of(res, struct rdma_counter, res)->device;
+ case RDMA_RESTRACK_SRQ:
+ return container_of(res, struct ib_srq, res)->device;
default:
WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
return NULL;
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 34fff94eaa38..7b638d91a4ec 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -70,7 +70,7 @@ struct netdev_event_work {
};
static const struct {
- bool (*is_supported)(const struct ib_device *device, u8 port_num);
+ bool (*is_supported)(const struct ib_device *device, u32 port_num);
enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
@@ -79,7 +79,7 @@ static const struct {
#define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)
-unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)
{
int i;
unsigned int ret_flags = 0;
@@ -96,7 +96,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
EXPORT_SYMBOL(roce_gid_type_mask_support);
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
- u8 port, union ib_gid *gid,
+ u32 port, union ib_gid *gid,
struct ib_gid_attr *gid_attr)
{
int i;
@@ -144,7 +144,7 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
BONDING_SLAVE_STATE_NA)
static bool
-is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
+is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *real_dev;
@@ -168,7 +168,7 @@ is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
}
static bool
-is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
+is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *master_dev;
@@ -197,7 +197,7 @@ is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
* considered for deriving default RoCE GID, returns false otherwise.
*/
static bool
-is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
+is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *cookie_ndev = cookie;
@@ -223,13 +223,13 @@ is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
return res;
}
-static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
+static bool pass_all_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
return true;
}
-static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
+static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
bool res;
@@ -260,7 +260,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
* not have been established as slave device yet.
*/
static bool
-is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
+is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -280,7 +280,7 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
static void update_gid_ip(enum gid_op_type gid_op,
struct ib_device *ib_dev,
- u8 port, struct net_device *ndev,
+ u32 port, struct net_device *ndev,
struct sockaddr *addr)
{
union ib_gid gid;
@@ -294,7 +294,7 @@ static void update_gid_ip(enum gid_op_type gid_op,
}
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
struct net_device *event_ndev)
{
@@ -328,7 +328,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
}
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
const struct in_ifaddr *ifa;
struct in_device *in_dev;
@@ -372,7 +372,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
}
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
struct inet6_ifaddr *ifp;
struct inet6_dev *in6_dev;
@@ -417,7 +417,7 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
}
}
-static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void _add_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *ndev)
{
enum_netdev_ipv4_ips(ib_dev, port, ndev);
@@ -425,13 +425,13 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
enum_netdev_ipv6_ips(ib_dev, port, ndev);
}
-static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
_add_netdev_ips(ib_dev, port, cookie);
}
-static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
@@ -446,7 +446,7 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
*
* del_default_gids() deletes the default GIDs of the event/cookie netdevice.
*/
-static void del_default_gids(struct ib_device *ib_dev, u8 port,
+static void del_default_gids(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *cookie_ndev = cookie;
@@ -458,7 +458,7 @@ static void del_default_gids(struct ib_device *ib_dev, u8 port,
IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
-static void add_default_gids(struct ib_device *ib_dev, u8 port,
+static void add_default_gids(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *event_ndev = cookie;
@@ -470,7 +470,7 @@ static void add_default_gids(struct ib_device *ib_dev, u8 port,
}
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -515,7 +515,7 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev)
EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -547,10 +547,10 @@ static int netdev_upper_walk(struct net_device *upper,
return 0;
}
-static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
void *cookie,
void (*handle_netdev)(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *ndev))
{
struct net_device *ndev = cookie;
@@ -574,25 +574,25 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
}
}
-static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *event_ndev)
{
ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}
-static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}
-static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}
-static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 31156e22d3e7..a588c2038479 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -25,7 +25,7 @@ MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
* registration is also enabled if registering memory might yield better
* performance than using multiple SGE entries, see rdma_rw_io_needs_mr()
*/
-static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
+static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
{
if (rdma_protocol_iwarp(dev, port_num))
return true;
@@ -42,7 +42,7 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
* optimization otherwise. Additionally we have a debug option to force usage
* of MRs to help testing this code path.
*/
-static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
+static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
enum dma_data_direction dir, int dma_nents)
{
if (dir == DMA_FROM_DEVICE) {
@@ -87,7 +87,7 @@ static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
}
/* Caller must have zero-initialized *reg. */
-static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
u32 sg_cnt, u32 offset)
{
@@ -121,7 +121,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
}
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct rdma_rw_reg_ctx *prev = NULL;
@@ -308,7 +308,7 @@ static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
* Returns the number of WQEs that will be needed on the workqueue if
* successful, or a negative error code.
*/
-int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
@@ -377,7 +377,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_init);
* successful, or a negative error code.
*/
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
struct ib_sig_attrs *sig_attrs,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
@@ -505,7 +505,7 @@ static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
* completion notification.
*/
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+ u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
struct ib_send_wr *first_wr, *last_wr;
int i;
@@ -562,7 +562,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
* is not set @cqe must be set so that the caller gets a completion
* notification.
*/
-int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
struct ib_send_wr *first_wr;
@@ -581,8 +581,9 @@ EXPORT_SYMBOL(rdma_rw_ctx_post);
* @sg_cnt: number of entries in @sg
* @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
*/
-void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
- struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir)
{
int i;
@@ -620,7 +621,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy);
* @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
*/
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
enum dma_data_direction dir)
{
@@ -647,7 +648,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
* compute max_rdma_ctxts and the size of the transport's Send and
* Send Completion Queues.
*/
-unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
unsigned int maxpages)
{
unsigned int mr_pages;
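
A note on the signature changes above: widening port_num from u8 to u32 matters once a device can expose more than 255 ports, because passing a wider value into a u8 parameter truncates silently rather than failing. A minimal sketch of the hazard the old prototypes allowed (function and values are illustrative, not from this patch):

	/* Illustrative only: the old u8 signatures truncated wide ports. */
	static void take_port(u8 port_num)
	{
		pr_info("got port %u\n", port_num);	/* prints 0, not 256 */
	}

	static void caller(void)
	{
		u32 port = 256;

		take_port(port);	/* silently truncated to 0 */
	}
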
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index cbaaaa92fff3..143de37ae598 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -49,7 +49,7 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
+ struct ib_device *device, u32 port_num, u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
unsigned long timeout_ms, gfp_t gfp_mask,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9ef1a355131b..8f1705c403b4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -95,7 +95,7 @@ struct ib_sa_port {
struct delayed_work ib_cpi_work;
spinlock_t classport_lock; /* protects class port info set */
spinlock_t ah_lock;
- u8 port_num;
+ u32 port_num;
};
struct ib_sa_device {
@@ -1194,7 +1194,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
}
EXPORT_SYMBOL(ib_sa_cancel_query);
-static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
+static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
struct ib_sa_device *sa_dev;
struct ib_sa_port *port;
@@ -1213,7 +1213,7 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
+static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr)
@@ -1251,7 +1251,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
* User must invoke rdma_destroy_ah_attr() to release reference to SGID
* attributes which are initialized using ib_init_ah_attr_from_path().
*/
-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr)
@@ -1409,7 +1409,7 @@ EXPORT_SYMBOL(ib_sa_pack_path);
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
- u8 port_num)
+ u32 port_num)
{
struct ib_sa_port *port;
unsigned long flags;
@@ -1444,7 +1444,7 @@ enum opa_pr_supported {
*/
static int opa_pr_query_possible(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct sa_path_rec *rec)
{
struct ib_port_attr port_attr;
@@ -1533,7 +1533,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
* the query.
*/
int ib_sa_path_rec_get(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -1688,7 +1688,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
* the query.
*/
int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
+ struct ib_device *device, u32 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -1784,7 +1784,7 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
@@ -1876,7 +1876,7 @@ static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -2265,7 +2265,7 @@ static void ib_sa_event(struct ib_event_handler *handler,
unsigned long flags;
struct ib_sa_device *sa_dev =
container_of(handler, typeof(*sa_dev), event_handler);
- u8 port_num = event->element.port_num - sa_dev->start_port;
+ u32 port_num = event->element.port_num - sa_dev->start_port;
struct ib_sa_port *port = &sa_dev->port[port_num];
if (!rdma_cap_ib_sa(handler->device, port->port_num))
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 75e7ec017836..e5a78d1a63c9 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -193,7 +193,7 @@ static void qp_to_error(struct ib_qp_security *sec)
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix)
{
struct ib_port_pkey *pp, *tmp_pp;
@@ -245,7 +245,7 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp)
struct pkey_index_qp_list *tmp_pkey;
struct pkey_index_qp_list *pkey;
struct ib_device *dev;
- u8 port_num = pp->port_num;
+ u32 port_num = pp->port_num;
int ret = 0;
if (pp->state != IB_PORT_PKEY_VALID)
@@ -538,7 +538,7 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
}
void ib_security_cache_change(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix)
{
struct pkey_index_qp_list *pkey;
@@ -649,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
static int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
+ u32 port_num,
u16 pkey_index,
void *sec)
{
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index f19b23817c2b..45f09b75c893 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
#include "smi.h"
#include "opa_smi.h"
-static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
const u8 *return_path,
@@ -127,7 +127,7 @@ static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
* Return IB_SMI_DISCARD if the SMP should be discarded
*/
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- bool is_switch, int port_num)
+ bool is_switch, u32 port_num)
{
return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
@@ -139,7 +139,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- bool is_switch, int port_num)
+ bool is_switch, u32 port_num)
{
return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
OPA_LID_PERMISSIVE);
}
-static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num,
int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
@@ -238,7 +238,7 @@ static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt)
+ u32 port_num, int phys_port_cnt)
{
return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
@@ -254,7 +254,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt)
+ u32 port_num, int phys_port_cnt)
{
return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 91d9b353ab85..e350ed623c45 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -52,11 +52,11 @@ enum smi_forward_action {
};
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt);
+ u32 port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- bool is_switch, int port_num);
+ bool is_switch, u32 port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b8abb30f80df..05b702de00e8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -62,7 +62,7 @@ struct ib_port {
const struct attribute_group *pma_table;
struct attribute_group *hw_stats_ag;
struct rdma_hw_stats *hw_stats;
- u8 port_num;
+ u32 port_num;
};
struct port_attribute {
@@ -94,7 +94,7 @@ struct hw_stats_attribute {
const char *buf,
size_t count);
int index;
- u8 port_num;
+ u32 port_num;
};
static ssize_t port_attr_show(struct kobject *kobj,
@@ -297,7 +297,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
{
- static const char * phys_state_str[] = {
+ static const char *phys_state_str[] = {
"<unknown>",
"Sleep",
"Polling",
@@ -470,14 +470,14 @@ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
struct port_table_attribute port_pma_attr_##_name = { \
.attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \
.index = (_offset) | ((_width) << 16) | ((_counter) << 24), \
- .attr_id = IB_PMA_PORT_COUNTERS , \
+ .attr_id = IB_PMA_PORT_COUNTERS, \
}
#define PORT_PMA_ATTR_EXT(_name, _width, _offset) \
struct port_table_attribute port_pma_attr_ext_##_name = { \
.attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \
.index = (_offset) | ((_width) << 16), \
- .attr_id = IB_PMA_PORT_COUNTERS_EXT , \
+ .attr_id = IB_PMA_PORT_COUNTERS_EXT, \
}
/*
@@ -812,7 +812,7 @@ static const struct attribute_group *get_counter_table(struct ib_device *dev,
}
static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
int ret;
@@ -938,7 +938,7 @@ static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
kfree(attr_group);
}
-static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+static struct attribute *alloc_hsa(int index, u32 port_num, const char *name)
{
struct hw_stats_attribute *hsa;
@@ -956,7 +956,7 @@ static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
return &hsa->attr;
}
-static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num)
{
struct hw_stats_attribute *hsa;
@@ -975,7 +975,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
}
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
- u8 port_num)
+ u32 port_num)
{
struct attribute_group *hsag;
struct rdma_hw_stats *stats;
@@ -1049,7 +1049,6 @@ err_free_hsag:
kfree(hsag);
err_free_stats:
kfree(stats);
- return;
}
static int add_port(struct ib_core_device *coredev, int port_num)
@@ -1075,9 +1074,8 @@ static int add_port(struct ib_core_device *coredev, int port_num)
ret = kobject_init_and_add(&p->kobj, &port_type,
coredev->ports_kobj,
"%d", port_num);
- if (ret) {
+ if (ret)
goto err_put;
- }
p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
if (!p->gid_attr_group) {
@@ -1088,9 +1086,8 @@ static int add_port(struct ib_core_device *coredev, int port_num)
p->gid_attr_group->port = p;
ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
&p->kobj, "gid_attrs");
- if (ret) {
+ if (ret)
goto err_put_gid_attrs;
- }
if (device->ops.process_mad && is_full_dev) {
p->pma_table = get_counter_table(device, port_num);
@@ -1383,7 +1380,7 @@ void ib_free_port_attrs(struct ib_core_device *coredev)
int ib_setup_port_attrs(struct ib_core_device *coredev)
{
struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
- unsigned int port;
+ u32 port;
int ret;
coredev->ports_kobj = kobject_create_and_add("ports",
@@ -1437,7 +1434,7 @@ void ib_device_unregister_sysfs(struct ib_device *device)
* @ktype: pointer to the ktype for this kobject.
* @name: the name of the kobject
*/
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
struct kobject *kobj, struct kobj_type *ktype,
const char *name)
{
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index da2512c30ffd..15d57ba4d07a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -231,7 +231,7 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
memcpy(dst->private_data, src->private_data,
src->private_data_len);
dst->private_data_len = src->private_data_len;
- dst->responder_resources =src->responder_resources;
+ dst->responder_resources = src->responder_resources;
dst->initiator_depth = src->initiator_depth;
dst->flow_control = src->flow_control;
dst->retry_count = src->retry_count;
@@ -1034,7 +1034,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
{
dst->private_data = src->private_data;
dst->private_data_len = src->private_data_len;
- dst->responder_resources =src->responder_resources;
+ dst->responder_resources = src->responder_resources;
dst->initiator_depth = src->initiator_depth;
dst->flow_control = src->flow_control;
dst->retry_count = src->retry_count;
@@ -1708,8 +1708,8 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
ssize_t ret;
if (!ib_safe_file_access(filp)) {
- pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
- task_tgid_vnr(current), current->comm);
+ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ __func__, task_tgid_vnr(current), current->comm);
return -EACCES;
}
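
The pr_err_once() change above replaces a hard-coded function name in the message with __func__; the same conversion recurs in umem.c and user_mad.c below. A sketch of the pattern, which keeps log prefixes accurate if the function is ever renamed:

	pr_err_once("%s: process %d changed security contexts\n",
		    __func__, task_tgid_vnr(current));
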
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2dde99a9ba07..0eb40025075f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -47,17 +47,17 @@
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
- struct sg_page_iter sg_iter;
- struct page *page;
+ bool make_dirty = umem->writable && dirty;
+ struct scatterlist *sg;
+ unsigned int i;
if (umem->nmap > 0)
ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
DMA_BIDIRECTIONAL);
- for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
- page = sg_page_iter_page(&sg_iter);
- unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
+ unpin_user_page_range_dirty_lock(sg_page(sg),
+ DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
sg_free_table(&umem->sg_head);
}
@@ -100,10 +100,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
- /* At minimum, drivers must support PAGE_SIZE or smaller */
- if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
- return 0;
-
umem->iova = va = virt;
/* The best result is the smallest page size that results in the minimum
* number of required pages. Compute the largest page size that could
@@ -309,8 +305,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
int ret;
if (offset > umem->length || length > umem->length - offset) {
- pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
- offset, umem->length, end);
+ pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
+ __func__, offset, umem->length, end);
return -EINVAL;
}
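
On the __ib_umem_release() rework above: unpinning each scatterlist entry as one contiguous page range replaces the old per-page loop, so an entry spanning N pages costs one call instead of N. The per-entry page count falls out of the entry's byte length (sketch of the arithmetic; sg and make_dirty as in the hunk):

	/* Pages spanned by one scatterlist entry of sg->length bytes. */
	unsigned int npages = DIV_ROUND_UP(sg->length, PAGE_SIZE);

	unpin_user_page_range_dirty_lock(sg_page(sg), npages, make_dirty);
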
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index f9b5162d9260..0d65ce146fc4 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -168,6 +168,10 @@ void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+ dma_resv_lock(dmabuf->resv, NULL);
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ dma_resv_unlock(dmabuf->resv);
+
dma_buf_detach(dmabuf, umem_dmabuf->attach);
dma_buf_put(dmabuf);
kfree(umem_dmabuf);
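
The ib_umem_dmabuf_release() hunk above adds a locked unmap before detach: the dma-buf mapping may only be torn down under the reservation lock, and it must be gone before the attachment is released. The resulting ordering, annotated (a sketch of the same five calls):

	dma_resv_lock(dmabuf->resv, NULL);	/* mapping changes need resv */
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	dma_resv_unlock(dmabuf->resv);
	dma_buf_detach(dmabuf, umem_dmabuf->attach);	/* only after unmap */
	dma_buf_put(dmabuf);			/* drop the dma-buf reference */
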
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index dd7f3b437c6b..852efedda798 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -101,7 +101,7 @@ struct ib_umad_port {
struct ib_device *ib_dev;
struct ib_umad_device *umad_dev;
int dev_num;
- u8 port_num;
+ u32 port_num;
};
struct ib_umad_device {
@@ -165,8 +165,8 @@ static void ib_umad_dev_put(struct ib_umad_device *dev)
static int hdr_size(struct ib_umad_file *file)
{
- return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
- sizeof (struct ib_user_mad_hdr_old);
+ return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) :
+ sizeof(struct ib_user_mad_hdr_old);
}
/* caller must hold file->mutex */
@@ -688,8 +688,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent: invalid device\n");
+ dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
ret = -EPIPE;
goto out;
}
@@ -701,7 +700,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(&file->port->dev,
- "ib_umad_reg_agent: invalid QPN %d specified\n",
+ "%s: invalid QPN %d specified\n", __func__,
ureq.qpn);
ret = -EINVAL;
goto out;
@@ -711,9 +710,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!__get_agent(file, agent_id))
goto found;
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent: Max Agents (%u) reached\n",
+ dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
IB_UMAD_MAX_AGENTS);
+
ret = -ENOMEM;
goto out;
@@ -790,8 +789,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent2: invalid device\n");
+ dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
ret = -EPIPE;
goto out;
}
@@ -802,17 +800,16 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent2: invalid QPN %d specified\n",
- ureq.qpn);
+ dev_notice(&file->port->dev, "%s: invalid QPN %d specified\n",
+ __func__, ureq.qpn);
ret = -EINVAL;
goto out;
}
if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
dev_notice(&file->port->dev,
- "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
- ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
+ "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n",
+ __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
ret = -EINVAL;
if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
@@ -827,8 +824,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
if (!__get_agent(file, agent_id))
goto found;
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent2: Max Agents (%u) reached\n",
+ dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM;
goto out;
@@ -840,7 +836,7 @@ found:
req.mgmt_class_version = ureq.mgmt_class_version;
if (ureq.oui & 0xff000000) {
dev_notice(&file->port->dev,
- "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
+ "%s failed: oui invalid 0x%08x\n", __func__,
ureq.oui);
ret = -EINVAL;
goto out;
@@ -1145,7 +1141,7 @@ static const struct file_operations umad_sm_fops = {
static struct ib_umad_port *get_port(struct ib_device *ibdev,
struct ib_umad_device *umad_dev,
- unsigned int port)
+ u32 port)
{
if (!umad_dev)
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f5b8be3bedde..d5e15a8c870d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -364,7 +364,7 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
resp->max_srq_sge = attr->max_srq_sge;
resp->max_pkeys = attr->max_pkeys;
resp->local_ca_ack_delay = attr->local_ca_ack_delay;
- resp->phys_port_cnt = ib_dev->phys_port_cnt;
+ resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
}
static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
@@ -2002,12 +2002,13 @@ static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
- if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
- sizeof (struct ib_sge))
+ if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
+ sizeof(struct ib_sge))
return NULL;
- return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
- num_sge * sizeof (struct ib_sge), GFP_KERNEL);
+ return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
+ num_sge * sizeof(struct ib_sge),
+ GFP_KERNEL);
}
static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
@@ -2216,7 +2217,7 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
const struct ib_sge __user *sgls;
const void __user *wqes;
- if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
+ if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
@@ -2249,14 +2250,14 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
}
if (user_wr->num_sge >=
- (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
- sizeof (struct ib_sge)) {
+ (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
+ sizeof(struct ib_sge)) {
ret = -EINVAL;
goto err;
}
- next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
- user_wr->num_sge * sizeof (struct ib_sge),
+ next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
+ user_wr->num_sge * sizeof(struct ib_sge),
GFP_KERNEL);
if (!next) {
ret = -ENOMEM;
@@ -2274,8 +2275,8 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
next->num_sge = user_wr->num_sge;
if (next->num_sge) {
- next->sg_list = (void *) next +
- ALIGN(sizeof *next, sizeof (struct ib_sge));
+ next->sg_list = (void *)next +
+ ALIGN(sizeof(*next), sizeof(struct ib_sge));
if (copy_from_user(next->sg_list, sgls + sg_ind,
next->num_sge *
sizeof(struct ib_sge))) {
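
alloc_wr() and the receive-WR unmarshalling above share one guard worth spelling out: before computing header + n * elem in 32-bit arithmetic, reject any n for which the product would wrap. A generic sketch of the check (names are illustrative):

	static void *alloc_counted(size_t hdr, u32 n, size_t elem)
	{
		if (n >= (U32_MAX - ALIGN(hdr, elem)) / elem)
			return NULL;	/* hdr + n * elem would overflow */

		return kmalloc(ALIGN(hdr, elem) + n * elem, GFP_KERNEL);
	}
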
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index ff047eb024ab..990f0724acc6 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -752,9 +752,10 @@ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
return uverbs_set_output(bundle, attr);
}
-int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx, s64 lower_bound, u64 upper_bound,
- s64 *def_val)
+int _uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
{
const struct uverbs_attr *attr;
@@ -773,7 +774,30 @@ int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
return 0;
}
-EXPORT_SYMBOL(_uverbs_get_const);
+EXPORT_SYMBOL(_uverbs_get_const_signed);
+
+int _uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to > upper_bound)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const_unsigned);
int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
size_t idx, const void *from, size_t size)
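
The split above gives unsigned attributes their own fetch path, so a large u64 constant is no longer forced through an s64 bound check. A hypothetical caller (attribute id, bound, and default invented for illustration):

	u64 val, def = 0;
	int ret;

	/* Accept 0..4096; fall back to def if the attribute is absent. */
	ret = _uverbs_get_const_unsigned(&val, attrs, MY_ATTR_ID, 4096, &def);
	if (ret)
		return ret;
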
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 28464c58738c..2b0798151fb7 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -96,10 +96,10 @@ static const char * const wc_statuses[] = {
[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
[IB_WC_LOC_PROT_ERR] = "local protection error",
[IB_WC_WR_FLUSH_ERR] = "WR flushed",
- [IB_WC_MW_BIND_ERR] = "memory management operation error",
+ [IB_WC_MW_BIND_ERR] = "memory bind operation error",
[IB_WC_BAD_RESP_ERR] = "bad response error",
[IB_WC_LOC_ACCESS_ERR] = "local access error",
- [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
+ [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
[IB_WC_REM_ACCESS_ERR] = "remote access error",
[IB_WC_REM_OP_ERR] = "remote operation error",
[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
@@ -227,7 +227,8 @@ rdma_node_get_transport(unsigned int node_type)
}
EXPORT_SYMBOL(rdma_node_get_transport);
-enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+ u32 port_num)
{
enum rdma_transport_type lt;
if (device->ops.get_link_layer)
@@ -341,7 +342,8 @@ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
}
/* uverbs manipulates usecnt with proper locking, while the kabi
- requires the caller to guarantee we can't race here. */
+ * requires the caller to guarantee we can't race here.
+ */
WARN_ON(atomic_read(&pd->usecnt));
ret = pd->device->ops.dealloc_pd(pd, udata);
@@ -658,7 +660,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
EXPORT_SYMBOL(ib_get_rdma_header_version);
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
const struct ib_grh *grh)
{
int grh_version;
@@ -701,7 +703,7 @@ static bool find_gid_index(const union ib_gid *gid,
}
static const struct ib_gid_attr *
-get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
+get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
u16 vlan_id, const union ib_gid *sgid,
enum ib_gid_type gid_type)
{
@@ -788,7 +790,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
* On success the caller is responsible to call rdma_destroy_ah_attr on the
* attr.
*/
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
{
@@ -919,7 +921,7 @@ void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
EXPORT_SYMBOL(rdma_destroy_ah_attr);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
- const struct ib_grh *grh, u8 port_num)
+ const struct ib_grh *grh, u32 port_num)
{
struct rdma_ah_attr ah_attr;
struct ib_ah *ah;
@@ -1037,8 +1039,12 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
}
atomic_inc(&pd->usecnt);
+ rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
+ rdma_restrack_parent_name(&srq->res, &pd->res);
+
ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
+ rdma_restrack_put(&srq->res);
atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
@@ -1048,6 +1054,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
return ERR_PTR(ret);
}
+ rdma_restrack_add(&srq->res);
+
return srq;
}
EXPORT_SYMBOL(ib_create_srq_user);
@@ -1086,6 +1094,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type))
atomic_dec(&srq->ext.cq->usecnt);
+ rdma_restrack_del(&srq->res);
kfree(srq);
return ret;
@@ -1673,7 +1682,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
const struct ib_gid_attr *old_sgid_attr_av;
const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
@@ -1801,7 +1810,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width)
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
{
int rc;
u32 netdev_speed;
@@ -2467,7 +2476,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
}
EXPORT_SYMBOL(ib_check_mr_status);
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
int state)
{
if (!device->ops.set_vf_link_state)
@@ -2477,7 +2486,7 @@ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
}
EXPORT_SYMBOL(ib_set_vf_link_state);
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info)
{
if (!device->ops.get_vf_config)
@@ -2487,7 +2496,7 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
}
EXPORT_SYMBOL(ib_get_vf_config);
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats)
{
if (!device->ops.get_vf_stats)
@@ -2497,7 +2506,7 @@ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
}
EXPORT_SYMBOL(ib_get_vf_stats);
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
int type)
{
if (!device->ops.set_vf_guid)
@@ -2507,7 +2516,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
}
EXPORT_SYMBOL(ib_set_vf_guid);
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid)
{
@@ -2849,7 +2858,7 @@ void ib_drain_qp(struct ib_qp *qp)
}
EXPORT_SYMBOL(ib_drain_qp);
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *))
@@ -2875,7 +2884,7 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(rdma_alloc_netdev);
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
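
For the SRQ hunks in ib_create_srq_user() and ib_destroy_srq_user() above, the restrack calls follow a fixed order; skipping a step leaves rdmatool either blind to the object or holding a stale entry. The ordering, summarized as comments:

	/*
	 * rdma_restrack_new()         - init the entry before ops.create_srq()
	 * rdma_restrack_parent_name() - inherit the owning PD's task name
	 * rdma_restrack_add()         - publish only after creation succeeds
	 * rdma_restrack_put()         - drop the entry if creation fails
	 * rdma_restrack_del()         - unpublish before kfree() on destroy
	 */
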
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index 0feac5132ce1..6a17f5cdb020 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -2,9 +2,7 @@
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
+ depends on INET && DCB && BNXT
help
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b930ea3dab7a..ba26d8e6a9c2 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -138,6 +138,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
+#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 5f5408cdf008..3e54e1ae75b4 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -114,7 +114,7 @@ static const char * const bnxt_re_stat_name[] = {
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
@@ -235,7 +235,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
}
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
/* We support only per port stats */
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 76399f477e5c..ede048607d6c 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -97,8 +97,8 @@ enum bnxt_re_hw_stats {
};
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num);
+ u32 port_num);
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index);
+ u32 port, int index);
#endif /* __BNXT_RE_HW_STATS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ba515efd4fdc..2efaa80bfbd2 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -189,7 +189,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
}
/* Port */
-int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -229,7 +229,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
return 0;
}
-int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr port_attr;
@@ -254,7 +254,7 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}
-int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -266,7 +266,7 @@ int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
&rdev->qplib_res.pkey_tbl, index, pkey);
}
-int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
int index, union ib_gid *gid)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -374,7 +374,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
}
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 9a8130b79256..d68671cc6173 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -149,19 +149,19 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
-int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr);
-int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
-int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
-int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
- u8 port_num);
+ u32 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index fdb8c2478258..8bfbf0231a9e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -81,6 +81,7 @@ static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
+static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
@@ -221,6 +222,37 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
+ struct bnxt_re_dev *rdev = p;
+ struct bnxt *bp;
+
+ if (!rdev)
+ return;
+ ASSERT_RTNL();
+
+ /* The L2 driver invokes this callback during device error/crash or
+ * device reset. The RoCE driver does not currently recover the device
+ * on error; handle the error by dispatching fatal events to all QPs,
+ * i.e. by calling bnxt_re_dev_stop, and release the MSI-x vectors,
+ * as the L2 driver wants to modify the MSI-x table.
+ */
+ bp = netdev_priv(rdev->netdev);
+
+ ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");
+ /* Check the current device state from the L2 structure and move the
+ * device to the detached state if FW_FATAL_COND is set.
+ * This prevents sending further commands to the HW during clean-up
+ * in case the device is already in error.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+
+ bnxt_re_dev_stop(rdev);
+ bnxt_re_stop_irq(rdev);
+ /* Move the device state to detached and avoid sending any more
+ * commands to the HW
+ */
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
}
static void bnxt_re_start(void *p)
@@ -234,6 +266,8 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
if (!rdev)
return;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return;
rdev->num_vfs = num_vfs;
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
@@ -427,6 +461,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
if (!en_dev)
return rc;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
@@ -489,6 +526,9 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
if (!en_dev)
return rc;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
@@ -561,24 +601,12 @@ static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
-static void bnxt_re_dev_unprobe(struct net_device *netdev,
- struct bnxt_en_dev *en_dev)
-{
- dev_put(netdev);
- module_put(en_dev->pdev->driver->driver.owner);
-}
-
static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
- struct bnxt *bp = netdev_priv(netdev);
struct bnxt_en_dev *en_dev;
struct pci_dev *pdev;
- /* Call bnxt_en's RoCE probe via indirect API */
- if (!bp->ulp_probe)
- return ERR_PTR(-EINVAL);
-
- en_dev = bp->ulp_probe(netdev);
+ en_dev = bnxt_ulp_probe(netdev);
if (IS_ERR(en_dev))
return en_dev;
@@ -593,10 +621,6 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return ERR_PTR(-ENODEV);
}
- /* Bump net device reference count */
- if (!try_module_get(pdev->driver->driver.owner))
- return ERR_PTR(-ENODEV);
-
dev_hold(netdev);
return en_dev;
@@ -1523,13 +1547,12 @@ fail:
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
- struct bnxt_en_dev *en_dev = rdev->en_dev;
struct net_device *netdev = rdev->netdev;
bnxt_re_dev_remove(rdev);
if (netdev)
- bnxt_re_dev_unprobe(netdev, en_dev);
+ dev_put(netdev);
}
static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
@@ -1551,7 +1574,7 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
*rdev = bnxt_re_dev_add(netdev, en_dev);
if (!*rdev) {
rc = -ENOMEM;
- bnxt_re_dev_unprobe(netdev, en_dev);
+ dev_put(netdev);
goto exit;
}
exit:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 995d4633b0a1..d4d4959c2434 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2784,6 +2784,7 @@ do_rq:
dev_err(&cq->hwq.pdev->dev,
"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
cqe_cons, rq->max_wqe);
+ rc = -EINVAL;
goto done;
}
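
The qplib_fp.c hunk above is a classic error-path fix: the overflow branch jumped to done with rc still 0, so callers saw success. The same one-line pattern shows up again in hfi1/firmware.c further down. Sketch (condition paraphrased from the message in the hunk):

	if (cqe_cons > rq->max_wqe) {
		rc = -EINVAL;	/* previously fell through with rc == 0 */
		goto done;
	}
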
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 441eb421e5e5..5d384def5e5f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -212,6 +212,10 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
u8 opcode, retry_cnt = 0xFF;
int rc = 0;
+ /* Prevent posting if f/w is not in a state to process commands */
+ if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
+ return 0;
+
do {
opcode = req->opcode;
rc = __send_message(rcfw, req, resp, sb, is_block);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 5f2f0a5a3560..9474c0046582 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -138,6 +138,8 @@ struct bnxt_qplib_qp_node {
#define FIRMWARE_INITIALIZED_FLAG (0)
#define FIRMWARE_FIRST_FLAG (31)
#define FIRMWARE_TIMED_OUT (3)
+#define ERR_DEVICE_DETACHED (4)
+
struct bnxt_qplib_cmdq_mbox {
struct bnxt_qplib_reg_desc reg;
void __iomem *prod;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index fa7878336100..3ca47004b752 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
unmap_io:
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
+ dpit->dbr_bar_reg_iomem = NULL;
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e42c812e74c3..291471d12197 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -145,7 +145,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
static LIST_HEAD(timeout_list);
-static spinlock_t timeout_lock;
+static DEFINE_SPINLOCK(timeout_lock);
static void deref_cm_id(struct c4iw_ep_common *epc)
{
@@ -4452,7 +4452,6 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
int __init c4iw_cm_init(void)
{
- spin_lock_init(&timeout_lock);
skb_queue_head_init(&rxq);
workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
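
On the cm.c change above: DEFINE_SPINLOCK() initializes the lock at compile time, which both drops the spin_lock_init() call from c4iw_cm_init() and closes the window in which the lock could be taken before init. The two forms side by side (sketch):

	static DEFINE_SPINLOCK(example_lock);	/* usable immediately */

	/* versus the two-step form this patch removes:
	 *	static spinlock_t example_lock;
	 *	spin_lock_init(&example_lock);	- must run before first use
	 */
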
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f85477f3b037..cdec5deb37a1 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -341,11 +341,6 @@ static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
return container_of(ibdev, struct c4iw_dev, ibdev);
}
-static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
-{
- return container_of(rdev, struct c4iw_dev, rdev);
-}
-
static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
return xa_load(&rhp->cqs, cqid);
@@ -659,12 +654,6 @@ static inline u32 c4iw_ib_to_tpt_access(int a)
FW_RI_MEM_ACCESS_LOCAL_READ;
}
-static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
-}
-
enum c4iw_mmid_state {
C4IW_STAG_STATE_VALID,
C4IW_STAG_STATE_INVALID
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 1f1f856f8715..3f1893e180dd 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -237,12 +237,12 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
return 0;
}
-static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct c4iw_dev *dev;
- pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+ pr_debug("ibdev %p, port %u, index %d, gid %p\n",
ibdev, port, index, gid);
if (!port)
return -EINVAL;
@@ -295,7 +295,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
return 0;
}
-static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+static int c4iw_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
int ret = 0;
@@ -378,7 +378,7 @@ static const char * const names[] = {
};
static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
@@ -391,7 +391,7 @@ static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
static int c4iw_get_mib(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
@@ -420,7 +420,7 @@ static const struct attribute_group c4iw_attr_group = {
.attrs = c4iw_class_attributes,
};
-static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 5c95c789f302..e800e8e8bed5 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
goto out;
entry->qid = qid;
list_add_tail(&entry->entry, &uctx->cqids);
- for (i = qid; i & rdev->qpmask; i++) {
+ for (i = qid + 1; i & rdev->qpmask; i++) {
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
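
The resource.c fix above is an off-by-one in banking the remainder of a qid block: starting the loop at the just-allocated qid either re-banked that qid while the caller was using it, or, for a block-aligned qid, terminated immediately and banked nothing. Worked example with illustrative values:

	/* qpmask = 3 (blocks of 4), freshly allocated qid = 8:
	 *   old: for (i = 8; i & 3; i++)  - 8 & 3 == 0, body never runs
	 *   new: for (i = 9; i & 3; i++)  - banks 9, 10, 11 for reuse
	 */
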
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index b170817b2741..c3b0e2896475 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -487,11 +487,6 @@ static inline int t4_rq_empty(struct t4_wq *wq)
return wq->rq.in_use == 0;
}
-static inline int t4_rq_full(struct t4_wq *wq)
-{
- return wq->rq.in_use == (wq->rq.size - 1);
-}
-
static inline u32 t4_rq_avail(struct t4_wq *wq)
{
return wq->rq.size - 1 - wq->rq.in_use;
@@ -534,11 +529,6 @@ static inline int t4_sq_empty(struct t4_wq *wq)
return wq->sq.in_use == 0;
}
-static inline int t4_sq_full(struct t4_wq *wq)
-{
- return wq->sq.in_use == (wq->sq.size - 1);
-}
-
static inline u32 t4_sq_avail(struct t4_wq *wq)
{
return wq->sq.size - 1 - wq->sq.in_use;
@@ -679,11 +669,6 @@ static inline void t4_enable_wq_db(struct t4_wq *wq)
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
-static inline int t4_wq_db_enabled(struct t4_wq *wq)
-{
- return !wq->rq.queue[wq->rq.size].status.db_off;
-}
-
enum t4_cq_flags {
CQ_ARMED = 1,
};
@@ -817,19 +802,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
-{
- if (cq->sw_in_use == cq->size) {
- pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
- __func__, cq->cqid);
- cq->error = 1;
- return NULL;
- }
- if (cq->sw_in_use)
- return &cq->sw_queue[cq->sw_cidx];
- return NULL;
-}
-
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret = 0;
@@ -843,11 +815,6 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline int t4_cq_in_error(struct t4_cq *cq)
-{
- return *cq->qp_errp;
-}
-
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
*cq->qp_errp = 1;
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index e5d9712e98c4..ea322cec27d2 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -120,14 +120,14 @@ struct efa_ah {
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata);
-int efa_query_port(struct ib_device *ibdev, u8 port,
+int efa_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
-int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey);
int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
@@ -142,7 +142,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
@@ -156,9 +156,9 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags);
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
- u8 port_num);
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+ u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num);
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port_num, int index);
+ u32 port_num, int index);
#endif /* _EFA_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 0f578734bddb..816cfd65b7ac 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -209,11 +209,11 @@ static void efa_set_host_info(struct efa_dev *dev)
if (!hinf)
return;
- strlcpy(hinf->os_dist_str, utsname()->release,
- min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
+ strscpy(hinf->os_dist_str, utsname()->release,
+ sizeof(hinf->os_dist_str));
hinf->os_type = EFA_ADMIN_OS_LINUX;
- strlcpy(hinf->kernel_ver_str, utsname()->version,
- min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
+ strscpy(hinf->kernel_ver_str, utsname()->version,
+ sizeof(hinf->kernel_ver_str));
hinf->kernel_ver = LINUX_VERSION_CODE;
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
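
On the strlcpy() to strscpy() conversion above: strscpy() always NUL-terminates, never reads past the given destination size, and reports truncation, so the defensive min() of source and destination sizes is no longer needed. Sketch (src is illustrative):

	char buf[16];
	ssize_t n;

	n = strscpy(buf, src, sizeof(buf));
	if (n == -E2BIG)
		pr_warn("input truncated; buf is still NUL-terminated\n");
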
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 479b604e533a..51572f1dc611 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -247,7 +247,7 @@ int efa_query_device(struct ib_device *ibdev,
return 0;
}
-int efa_query_port(struct ib_device *ibdev, u8 port,
+int efa_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct efa_dev *dev = to_edev(ibdev);
@@ -319,7 +319,7 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return 0;
}
-int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct efa_dev *dev = to_edev(ibdev);
@@ -329,7 +329,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
if (index > 0)
@@ -1619,7 +1619,7 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -1904,7 +1904,7 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
return 0;
}
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num)
{
return rdma_alloc_hw_stats_struct(efa_stats_names,
ARRAY_SIZE(efa_stats_names),
@@ -1912,7 +1912,7 @@ struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
}
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
@@ -1981,7 +1981,7 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
}
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_UNSPECIFIED;
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 04b1e8f021f6..16543f717527 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -962,7 +962,6 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_msix_entry *msix)
{
struct cpu_mask_set *set = NULL;
- struct hfi1_ctxtdata *rcd;
struct hfi1_affinity_node *entry;
mutex_lock(&node_affinity.lock);
@@ -976,14 +975,15 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
case IRQ_GENERAL:
/* Don't do accounting for general contexts */
break;
- case IRQ_RCVCTXT:
- rcd = (struct hfi1_ctxtdata *)msix->arg;
+ case IRQ_RCVCTXT: {
+ struct hfi1_ctxtdata *rcd = msix->arg;
+
/* Don't do accounting for control contexts */
if (rcd->ctxt != HFI1_CTRL_CTXT)
set = &entry->rcv_intr;
break;
+ }
case IRQ_NETDEVCTXT:
- rcd = (struct hfi1_ctxtdata *)msix->arg;
set = &entry->def_intr;
break;
default:
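
The affinity.c hunk above narrows a shared local to the one case that needs it: bracing the case scopes rcd to that branch, and the explicit cast disappears because C converts from void * implicitly. Generic shape (sketch; the switch expression is assumed from context):

	switch (msix->type) {
	case IRQ_RCVCTXT: {
		struct hfi1_ctxtdata *rcd = msix->arg;	/* no cast needed */

		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	}
	default:
		break;
	}
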
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 993cbf37e0b9..5eeae8df415b 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1322,7 +1322,7 @@ CNTR_ELEM(#name, \
access_ibp_##cntr)
/**
- * hfi_addr_from_offset - return addr for readq/writeq
+ * hfi1_addr_from_offset - return addr for readq/writeq
* @dd: the dd device
* @offset: the offset of the CSR within bar0
*
@@ -8316,7 +8316,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
}
/**
- * gerneral_interrupt() - General interrupt handler
+ * general_interrupt - General interrupt handler
* @irq: MSIx IRQ vector
* @data: hfi1 devdata
*
@@ -15243,8 +15243,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
- /* alloc netdev data */
- ret = hfi1_netdev_alloc(dd);
+ /* alloc VNIC/AIP rx data */
+ ret = hfi1_alloc_rx(dd);
if (ret)
goto bail_cleanup;
@@ -15348,7 +15348,7 @@ bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
msix_clean_up_interrupts(dd);
bail_cleanup:
- hfi1_netdev_free(dd);
+ hfi1_free_rx(dd);
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 2c6f2de74d4d..ac26649d4463 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -822,11 +822,6 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
#define LCB_START DC_LCB_CSRS
#define LCB_END DC_8051_CSRS /* next block is 8051 */
-static inline int is_lcb_offset(u32 offset)
-{
- return (offset >= LCB_START && offset < LCB_END);
-}
-
extern uint num_vls;
extern uint disable_integrity;
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 0b64aa87ab73..f88bb4af245f 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1026,7 +1026,7 @@ static bool __set_armed_to_active(struct hfi1_packet *packet)
}
/**
- * armed to active - the fast path for armed to active
+ * set_armed_to_active - the fast path for armed to active
* @packet: the packet structure
*
* Return true if packet processing needs to bail.
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index 91f13140ddf2..a414214f6035 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -49,7 +49,7 @@
#include "trace.h"
/**
- * exp_tid_group_init - initialize exp_tid_set
+ * hfi1_exp_tid_set_init - initialize exp_tid_set
* @set: the set
*/
static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
@@ -70,7 +70,7 @@ void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
}
/**
- * alloc_ctxt_rcv_groups - initialize expected receive groups
+ * hfi1_alloc_ctxt_rcv_groups - initialize expected receive groups
* @rcd: the context to add the groupings to
*/
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
@@ -100,7 +100,7 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
}
/**
- * free_ctxt_rcv_groups - free expected receive groups
+ * hfi1_free_ctxt_rcv_groups - free expected receive groups
* @rcd: the context to free
*
* The routine dismantles the expect receive linked
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 0e83d4b61e46..2cf102b5abd4 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
__func__, (ptr -
(u32 *)dd->platform_config.data));
+ ret = -EINVAL;
goto bail;
}
/* Jump the CRC DWORD */
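
The firmware.c fix above sets ret before the goto, so a failed CRC check propagates -EINVAL instead of returning whatever success value ret last held. A compilable sketch of the pattern; crc_ok() and parse_fields() are assumed stand-ins for the real steps:

#include <errno.h>
#include <stddef.h>

static int crc_ok(const unsigned char *p, size_t len)
{
	return len && p[len - 1] == 0;	/* stand-in check */
}

static int parse_fields(const unsigned char *p, size_t len)
{
	(void)p; (void)len;
	return 0;
}

int parse_blob(const unsigned char *p, size_t len)
{
	int ret = 0;

	if (!crc_ok(p, len)) {
		ret = -EINVAL;	/* the assignment the patch adds */
		goto bail;
	}
	ret = parse_fields(p, len);
bail:
	return ret;
}

int main(void)
{
	unsigned char good[] = { 1, 2, 0 };

	return parse_blob(good, sizeof(good)) ? 1 : 0;
}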
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 2a9a040569eb..867ae0b1aa95 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -69,7 +69,6 @@
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
-#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
@@ -717,12 +716,6 @@ static inline void incr_cntr64(u64 *cntr)
(*cntr)++;
}
-static inline void incr_cntr32(u32 *cntr)
-{
- if (*cntr < (u32)-1LL)
- (*cntr)++;
-}
-
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
enum irq_type type;
@@ -864,7 +857,7 @@ struct hfi1_pportdata {
u8 rx_pol_inv;
u8 hw_pidx; /* physical port index */
- u8 port; /* IB port number and index into dd->pports - 1 */
+ u32 port; /* IB port number and index into dd->pports - 1 */
/* type of neighbor node */
u8 neighbor_type;
u8 neighbor_normal;
@@ -1066,6 +1059,7 @@ struct sdma_vl_map;
#define SERIAL_MAX 16 /* length of the serial number */
typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
+struct hfi1_netdev_rx;
struct hfi1_devdata {
struct hfi1_ibdev verbs_dev; /* must be first */
/* pointers to related structs for this device */
@@ -1408,7 +1402,7 @@ struct hfi1_devdata {
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
int vnic_num_vports;
- struct net_device *dummy_netdev;
+ struct hfi1_netdev_rx *netdev_rx;
struct hfi1_affinity_node *affinity_entry;
/* Keeps track of IPoIB RSM rule users */
@@ -1480,7 +1474,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
- struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
+ struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
@@ -1976,10 +1970,10 @@ static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
return container_of(rdi, struct hfi1_ibdev, rdi);
}
-static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
+static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+ u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */
WARN_ON(pidx >= dd->num_pports);
return &dd->pport[pidx].ibport_data;
@@ -2198,7 +2192,7 @@ extern const struct attribute_group ib_hfi1_attr_group;
int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);
-int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 786c6316273f..e3a8a420c045 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -627,7 +627,7 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
* Common code for initializing the physical port structure.
*/
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
- struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
+ struct hfi1_devdata *dd, u8 hw_pidx, u32 port)
{
int i;
uint default_pkey_idx;
@@ -1775,7 +1775,7 @@ static void remove_one(struct pci_dev *pdev)
hfi1_unregister_ib_device(dd);
/* free netdev data */
- hfi1_netdev_free(dd);
+ hfi1_free_rx(dd);
/*
* Disable the IB link, disable interrupts on the device,
@@ -1860,7 +1860,8 @@ bail:
}
/**
- * allocate eager buffers, both kernel and user contexts.
+ * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
+ * contexts.
* @rcd: the context we are setting up.
*
 * Allocate the eager TID buffers and program them into the chip.
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index d580aa17ae37..cda81a7843c2 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -321,7 +321,7 @@ static inline void iowait_drain_wakeup(struct iowait *wait)
/**
* iowait_get_txhead() - get packet off of iowait list
*
- * @wait iowait_work struture
+ * @wait: iowait_work structure
*/
static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index f650cac9d424..2cff38b105ac 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
* @producer_lock: producer sync lock
* @consumer_lock: consumer sync lock
*/
+struct ipoib_txreq;
struct hfi1_ipoib_circ_buf {
- void **items;
+ struct ipoib_txreq **items;
unsigned long head;
unsigned long tail;
unsigned long max_items;
@@ -125,10 +126,10 @@ hfi1_ipoib_priv(const struct net_device *dev)
return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}
-int hfi1_ipoib_send_dma(struct net_device *dev,
- struct sk_buff *skb,
- struct ib_ah *address,
- u32 dqpn);
+int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn);
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
@@ -143,8 +144,10 @@ struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
int size, void *data);
int hfi1_ipoib_rn_get_params(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);
+
#endif /* _IPOIB_H */
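
The header change above types the tx ring as an array of ipoib_txreq pointers rather than void **, matching the later switch in ipoib_tx.c from a vzalloc'd array of structs to a kcalloc'd array of pointers. A simplified single-producer sketch of such a power-of-two ring (the kernel version additionally publishes head with smp_store_release()):

#include <stdlib.h>

struct txreq { int payload; };

struct circ_buf {
	struct txreq **items;	/* ring of pointers, as in the patch */
	unsigned long head;	/* producer index */
	unsigned long tail;	/* consumer index */
	unsigned long max_items;	/* must be a power of two */
};

#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))

static int circ_push(struct circ_buf *r, struct txreq *tx)
{
	unsigned long next = CIRC_ADD(r->head, 1, r->max_items);

	if (next == r->tail)
		return -1;	/* full: one slot is kept empty */
	r->items[r->head] = tx;
	r->head = next;
	return 0;
}

int main(void)
{
	struct circ_buf ring = { .head = 0, .tail = 0, .max_items = 8 };
	struct txreq tx = { .payload = 1 };

	ring.items = calloc(ring.max_items, sizeof(*ring.items));
	if (!ring.items)
		return 1;
	circ_push(&ring, &tx);
	free(ring.items);
	return 0;
}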
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 3242290eb6a7..e594a961f513 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -101,14 +101,6 @@ static const struct net_device_ops hfi1_ipoib_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
};
-static int hfi1_ipoib_send(struct net_device *dev,
- struct sk_buff *skb,
- struct ib_ah *address,
- u32 dqpn)
-{
- return hfi1_ipoib_send_dma(dev, skb, address, dqpn);
-}
-
static int hfi1_ipoib_mcast_attach(struct net_device *dev,
struct ib_device *device,
union ib_gid *mgid,
@@ -194,7 +186,7 @@ static void hfi1_ipoib_set_id(struct net_device *dev, int id)
}
static int hfi1_ipoib_setup_rn(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct net_device *netdev,
void *param)
{
@@ -204,6 +196,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
int rc;
rn->send = hfi1_ipoib_send;
+ rn->tx_timeout = hfi1_ipoib_tx_timeout;
rn->attach_mcast = hfi1_ipoib_mcast_attach;
rn->detach_mcast = hfi1_ipoib_mcast_detach;
rn->set_id = hfi1_ipoib_set_id;
@@ -243,7 +236,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
}
int hfi1_ipoib_rn_get_params(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
{
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index edd4eeac8dd1..993f9838b6c8 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -15,6 +15,7 @@
#include "verbs.h"
#include "trace_ibhdrs.h"
#include "ipoib.h"
+#include "trace_tx.h"
/* Add a convenience helper */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
@@ -63,12 +64,14 @@ static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
+ trace_hfi1_txq_stop(txq);
if (atomic_inc_return(&txq->stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
+ trace_hfi1_txq_wake(txq);
if (atomic_dec_and_test(&txq->stops))
netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}
@@ -89,8 +92,10 @@ static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
++txq->sent_txreqs;
if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
- !atomic_xchg(&txq->ring_full, 1))
+ !atomic_xchg(&txq->ring_full, 1)) {
+ trace_hfi1_txq_full(txq);
hfi1_ipoib_stop_txq(txq);
+ }
}
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
@@ -112,8 +117,10 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* to protect against ring overflow.
*/
if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
- atomic_xchg(&txq->ring_full, 0))
+ atomic_xchg(&txq->ring_full, 0)) {
+ trace_hfi1_txq_xmit_unstopped(txq);
hfi1_ipoib_wake_txq(txq);
+ }
}
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -202,7 +209,7 @@ static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
/* Finish storing txreq before incrementing head. */
smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
- napi_schedule(tx->txq->napi);
+ napi_schedule_irqoff(tx->txq->napi);
} else {
struct hfi1_ipoib_txq *txq = tx->txq;
struct hfi1_ipoib_dev_priv *priv = tx->priv;
@@ -405,6 +412,7 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
sdma_select_engine_sc(priv->dd,
txp->flow.tx_queue,
txp->flow.sc5);
+ trace_hfi1_flow_switch(txp->txq);
}
return tx;
@@ -525,6 +533,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
if (txq->flow.as_int != txp->flow.as_int) {
int ret;
+ trace_hfi1_flow_flush(txq);
ret = hfi1_ipoib_flush_tx_list(dev, txq);
if (unlikely(ret)) {
if (ret == -EBUSY)
@@ -572,10 +581,10 @@ static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
return (u8)skb_get_queue_mapping(skb);
}
-int hfi1_ipoib_send_dma(struct net_device *dev,
- struct sk_buff *skb,
- struct ib_ah *address,
- u32 dqpn)
+int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
struct ipoib_txparms txp;
@@ -635,8 +644,10 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
/* came from non-list submit */
list_add_tail(&txreq->list, &txq->tx_list);
if (list_empty(&txq->wait.list)) {
- if (!atomic_xchg(&txq->no_desc, 1))
+ if (!atomic_xchg(&txq->no_desc, 1)) {
+ trace_hfi1_txq_queued(txq);
hfi1_ipoib_stop_txq(txq);
+ }
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
}
@@ -659,6 +670,7 @@ static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
struct hfi1_ipoib_txq *txq =
container_of(wait, struct hfi1_ipoib_txq, wait);
+ trace_hfi1_txq_wakeup(txq);
if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
}
@@ -702,14 +714,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->tx_napis = kcalloc_node(dev->num_tx_queues,
sizeof(struct napi_struct),
- GFP_ATOMIC,
+ GFP_KERNEL,
priv->dd->node);
if (!priv->tx_napis)
goto free_txreq_cache;
priv->txqs = kcalloc_node(dev->num_tx_queues,
sizeof(struct hfi1_ipoib_txq),
- GFP_ATOMIC,
+ GFP_KERNEL,
priv->dd->node);
if (!priv->txqs)
goto free_tx_napis;
@@ -741,9 +753,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- vzalloc_node(array_size(tx_ring_size,
- sizeof(struct ipoib_txreq)),
- priv->dd->node);
+ kcalloc_node(tx_ring_size,
+ sizeof(struct ipoib_txreq *),
+ GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
@@ -764,7 +776,7 @@ free_txqs:
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(txq->napi);
- vfree(txq->tx_ring.items);
+ kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
@@ -817,7 +829,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(txq->napi);
(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
- vfree(txq->tx_ring.items);
+ kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
@@ -854,3 +866,32 @@ void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
}
}
+
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = &priv->txqs[q];
+ u64 completed = atomic64_read(&txq->complete_txreqs);
+
+ dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
+ (unsigned long long)txq, q,
+ __netif_subqueue_stopped(dev, txq->q_idx),
+ atomic_read(&txq->stops),
+ atomic_read(&txq->no_desc),
+ atomic_read(&txq->ring_full));
+ dd_dev_info(priv->dd, "sde %llx engine %u\n",
+ (unsigned long long)txq->sde,
+ txq->sde ? txq->sde->this_idx : 0);
+ dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
+ dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
+ txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
+ dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
+ dev->tx_queue_len, txq->tx_ring.max_items);
+ dd_dev_info(priv->dd, "head %lu tail %lu\n",
+ txq->tx_ring.head, txq->tx_ring.tail);
+ dd_dev_info(priv->dd, "wait queued %u\n",
+ !list_empty(&txq->wait.list));
+ dd_dev_info(priv->dd, "tx_list empty %u\n",
+ list_empty(&txq->tx_list));
+}
+
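
The ipoib_tx.c hunks above wrap the netif stop/wake calls in per-reason atomic_xchg flags feeding a stops counter, so each reason (ring full, no descriptors) stops the queue at most once and the queue wakes only after every reason clears. A userspace analogue with C11 atomics, assuming simplified stand-ins for the netif calls:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stops, ring_full;

/* Stand-ins for netif_stop_subqueue()/netif_wake_subqueue(). */
static void stop_txq(void) { if (atomic_fetch_add(&stops, 1) == 0) puts("queue stopped"); }
static void wake_txq(void) { if (atomic_fetch_sub(&stops, 1) == 1) puts("queue woken"); }

static void on_ring_full(void)
{
	if (!atomic_exchange(&ring_full, 1))	/* only the first observer stops */
		stop_txq();
}

static void on_ring_drained(void)
{
	if (atomic_exchange(&ring_full, 0))	/* only the first clearer wakes */
		wake_txq();
}

int main(void)
{
	on_ring_full();
	on_ring_full();		/* no second stop */
	on_ring_drained();	/* single wake */
	return 0;
}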
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index e2f2f7847aed..1fe5e702f31d 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -108,7 +108,7 @@ static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
return 0;
}
-void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port)
{
struct ib_event event;
@@ -297,7 +297,7 @@ static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
struct rvt_qp *qp0;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = dd_from_ppd(ppd);
- u8 port_num = ppd->port;
+ u32 port_num = ppd->port;
memset(&attr, 0, sizeof(attr));
attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
@@ -515,7 +515,7 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
{
struct trap_node *trap;
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
@@ -581,7 +581,7 @@ void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len, u32 max_len)
+ u32 port, u32 *resp_len, u32 max_len)
{
struct opa_node_description *nd;
@@ -601,12 +601,12 @@ static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
}
static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct opa_node_info *ni;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+ u32 pidx = port - 1; /* IB number port from 1, hw from 0 */
ni = (struct opa_node_info *)data;
@@ -641,11 +641,11 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
}
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
+ u32 port)
{
struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+ u32 pidx = port - 1; /* IB number port from 1, hw from 0 */
/* GUID 0 is illegal */
if (smp->attr_mod || pidx >= dd->num_pports ||
@@ -794,7 +794,7 @@ void read_ltp_rtt(struct hfi1_devdata *dd)
}
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
int i;
@@ -1009,7 +1009,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
* @port: the IB port number
* @pkeys: the pkey table is placed here
*/
-static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
{
struct hfi1_pportdata *ppd = dd->pport + port - 1;
@@ -1019,7 +1019,7 @@ static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
}
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1349,7 +1349,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
*
*/
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len, int local_mad)
{
struct opa_port_info *pi = (struct opa_port_info *)data;
@@ -1667,7 +1667,7 @@ get_only:
* @port: the IB port number
* @pkeys: the PKEY table
*/
-static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
{
struct hfi1_pportdata *ppd;
int i;
@@ -1718,7 +1718,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
}
static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1732,7 +1732,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
u32 size = 0;
if (n_blocks_sent == 0) {
- pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
+ pr_warn("OPA Get PKey AM Invalid : P = %u; B = 0x%x; N = 0x%x\n",
port, start_block, n_blocks_sent);
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
@@ -1825,7 +1825,7 @@ static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
}
static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1848,7 +1848,7 @@ static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1877,7 +1877,7 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1900,7 +1900,7 @@ static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1921,7 +1921,7 @@ static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NBLK(am);
@@ -1943,7 +1943,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NBLK(am);
@@ -1985,7 +1985,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NPORT(am);
@@ -2010,7 +2010,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NPORT(am);
@@ -2042,7 +2042,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 nports = OPA_AM_NPORT(am);
@@ -2084,7 +2084,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len, int local_mad)
{
u32 nports = OPA_AM_NPORT(am);
@@ -2132,7 +2132,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -2184,7 +2184,7 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len,
+ struct ib_device *ibdev, u32 port, u32 *resp_len,
u32 max_len)
{
u32 num_ports = OPA_AM_NPORT(am);
@@ -2208,7 +2208,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len,
+ struct ib_device *ibdev, u32 port, u32 *resp_len,
u32 max_len)
{
u32 num_ports = OPA_AM_NPORT(am);
@@ -2232,7 +2232,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
@@ -2274,7 +2274,7 @@ static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
@@ -2722,7 +2722,7 @@ u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct opa_port_status_req *req =
(struct opa_port_status_req *)pmp->data;
@@ -2732,7 +2732,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
unsigned long vl;
size_t response_data_size;
u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- u8 port_num = req->port_num;
+ u32 port_num = req->port_num;
u8 num_vls = hweight64(vl_select_mask);
struct _vls_pctrs *vlinfo;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -2888,7 +2888,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
-static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
+static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port,
u8 res_lli, u8 res_ler)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -2973,7 +2973,7 @@ static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct opa_port_data_counters_msg *req =
(struct opa_port_data_counters_msg *)pmp->data;
@@ -2987,7 +2987,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u8 lq, num_vls;
u8 res_lli, res_ler;
u64 port_mask;
- u8 port_num;
+ u32 port_num;
unsigned long vl;
unsigned long vl_select_mask;
int vfi;
@@ -3123,7 +3123,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
}
static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
+ struct ib_device *ibdev, u32 port)
{
struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
pmp->data;
@@ -3151,7 +3151,7 @@ bail:
}
static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
- struct _port_ectrs *rsp, u8 port)
+ struct _port_ectrs *rsp, u32 port)
{
u64 tmp, tmp2;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -3194,11 +3194,11 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ectrs *rsp;
- u8 port_num;
+ u32 port_num;
struct opa_port_error_counters64_msg *req;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 num_ports;
@@ -3283,7 +3283,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
}
static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
+ struct ib_device *ibdev, u32 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
pmp->data;
@@ -3369,7 +3369,7 @@ bail:
static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ei *rsp;
@@ -3377,7 +3377,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u64 port_mask;
u32 num_ports;
- u8 port_num;
+ u32 port_num;
u8 num_pslm;
u64 reg;
@@ -3468,7 +3468,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct opa_clear_port_status *req =
(struct opa_clear_port_status *)pmp->data;
@@ -3620,14 +3620,14 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct _port_ei *rsp;
struct opa_port_error_info_msg *req;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u64 port_mask;
u32 num_ports;
- u8 port_num;
+ u32 port_num;
u8 num_pslm;
u32 error_info_select;
@@ -3702,7 +3702,7 @@ struct opa_congestion_info_attr {
} __packed;
static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct opa_congestion_info_attr *p =
@@ -3727,7 +3727,7 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len, u32 max_len)
+ u32 port, u32 *resp_len, u32 max_len)
{
int i;
struct opa_congestion_setting_attr *p =
@@ -3819,7 +3819,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct opa_congestion_setting_attr *p =
@@ -3860,7 +3860,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len, u32 max_len)
+ u32 port, u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -3925,7 +3925,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
}
static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct ib_cc_table_attr *cc_table_attr =
@@ -3977,7 +3977,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
@@ -4036,7 +4036,7 @@ struct opa_led_info {
#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -4066,7 +4066,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -4089,7 +4089,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev, u8 port,
+ u8 *data, struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
int ret;
@@ -4179,7 +4179,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
}
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev, u8 port,
+ u8 *data, struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len, int local_mad)
{
int ret;
@@ -4254,7 +4254,7 @@ static inline void set_aggr_error(struct opa_aggregate *ag)
}
static int subn_get_opa_aggregate(struct opa_smp *smp,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len)
{
int i;
@@ -4303,7 +4303,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
}
static int subn_set_opa_aggregate(struct opa_smp *smp,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, int local_mad)
{
int i;
@@ -4509,7 +4509,7 @@ static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
}
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct opa_mad *in_mad,
+ u32 port, const struct opa_mad *in_mad,
struct opa_mad *out_mad,
u32 *resp_len, int local_mad)
{
@@ -4614,7 +4614,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
}
static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
+ u32 port, const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_smp *smp = (struct ib_smp *)out_mad;
@@ -4672,7 +4672,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
return ret;
}
-static int process_perf(struct ib_device *ibdev, u8 port,
+static int process_perf(struct ib_device *ibdev, u32 port,
const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
@@ -4734,7 +4734,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
return ret;
}
-static int process_perf_opa(struct ib_device *ibdev, u8 port,
+static int process_perf_opa(struct ib_device *ibdev, u32 port,
const struct opa_mad *in_mad,
struct opa_mad *out_mad, u32 *resp_len)
{
@@ -4816,7 +4816,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
}
static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_wc *in_wc,
+ u32 port, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct opa_mad *in_mad,
struct opa_mad *out_mad, size_t *out_mad_size,
@@ -4869,7 +4869,7 @@ bail:
return ret;
}
-static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad *in_mad,
@@ -4914,7 +4914,7 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*
* This is called by the ib_mad module.
*/
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 889e63d3f2cc..0205d308ef5e 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -436,7 +436,7 @@ struct sc2vlnt {
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
-void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port);
void hfi1_handle_trap_timer(struct timer_list *t);
u16 tx_link_width(u16 link_width);
u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width,
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index f3fb28e3d5d7..d213f65d4cdd 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_handler *h;
int ret;
- h = kmalloc(sizeof(*h), GFP_KERNEL);
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
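
The mmu_rb.c change above allocates the handler zeroed. A tiny analogue of the hazard it avoids when some fields are initialized only on later, conditional paths:

#include <stdlib.h>

struct handler {
	void *root;
	int registered;	/* only set on a later, conditional path */
};

int main(void)
{
	struct handler *h = calloc(1, sizeof(*h));	/* kzalloc() analogue */

	if (!h)
		return 1;
	/* h->registered is guaranteed 0 here; with malloc()/kmalloc() it is garbage */
	free(h);
	return 0;
}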
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index cf3040bb177f..57a5f02ebc77 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -206,7 +206,7 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
- * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
* for netdev context
 * @rcd: valid netdev context
*/
@@ -221,7 +221,7 @@ int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
- * msix_request_smda_ira() - Helper for getting SDMA IRQ resources
+ * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
* @sde: valid sdma engine
*
*/
@@ -243,7 +243,7 @@ int msix_request_sdma_irq(struct sdma_engine *sde)
}
/**
- * msix_request_general_irq(void) - Helper for getting general IRQ
+ * msix_request_general_irq - Helper for getting general IRQ
* resources
* @dd: valid device data
*/
@@ -269,7 +269,7 @@ int msix_request_general_irq(struct hfi1_devdata *dd)
}
/**
- * enable_sdma_src() - Helper to enable SDMA IRQ srcs
+ * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
* @dd: valid devdata structure
* @i: index of SDMA engine
*/
@@ -349,7 +349,7 @@ void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
}
/**
- * hfi1_clean_up_msix_interrupts() - Free all MSIx IRQ resources
+ * msix_clean_up_interrupts - Free all MSIx IRQ resources
* @dd: valid device data data structure
*
* Free the MSIx and associated PCI resources, if they have been allocated.
@@ -372,7 +372,7 @@ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
}
/**
- * msix_netdev_syncrhonize_irq() - netdev IRQ synchronize
+ * msix_netdev_synchronize_irq - netdev IRQ synchronize
* @dd: valid devdata
*/
void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
index 947543a3e0c4..8aa074670a9c 100644
--- a/drivers/infiniband/hw/hfi1/netdev.h
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -14,15 +14,14 @@
/**
* struct hfi1_netdev_rxq - Receive Queue for HFI
- * dummy netdev. Both IPoIB and VNIC netdevices will be working on
- * top of this device.
+ * Both IPoIB and VNIC netdevices are layered on top of this rx abstraction.
* @napi: napi object
- * @priv: ptr to netdev_priv
+ * @rx: ptr to netdev_rx
* @rcd: ptr to receive context data
*/
struct hfi1_netdev_rxq {
struct napi_struct napi;
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
struct hfi1_ctxtdata *rcd;
};
@@ -36,7 +35,8 @@ struct hfi1_netdev_rxq {
#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
/**
- * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
+ * struct hfi1_netdev_rx: data required to setup and run HFI netdev.
+ * @rx_napi: the dummy netdevice to support "polling" the receive contexts
* @dd: hfi1_devdata
* @rxq: pointer to dummy netdev receive queues.
* @num_rx_q: number of receive queues
@@ -48,7 +48,8 @@ struct hfi1_netdev_rxq {
* @netdevs: atomic counter of netdevs using dummy netdev.
* When 0 receive queues will be freed.
*/
-struct hfi1_netdev_priv {
+struct hfi1_netdev_rx {
+ struct net_device rx_napi;
struct hfi1_devdata *dd;
struct hfi1_netdev_rxq *rxq;
int num_rx_q;
@@ -61,41 +62,27 @@ struct hfi1_netdev_priv {
};
static inline
-struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
-{
- return (struct hfi1_netdev_priv *)&dev[1];
-}
-
-static inline
int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->num_rx_q;
+ return dd->netdev_rx->num_rx_q;
}
static inline
struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->rxq[ctxt].rcd;
+ return dd->netdev_rx->rxq[ctxt].rcd;
}
static inline
int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->rmt_start;
+ return dd->netdev_rx->rmt_start;
}
static inline
void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- priv->rmt_start = rmt_idx;
+ dd->netdev_rx->rmt_start = rmt_idx;
}
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
@@ -105,8 +92,8 @@ void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
-int hfi1_netdev_alloc(struct hfi1_devdata *dd);
-void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_alloc_rx(struct hfi1_devdata *dd);
+void hfi1_free_rx(struct hfi1_devdata *dd);
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
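
In the refactor above, hfi1_netdev_rx embeds the dummy net_device (rx_napi) rather than tail-allocating private data behind a separately allocated device, which retires the old "&dev[1]" arithmetic in hfi1_netdev_priv(). A compilable sketch of the embed-and-recover layout, with stand-in types:

#include <stddef.h>
#include <stdlib.h>

struct dummy_dev { int napi_state; };	/* stand-in for struct net_device */

struct netdev_rx {
	struct dummy_dev rx_napi;	/* embedded, as in the patch */
	int num_rx_q;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct netdev_rx *rx = calloc(1, sizeof(*rx));
	struct dummy_dev *dev;

	if (!rx)
		return 1;
	dev = &rx->rx_napi;				/* rx -> device */
	rx = container_of(dev, struct netdev_rx, rx_napi);	/* device -> rx */
	free(rx);
	return 0;
}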
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 1bcab992ac26..03b098a494b5 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -17,11 +17,11 @@
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
struct hfi1_ctxtdata *uctxt)
{
unsigned int rcvctrl_ops;
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
int ret;
uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
@@ -118,11 +118,11 @@ static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
hfi1_free_ctxt(uctxt);
}
-static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
struct hfi1_ctxtdata **ctxt)
{
int rc;
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
if (rc) {
@@ -130,7 +130,7 @@ static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
return rc;
}
- rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+ rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
if (rc) {
dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
hfi1_netdev_deallocate_ctxt(dd, *ctxt);
@@ -183,31 +183,31 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
(u32)HFI1_MAX_NETDEV_CTXTS);
}
-static int hfi1_netdev_rxq_init(struct net_device *dev)
+static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
{
int i;
int rc;
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
+ struct net_device *dev = &rx->rx_napi;
- priv->num_rx_q = dd->num_netdev_contexts;
- priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
- GFP_KERNEL, dd->node);
+ rx->num_rx_q = dd->num_netdev_contexts;
+ rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
+ GFP_KERNEL, dd->node);
- if (!priv->rxq) {
+ if (!rx->rxq) {
dd_dev_err(dd, "Unable to allocate netdev queue data\n");
return (-ENOMEM);
}
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+ rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
if (rc)
goto bail_context_irq_failure;
hfi1_rcd_get(rxq->rcd);
- rxq->priv = priv;
+ rxq->rx = rx;
rxq->rcd->napi = &rxq->napi;
dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
i, rxq->rcd->ctxt);
@@ -227,7 +227,7 @@ static int hfi1_netdev_rxq_init(struct net_device *dev)
bail_context_irq_failure:
dd_dev_err(dd, "Unable to allot receive context\n");
for (; i >= 0; i--) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
if (rxq->rcd) {
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -235,20 +235,19 @@ bail_context_irq_failure:
rxq->rcd = NULL;
}
}
- kfree(priv->rxq);
- priv->rxq = NULL;
+ kfree(rx->rxq);
+ rx->rxq = NULL;
return rc;
}
-static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
{
int i;
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
netif_napi_del(&rxq->napi);
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -256,41 +255,41 @@ static void hfi1_netdev_rxq_deinit(struct net_device *dev)
rxq->rcd = NULL;
}
- kfree(priv->rxq);
- priv->rxq = NULL;
- priv->num_rx_q = 0;
+ kfree(rx->rxq);
+ rx->rxq = NULL;
+ rx->num_rx_q = 0;
}
-static void enable_queues(struct hfi1_netdev_priv *priv)
+static void enable_queues(struct hfi1_netdev_rx *rx)
{
int i;
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+ dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
napi_enable(&rxq->napi);
- hfi1_rcvctrl(priv->dd,
+ hfi1_rcvctrl(rx->dd,
HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
rxq->rcd);
}
}
-static void disable_queues(struct hfi1_netdev_priv *priv)
+static void disable_queues(struct hfi1_netdev_rx *rx)
{
int i;
- msix_netdev_synchronize_irq(priv->dd);
+ msix_netdev_synchronize_irq(rx->dd);
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+ dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
/* wait for napi if it was scheduled */
- hfi1_rcvctrl(priv->dd,
+ hfi1_rcvctrl(rx->dd,
HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
rxq->rcd);
napi_synchronize(&rxq->napi);
@@ -307,15 +306,14 @@ static void disable_queues(struct hfi1_netdev_priv *priv)
*/
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
int res;
- if (atomic_fetch_inc(&priv->netdevs))
+ if (atomic_fetch_inc(&rx->netdevs))
return 0;
mutex_lock(&hfi1_mutex);
- init_dummy_netdev(dd->dummy_netdev);
- res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+ res = hfi1_netdev_rxq_init(rx);
mutex_unlock(&hfi1_mutex);
return res;
}
@@ -328,12 +326,12 @@ int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
*/
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
/* destroy the RX queues only if it is the last netdev going away */
- if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+ if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
mutex_lock(&hfi1_mutex);
- hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+ hfi1_netdev_rxq_deinit(rx);
mutex_unlock(&hfi1_mutex);
}
@@ -341,39 +339,43 @@ int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
}
/**
- * hfi1_netdev_alloc - Allocates netdev and private data. It is required
- * because RMT index and MSI-X interrupt can be set only
- * during driver initialization.
- *
+ * hfi1_alloc_rx - Allocates the rx support structure
* @dd: hfi1 dev data
+ *
+ * Allocate the rx structure to support gathering the receive
+ * resources and the dummy netdev.
+ *
+ * Updates the dd struct pointer on success.
+ *
+ * Return: 0 on success, negative errno on failure
+ *
*/
-int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+int hfi1_alloc_rx(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
- const int netdev_size = sizeof(*dd->dummy_netdev) +
- sizeof(struct hfi1_netdev_priv);
+ struct hfi1_netdev_rx *rx;
- dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
- dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+ dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
+ rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
- if (!dd->dummy_netdev)
+ if (!rx)
return -ENOMEM;
+ rx->dd = dd;
+ init_dummy_netdev(&rx->rx_napi);
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- priv->dd = dd;
- xa_init(&priv->dev_tbl);
- atomic_set(&priv->enabled, 0);
- atomic_set(&priv->netdevs, 0);
+ xa_init(&rx->dev_tbl);
+ atomic_set(&rx->enabled, 0);
+ atomic_set(&rx->netdevs, 0);
+ dd->netdev_rx = rx;
return 0;
}
-void hfi1_netdev_free(struct hfi1_devdata *dd)
+void hfi1_free_rx(struct hfi1_devdata *dd)
{
- if (dd->dummy_netdev) {
- dd_dev_info(dd, "hfi1 netdev freed\n");
- kfree(dd->dummy_netdev);
- dd->dummy_netdev = NULL;
+ if (dd->netdev_rx) {
+ dd_dev_info(dd, "hfi1 rx freed\n");
+ kfree(dd->netdev_rx);
+ dd->netdev_rx = NULL;
}
}
@@ -388,33 +390,33 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
*/
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
- if (!dd->dummy_netdev)
+ if (!dd->netdev_rx)
return;
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- if (atomic_fetch_inc(&priv->enabled))
+ rx = dd->netdev_rx;
+ if (atomic_fetch_inc(&rx->enabled))
return;
mutex_lock(&hfi1_mutex);
- enable_queues(priv);
+ enable_queues(rx);
mutex_unlock(&hfi1_mutex);
}
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
- if (!dd->dummy_netdev)
+ if (!dd->netdev_rx)
return;
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- if (atomic_dec_if_positive(&priv->enabled))
+ rx = dd->netdev_rx;
+ if (atomic_dec_if_positive(&rx->enabled))
return;
mutex_lock(&hfi1_mutex);
- disable_queues(priv);
+ disable_queues(rx);
mutex_unlock(&hfi1_mutex);
}
@@ -430,9 +432,9 @@ void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
*/
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+ return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
}
/**
@@ -444,9 +446,9 @@ int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
*/
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_erase(&priv->dev_tbl, id);
+ return xa_erase(&rx->dev_tbl, id);
}
/**
@@ -457,24 +459,24 @@ void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
*/
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_load(&priv->dev_tbl, id);
+ return xa_load(&rx->dev_tbl, id);
}
/**
- * hfi1_netdev_get_first_dat - Gets first entry with greater or equal id.
+ * hfi1_netdev_get_first_data - Gets first entry with greater or equal id.
*
* @dd: hfi1 dev data
* @start_id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
unsigned long index = *start_id;
void *ret;
- ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
*start_id = (int)index;
return ret;
}
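
hfi1_netdev_rx_init()/hfi1_netdev_rx_destroy() above gate setup and teardown on the netdevs counter: atomic_fetch_inc() lets only the first user build the queues, and atomic_fetch_add_unless(..., -1, 0) lets only the last user free them while never dropping below zero. A userspace emulation of that gating:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int netdevs;

static void rx_init(void)
{
	if (atomic_fetch_add(&netdevs, 1) == 0)
		puts("first user: build rx queues");
}

static void rx_destroy(void)
{
	int v = atomic_load(&netdevs);

	/* emulate atomic_fetch_add_unless(&netdevs, -1, 0) */
	while (v != 0 && !atomic_compare_exchange_weak(&netdevs, &v, v - 1))
		;
	if (v == 1)
		puts("last user: free rx queues");
}

int main(void)
{
	rx_init();
	rx_init();
	rx_destroy();	/* not yet */
	rx_destroy();	/* teardown happens here */
	return 0;
}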
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 46b5290b2839..1fcc6e9666e0 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1285,7 +1285,7 @@ bail:
}
/**
- * sdma_clean() Clean up allocated memory
+ * sdma_clean - Clean up allocated memory
* @dd: struct hfi1_devdata
* @num_engines: num sdma engines
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 7a851191f987..f57d55272dd2 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -907,24 +907,6 @@ static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
return 0;
}
-/**
- * sdma_iowait_schedule() - initialize wait structure
- * @sde: sdma_engine to schedule
- * @wait: wait struct to schedule
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
-static inline void sdma_iowait_schedule(
- struct sdma_engine *sde,
- struct iowait *wait)
-{
- struct hfi1_pportdata *ppd = sde->dd->pport;
-
- iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
-}
-
/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 5650130e68d4..eaf441ece25e 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -649,7 +649,7 @@ const struct attribute_group ib_hfi1_attr_group = {
.attrs = hfi1_attributes,
};
-int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj)
{
struct hfi1_pportdata *ppd;
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 769e5e4710c6..d44fc54858b9 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -53,6 +53,8 @@
#include "hfi.h"
#include "mad.h"
#include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
@@ -653,6 +655,80 @@ TRACE_EVENT(hfi1_sdma_user_completion,
__entry->code)
);
+TRACE_EVENT(hfi1_usdma_defer,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ struct sdma_engine *sde,
+ struct iowait *wait),
+ TP_ARGS(pq, sde, wait),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(struct sdma_engine *, sde)
+ __field(struct iowait *, wait)
+ __field(int, engine)
+ __field(int, empty)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->sde = sde;
+ __entry->wait = wait;
+ __entry->engine = sde->this_idx;
+ __entry->empty = list_empty(&__entry->wait->list);
+ ),
+ TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ (unsigned long long)__entry->sde,
+ (unsigned long long)__entry->wait,
+ __entry->engine,
+ __entry->empty
+ )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ struct iowait *wait,
+ int reason),
+ TP_ARGS(pq, wait, reason),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(struct iowait *, wait)
+ __field(int, reason)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->wait = wait;
+ __entry->reason = reason;
+ ),
+ TP_printk("[%s] pq %llx wait %llx reason %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ (unsigned long long)__entry->wait,
+ __entry->reason
+ )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ int we_ret),
+ TP_ARGS(pq, we_ret),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(int, state)
+ __field(int, we_ret)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->state = pq->state;
+ __entry->we_ret = we_ret;
+ ),
+ TP_printk("[%s] pq %llx state %d we_ret %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ __entry->state,
+ __entry->we_ret
+ )
+);
+
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
@@ -858,6 +934,109 @@ DEFINE_EVENT(
TP_ARGS(qp, flag)
);
+DECLARE_EVENT_CLASS(/* AIP */
+ hfi1_ipoib_txq_template,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(txq->priv->dd)
+ __field(struct hfi1_ipoib_txq *, txq)
+ __field(struct sdma_engine *, sde)
+ __field(ulong, head)
+ __field(ulong, tail)
+ __field(uint, used)
+ __field(uint, flow)
+ __field(int, stops)
+ __field(int, no_desc)
+ __field(u8, idx)
+ __field(u8, stopped)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(txq->priv->dd)
+ __entry->txq = txq;
+ __entry->sde = txq->sde;
+ __entry->head = txq->tx_ring.head;
+ __entry->tail = txq->tx_ring.tail;
+ __entry->idx = txq->q_idx;
+ __entry->used =
+ txq->sent_txreqs -
+ atomic64_read(&txq->complete_txreqs);
+ __entry->flow = txq->flow.as_int;
+ __entry->stops = atomic_read(&txq->stops);
+ __entry->no_desc = atomic_read(&txq->no_desc);
+ __entry->stopped =
+ __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+ ),
+ TP_printk(/* print */
+ "[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+ __get_str(dev),
+ (unsigned long long)__entry->txq,
+ __entry->idx,
+ (unsigned long long)__entry->sde,
+ __entry->head,
+ __entry->tail,
+ __entry->flow,
+ __entry->used,
+ __entry->stops,
+ __entry->no_desc,
+ __entry->stopped
+ )
+);
+
+DEFINE_EVENT(/* queue stop */
+ hfi1_ipoib_txq_template, hfi1_txq_stop,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+ hfi1_ipoib_txq_template, hfi1_txq_wake,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+ hfi1_ipoib_txq_template, hfi1_flow_flush,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+ hfi1_ipoib_txq_template, hfi1_flow_switch,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+ hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+ hfi1_ipoib_txq_template, hfi1_txq_full,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+ hfi1_ipoib_txq_template, hfi1_txq_queued,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+ hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+ hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
#endif /* __HFI1_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
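
For readers new to the tracepoint machinery above: DECLARE_EVENT_CLASS defines one shared record/print template, and each DEFINE_EVENT stamps out a named tracepoint that reuses it, so the nine AIP queue events all cost a single format definition. A minimal userspace analogue of that sharing (hypothetical names, not kernel code; the real macros also generate the ring-buffer plumbing):

    #include <stdio.h>

    struct txq { unsigned idx; unsigned long head, tail; };

    /* one shared formatter, analogous to DECLARE_EVENT_CLASS */
    static void txq_template(const char *event, const struct txq *q)
    {
        printf("%s: txq %u head %lx tail %lx\n", event, q->idx, q->head, q->tail);
    }

    /* thin per-event wrappers, analogous to DEFINE_EVENT */
    static void trace_txq_stop(const struct txq *q) { txq_template("txq_stop", q); }
    static void trace_txq_wake(const struct txq *q) { txq_template("txq_wake", q); }

    int main(void)
    {
        struct txq q = { .idx = 1, .head = 0x10, .tail = 0x20 };
        trace_txq_stop(&q);
        trace_txq_wake(&q);
        return 0;
    }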
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 4a4956f96a7e..da5b2e37355a 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -133,6 +133,7 @@ static int defer_packet_queue(
container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
write_seqlock(&sde->waitlock);
+ trace_hfi1_usdma_defer(pq, sde, &pq->busy);
if (sdma_progress(sde, seq, txreq))
goto eagain;
/*
@@ -157,7 +158,8 @@ static void activate_packet_queue(struct iowait *wait, int reason)
{
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
- pq->busy.lock = NULL;
+
+ trace_hfi1_usdma_activate(pq, wait, reason);
xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
wake_up(&wait->wait_dma);
};
@@ -599,13 +601,17 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
+ int we_ret;
+
if (ret != -EBUSY)
goto free_req;
- if (wait_event_interruptible_timeout(
+ we_ret = wait_event_interruptible_timeout(
pq->busy.wait_dma,
pq->state == SDMA_PKT_Q_ACTIVE,
msecs_to_jiffies(
- SDMA_IOWAIT_TIMEOUT)) <= 0)
+ SDMA_IOWAIT_TIMEOUT));
+ trace_hfi1_usdma_we(pq, we_ret);
+ if (we_ret <= 0)
flush_pq_iowait(pq);
}
}
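
The hunk above exists because wait_event_interruptible_timeout() folds three outcomes into one return value: negative when a signal interrupts the wait (-ERESTARTSYS), 0 on timeout, and the remaining jiffies (> 0) on a normal wakeup. Capturing it in we_ret lets the new tracepoint tell those apart, while the <= 0 cases still flush the iowait queue. A toy model of that branching (fake_wait is a hypothetical stand-in for the kernel API):

    #include <stdio.h>

    /* models wait_event_interruptible_timeout(): <0 signal, 0 timeout,
     * >0 remaining time on wakeup -- a stand-in, not the kernel API */
    static long fake_wait(long result) { return result; }

    int main(void)
    {
        long samples[] = { -512 /* interrupted */, 0 /* timed out */, 7 /* woken */ };
        for (unsigned i = 0; i < 3; i++) {
            long we_ret = fake_wait(samples[i]);
            printf("we_ret=%ld -> %s\n", we_ret,
                   we_ret <= 0 ? "flush iowait queue" : "resume sending");
        }
        return 0;
    }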
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 1e8c02fe8ad1..fabe58139906 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -53,6 +53,7 @@
#include "common.h"
#include "iowait.h"
#include "user_exp_rcv.h"
+#include "mmu_rb.h"
/* The maximum number of Data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 0dd4bb0a5a7e..554294340caa 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1407,7 +1407,7 @@ static inline u16 opa_width_to_ib(u16 in)
}
}
-static int query_port(struct rvt_dev_info *rdi, u8 port_num,
+static int query_port(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props)
{
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
@@ -1485,7 +1485,7 @@ bail:
return ret;
}
-static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
+static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
{
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
@@ -1694,7 +1694,7 @@ static int init_cntr_names(const char *names_in,
}
static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
int i, err;
@@ -1758,7 +1758,7 @@ static u64 hfi1_sps_ints(void)
}
static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
u64 *values;
int count;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index d36e3e14896d..420df17cd184 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -325,10 +325,10 @@ static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
*/
void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
u32 qp1, u32 qp2, u32 lid1, u32 lid2);
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index d2d526c5a756..4bdfc7932376 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -99,11 +99,6 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
return tx;
}
-static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
-{
- return &tx->txreq;
-}
-
static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
{
struct sdma_txreq *stx;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index 66150a13f374..a7a450e2cf2c 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -156,7 +156,7 @@ bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
/* vnic rdma netdev operations */
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index a90824de0f57..7e79c0578ecf 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -593,7 +593,7 @@ static void hfi1_vnic_free_rn(struct net_device *netdev)
}
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 4bcaaa0524b1..5d389ed55376 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -304,6 +304,9 @@ done:
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ hns_roce_cleanup_xrcd_table(hr_dev);
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_srq_table(hr_dev);
hns_roce_cleanup_qp_table(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 339e3fd98b0b..8f68cc3ff193 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -38,22 +38,14 @@
#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32
-#define CMD_TOKEN_MASK 0x1f
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier,
u8 op_modifier, u16 op, u16 token,
int event)
{
- struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- int ret;
-
- mutex_lock(&cmd->hcr_mutex);
- ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, token, event);
- mutex_unlock(&cmd->hcr_mutex);
-
- return ret;
+ return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
}
/* this should be called with "poll_sem" */
@@ -62,18 +54,19 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op,
unsigned int timeout)
{
- struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
CMD_POLL_TOKEN, 0);
if (ret) {
- dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to post mailbox %x in poll mode, ret = %d.\n",
+ op, ret);
return ret;
}
- return hr_dev->hw->chk_mbox(hr_dev, timeout);
+ return hr_dev->hw->poll_mbox_done(hr_dev, timeout);
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -96,15 +89,18 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
struct hns_roce_cmd_context *context =
&hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];
- if (token != context->token)
+ if (unlikely(token != context->token)) {
+ dev_err_ratelimited(hr_dev->dev,
+				    "[cmd] invalid ae token %x, context token is %x!\n",
+ token, context->token);
return;
+ }
context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
context->out_param = out_param;
complete(&context->done);
}
-/* this should be called with "use_events" */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op,
@@ -116,44 +112,44 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
int ret;
spin_lock(&cmd->context_lock);
- WARN_ON(cmd->free_head < 0);
- context = &cmd->context[cmd->free_head];
- context->token += cmd->token_mask + 1;
- cmd->free_head = context->next;
+
+ do {
+ context = &cmd->context[cmd->free_head];
+ cmd->free_head = context->next;
+ } while (context->busy);
+
+ context->busy = 1;
+ context->token += cmd->max_cmds;
+
spin_unlock(&cmd->context_lock);
- init_completion(&context->done);
+ reinit_completion(&context->done);
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
context->token, 1);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(dev,
+ "failed to post mailbox %x in event mode, ret = %d.\n",
+ op, ret);
goto out;
+ }
- /*
- * It is timeout when wait_for_completion_timeout return 0
- * The return value is the time limit set in advance
- * how many seconds showing
- */
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
- dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
+ dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+ context->token, op);
ret = -EBUSY;
goto out;
}
ret = context->result;
- if (ret) {
- dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
- goto out;
- }
+ if (ret)
+		dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d.\n",
+ context->token, op, ret);
out:
- spin_lock(&cmd->context_lock);
- context->next = cmd->free_head;
- cmd->free_head = context - cmd->context;
- spin_unlock(&cmd->context_lock);
-
+ context->busy = 0;
return ret;
}
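
The rework above replaces the free-list head/tail dance with a per-context busy flag and a token that advances by max_cmds on every reuse, so a late completion carrying an old token can never be matched against the slot's new owner. A compilable sketch of that allocation scheme (simplified to a single thread, so the spinlock is only noted in a comment):

    #include <stdio.h>

    #define MAX_CMDS 32

    struct ctx { int next; int busy; unsigned short token; };

    static struct ctx ring[MAX_CMDS];
    static int free_head;

    /* claim a context: walk the circular free list until a non-busy slot
     * is found, then advance its token by MAX_CMDS so a stale completion
     * (recorded before reuse) can never match the new owner */
    static struct ctx *claim(void)
    {
        struct ctx *c;
        /* in the driver this loop runs under context_lock */
        do {
            c = &ring[free_head];
            free_head = c->next;
        } while (c->busy);
        c->busy = 1;
        c->token += MAX_CMDS;
        return c;
    }

    int main(void)
    {
        for (int i = 0; i < MAX_CMDS; i++) {
            ring[i].token = i;
            ring[i].next = (i + 1) % MAX_CMDS;  /* last->next = 0, as in the patch */
        }
        struct ctx *c = claim();
        printf("claimed token %u\n", c->token);  /* 0 + 32 = 32 */
        c->busy = 0;  /* release, as __hns_roce_cmd_mbox_wait() does when done */
        return 0;
    }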
@@ -175,44 +171,28 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned int timeout)
{
- int ret;
+ bool is_busy;
- if (hr_dev->hw->rst_prc_mbox) {
- ret = hr_dev->hw->rst_prc_mbox(hr_dev);
- if (ret == CMD_RST_PRC_SUCCESS)
- return 0;
- else if (ret == CMD_RST_PRC_EBUSY)
- return -EBUSY;
- }
+ if (hr_dev->hw->chk_mbox_avail)
+ if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
+ return is_busy ? -EBUSY : 0;
if (hr_dev->cmd.use_events)
- ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
+ return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
else
- ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
-
- if (ret == CMD_RST_PRC_EBUSY)
- return -EBUSY;
-
- if (ret && (hr_dev->hw->rst_prc_mbox &&
- hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
- return 0;
-
- return ret;
+ return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
}
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
- struct device *dev = hr_dev->dev;
-
- mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0;
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
- hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
+ hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev,
HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0);
if (!hr_dev->cmd.pool)
@@ -239,16 +219,16 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
for (i = 0; i < hr_cmd->max_cmds; ++i) {
hr_cmd->context[i].token = i;
hr_cmd->context[i].next = i + 1;
+ init_completion(&hr_cmd->context[i].done);
}
-
- hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
+ hr_cmd->context[hr_cmd->max_cmds - 1].next = 0;
hr_cmd->free_head = 0;
sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
spin_lock_init(&hr_cmd->context_lock);
- hr_cmd->token_mask = CMD_TOKEN_MASK;
hr_cmd->use_events = 1;
+ down(&hr_cmd->poll_sem);
return 0;
}
@@ -259,6 +239,8 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
kfree(hr_cmd->context);
hr_cmd->use_events = 0;
+
+ up(&hr_cmd->poll_sem);
}
struct hns_roce_cmd_mailbox *
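
The poll_sem handoff added above means switching to event mode takes the semaphore (excluding poll-mode submitters for the duration) and switching back releases it. A rough userspace analogue using POSIX semaphores (the kernel uses struct semaphore with down()/up(); the names here are illustrative):

    #include <semaphore.h>
    #include <stdio.h>

    static sem_t poll_sem;
    static int use_events;

    static void cmd_use_events(void)   /* analogue of hns_roce_cmd_use_events() */
    {
        use_events = 1;
        sem_wait(&poll_sem);  /* block poll-mode issuers while events are on */
    }

    static void cmd_use_polling(void)  /* analogue of hns_roce_cmd_use_polling() */
    {
        use_events = 0;
        sem_post(&poll_sem);  /* let poll-mode issuers through again */
    }

    int main(void)
    {
        sem_init(&poll_sem, 0, 1);
        cmd_use_events();
        printf("event mode on (use_events=%d), poll path held\n", use_events);
        cmd_use_polling();
        printf("poll mode restored (use_events=%d)\n", use_events);
        sem_destroy(&poll_sem);
        return 0;
    }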
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 23c438cef40d..d5fe56c78394 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -48,7 +48,8 @@
#define roce_set_field(origin, mask, shift, val) \
do { \
(origin) &= ~cpu_to_le32(mask); \
- (origin) |= cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
+ (origin) |= \
+ cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
} while (0)
#define roce_set_bit(origin, shift, val) \
@@ -59,9 +60,9 @@
#define _hr_reg_enable(ptr, field_type, field_h, field_l) \
({ \
const field_type *_ptr = ptr; \
- *((__le32 *)_ptr + (field_h) / 32) |= \
- cpu_to_le32(BIT((field_l) % 32)) + \
- BUILD_BUG_ON_ZERO((field_h) != (field_l)); \
+ *((__le32 *)_ptr + (field_h) / 32) |= cpu_to_le32( \
+ BIT((field_l) % 32) + \
+ BUILD_BUG_ON_ZERO((field_h) != (field_l))); \
})
#define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
@@ -69,11 +70,9 @@
#define _hr_reg_clear(ptr, field_type, field_h, field_l) \
({ \
const field_type *_ptr = ptr; \
+ BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
*((__le32 *)_ptr + (field_h) / 32) &= \
- cpu_to_le32( \
- ~GENMASK((field_h) % 32, (field_l) % 32)) + \
- BUILD_BUG_ON_ZERO(((field_h) / 32) != \
- ((field_l) / 32)); \
+ ~cpu_to_le32(GENMASK((field_h) % 32, (field_l) % 32)); \
})
#define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
@@ -87,6 +86,16 @@
#define hr_reg_write(ptr, field, val) _hr_reg_write(ptr, field, val)
+#define _hr_reg_read(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
+ FIELD_GET(GENMASK((field_h) % 32, (field_l) % 32), \
+ le32_to_cpu(*((__le32 *)_ptr + (field_h) / 32))); \
+ })
+
+#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
+
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
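
hr_reg_read() is the read-side companion to hr_reg_write(): the BUILD_BUG_ON rejects fields that straddle a 32-bit word, and FIELD_GET(GENMASK(...)) extracts and right-shifts the field in one step. A self-contained demonstration with userspace stand-ins for the two kernel helpers (32-bit only; the register value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* userspace stand-ins for the kernel's GENMASK()/FIELD_GET() helpers */
    #define GENMASK(h, l)      (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & ~((mask) << 1)))

    int main(void)
    {
        uint32_t reg = 0x00ab0000;
        /* read bits 23:16, as hr_reg_read() does for a field contained in
         * one 32-bit register word (fields may not cross a word boundary) */
        printf("field = 0x%x\n", FIELD_GET(GENMASK(23, 16), reg));  /* 0xab */
        return 0;
    }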
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 74fc4940b03a..800884b074f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -225,7 +225,7 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct ib_udata *udata, unsigned long addr,
struct hns_roce_ib_create_cq_resp *resp)
{
- bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
struct hns_roce_ucontext *uctx;
int err;
@@ -250,8 +250,8 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
*hr_cq->set_ci_db = 0;
hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
}
- hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
+ hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
return 0;
@@ -276,6 +276,57 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
}
}
+static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
+ const struct ib_cq_init_attr *attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
+ ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
+ attr->cqe, hr_dev->caps.max_cqes);
+ return -EINVAL;
+ }
+
+ if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
+ ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
+ attr->comp_vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct ib_device *ibdev = hr_cq->ib_cq.device;
+ int ret;
+
+ ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+
+ spin_lock_init(&hr_cq->lock);
+ INIT_LIST_HEAD(&hr_cq->sq_list);
+ INIT_LIST_HEAD(&hr_cq->rq_list);
+}
+
static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
struct hns_roce_ib_create_cq *ucmd)
{
@@ -299,44 +350,23 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_ib_create_cq ucmd = {};
- int vector = attr->comp_vector;
- u32 cq_entries = attr->cqe;
int ret;
if (attr->flags)
return -EOPNOTSUPP;
- if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
- cq_entries, hr_dev->caps.max_cqes);
- return -EINVAL;
- }
-
- if (vector >= hr_dev->caps.num_comp_vectors) {
- ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
- vector, hr_dev->caps.num_comp_vectors);
- return -EINVAL;
- }
-
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
- cq_entries = roundup_pow_of_two(cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
- hr_cq->cq_depth = cq_entries;
- hr_cq->vector = vector;
- spin_lock_init(&hr_cq->lock);
- INIT_LIST_HEAD(&hr_cq->sq_list);
- INIT_LIST_HEAD(&hr_cq->rq_list);
+ ret = verify_cq_create_attr(hr_dev, attr);
+ if (ret)
+ return ret;
if (udata) {
- ret = ib_copy_from_udata(&ucmd, udata,
- min(udata->inlen, sizeof(ucmd)));
- if (ret) {
- ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
- ret);
+ ret = get_cq_ucmd(hr_cq, udata, &ucmd);
+ if (ret)
return ret;
- }
}
+ set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
+
set_cqe_size(hr_cq, udata, &ucmd);
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
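
set_cq_param(), factored out above, clamps the requested CQE count to the device minimum and rounds it up to a power of two, so ib_cq.cqe can serve as a wrap mask (depth - 1). A small sketch of that sizing with invented example values:

    #include <stdio.h>

    static unsigned roundup_pow_of_two(unsigned v)
    {
        unsigned r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned requested = 100, min_cqes = 64;  /* example values only */
        unsigned entries = requested > min_cqes ? requested : min_cqes;
        entries = roundup_pow_of_two(entries);
        /* depth 128, max cqe index 127 -- usable as a wrap mask */
        printf("depth %u, max cqe index %u\n", entries, entries - 1);
        return 0;
    }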
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 3d6b7a2db496..97800d2b9d39 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -137,6 +137,7 @@ enum {
SERV_TYPE_UC,
SERV_TYPE_RD,
SERV_TYPE_UD,
+ SERV_TYPE_XRC = 5,
};
enum hns_roce_qp_state {
@@ -168,6 +169,8 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
HNS_ROCE_EVENT_TYPE_FLR = 0x15,
+ HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
+ HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
};
#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
@@ -176,9 +179,10 @@ enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
- HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
- HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+ HNS_ROCE_CAP_FLAG_XRC = BIT(6),
HNS_ROCE_CAP_FLAG_MW = BIT(7),
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
@@ -214,12 +218,6 @@ enum {
HNS_ROCE_RST_DIRECT_RETURN = 0,
};
-enum {
- CMD_RST_PRC_OTHERS,
- CMD_RST_PRC_SUCCESS,
- CMD_RST_PRC_EBUSY,
-};
-
#define HNS_ROCE_CMD_SUCCESS 1
/* The minimum page size is 4K for hardware */
@@ -244,6 +242,11 @@ struct hns_roce_pd {
unsigned long pdn;
};
+struct hns_roce_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+};
+
struct hns_roce_bitmap {
	/* the last bit found to be 1 during bitmap traversal */
unsigned long last;
@@ -363,7 +366,7 @@ struct hns_roce_wq {
int wqe_shift; /* WQE size */
u32 head;
u32 tail;
- void __iomem *db_reg_l;
+ void __iomem *db_reg;
};
struct hns_roce_sge {
@@ -437,7 +440,7 @@ struct hns_roce_cq {
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
- void __iomem *cq_db_l;
+ void __iomem *db_reg;
u16 *tptr_addr;
int arm_sn;
int cqe_size;
@@ -467,7 +470,8 @@ struct hns_roce_srq {
u32 rsv_sge;
int wqe_shift;
u32 cqn;
- void __iomem *db_reg_l;
+ u32 xrcdn;
+ void __iomem *db_reg;
atomic_t refcount;
struct completion free;
@@ -546,6 +550,7 @@ struct hns_roce_cmd_context {
int next;
u64 out_param;
u16 token;
+ u16 busy;
};
struct hns_roce_cmdq {
@@ -562,11 +567,6 @@ struct hns_roce_cmdq {
int free_head;
struct hns_roce_cmd_context *context;
/*
- * Result of get integer part
- * which max_comds compute according a power of 2
- */
- u16 token_mask;
- /*
	 * Whether to use event mode; initialized to 0 (poll mode).
	 * Once the command event queue is ready, the driver can
	 * switch into event mode.
@@ -640,6 +640,8 @@ struct hns_roce_qp {
enum hns_roce_event event_type);
unsigned long qpn;
+ u32 xrcdn;
+
atomic_t refcount;
struct completion free;
@@ -695,7 +697,7 @@ struct hns_roce_aeqe {
struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
- void __iomem *doorbell;
+ void __iomem *db_reg;
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
@@ -723,6 +725,13 @@ struct hns_roce_eq_table {
void __iomem **eqc_base; /* only for hw v1 */
};
+enum cong_type {
+ CONG_TYPE_DCQCN,
+ CONG_TYPE_LDCP,
+ CONG_TYPE_HC3,
+ CONG_TYPE_DIP,
+};
+
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
@@ -759,13 +768,14 @@ struct hns_roce_caps {
int num_other_vectors;
u32 num_mtpts;
u32 num_mtt_segs;
- u32 num_cqe_segs;
u32 num_srqwqe_segs;
u32 num_idx_segs;
int reserved_mrws;
int reserved_uars;
int num_pds;
int reserved_pds;
+ u32 num_xrcds;
+ u32 reserved_xrcds;
u32 mtt_entry_sz;
u32 cqe_sz;
u32 page_size_cap;
@@ -794,6 +804,9 @@ struct hns_roce_caps {
u32 cqc_bt_num;
u32 cqc_timer_bt_num;
u32 mpt_bt_num;
+ u32 eqc_bt_num;
+ u32 smac_bt_num;
+ u32 sgid_bt_num;
u32 sccc_bt_num;
u32 gmv_bt_num;
u32 qpc_ba_pg_sz;
@@ -851,6 +864,7 @@ struct hns_roce_caps {
u16 default_aeq_period;
u16 default_aeq_arm_st;
u16 default_ceq_arm_st;
+ enum cong_type cong_type;
};
struct hns_roce_dfx_hw {
@@ -874,9 +888,10 @@ struct hns_roce_hw {
int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
- int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout);
- int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
- int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
+ unsigned int timeout);
+ bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
@@ -897,33 +912,17 @@ struct hns_roce_hw {
int (*clear_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx);
- int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
- int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
- int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
- int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
- const struct ib_recv_wr **bad_recv_wr);
- int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
- int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
struct ib_udata *udata);
int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
- int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
- int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata);
- int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
- int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -945,6 +944,8 @@ struct hns_roce_dev {
enum hns_roce_device_state state;
struct list_head qp_list; /* list of all qps on this dev */
spinlock_t qp_list_lock; /* protect qp_list */
+ struct list_head dip_list; /* list of all dest ips on this dev */
+ spinlock_t dip_list_lock; /* protect dip_list */
struct list_head pgdir_list;
struct mutex pgdir_mutex;
@@ -963,6 +964,7 @@ struct hns_roce_dev {
struct hns_roce_cmdq cmd;
struct hns_roce_bitmap pd_bitmap;
+ struct hns_roce_bitmap xrcd_bitmap;
struct hns_roce_uar_table uar_table;
struct hns_roce_mr_table mr_table;
struct hns_roce_cq_table cq_table;
@@ -986,6 +988,9 @@ struct hns_roce_dev {
void *priv;
struct workqueue_struct *irq_workq;
const struct hns_roce_dfx_hw *dfx;
+ u32 func_num;
+ u32 is_vf;
+ u32 cong_algo_tmpl_id;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1004,6 +1009,11 @@ static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
return container_of(ibpd, struct hns_roce_pd, ibpd);
}
+static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
+}
+
static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
return container_of(ibah, struct hns_roce_ah, ibah);
@@ -1136,6 +1146,7 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
@@ -1143,6 +1154,7 @@ void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
@@ -1207,6 +1219,9 @@ int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1246,7 +1261,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
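
to_hr_xrcd(), added above, follows the usual embedded-object idiom: the IB core hands the driver a pointer to the ib_xrcd it embeds, and container_of() recovers the enclosing driver structure. A minimal standalone rendition (struct layouts abbreviated):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_xrcd { int dummy; };
    struct hr_xrcd { unsigned xrcdn; struct ib_xrcd ibxrcd; };

    int main(void)
    {
        struct hr_xrcd x = { .xrcdn = 5 };
        struct ib_xrcd *ib = &x.ibxrcd;  /* what the core layer holds */
        struct hr_xrcd *hr = container_of(ib, struct hr_xrcd, ibxrcd);
        printf("xrcdn = %u\n", hr->xrcdn);  /* recovers the driver object */
        return 0;
    }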
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5346fdca9473..620acf66b22c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -54,7 +54,7 @@
* GID[0][0], GID[1][0],.....GID[N - 1][0],
* And so on
*/
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
{
return gid_index * hr_dev->caps.num_ports + port;
}
@@ -345,7 +345,7 @@ out:
doorbell[0] = sq_db.u32_4;
doorbell[1] = sq_db.u32_8;
- hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+ hns_roce_write64_k(doorbell, qp->sq.db_reg);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -440,7 +440,7 @@ out:
doorbell[0] = rq_db.u32_4;
doorbell[1] = rq_db.u32_8;
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -538,7 +538,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
/*
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
@@ -711,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
int i, j;
u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port;
- u8 port = 0;
+ u32 port = 0;
u8 sl;
/* Reserved cq for loop qp */
@@ -1189,7 +1189,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
/*
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
@@ -1382,7 +1382,6 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
ret = hns_roce_v1_rsv_lp_qp(hr_dev);
if (ret) {
dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
- flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
}
@@ -1394,7 +1393,6 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv = hr_dev->priv;
struct hns_roce_free_mr *free_mr = &priv->free_mr;
- flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
hns_roce_v1_release_lp_qp(hr_dev);
@@ -1676,7 +1674,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
@@ -1939,7 +1937,7 @@ static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+ hns_roce_write64_k(doorbell, hr_cq->db_reg);
}
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2041,7 +2039,7 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
/**
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
@@ -2092,7 +2090,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
hr_cq->cqn | notification_flag);
- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+ hns_roce_write64_k(doorbell, hr_cq->db_reg);
return 0;
}
@@ -2673,8 +2671,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int ret = -EINVAL;
u64 sq_ba = 0;
u64 rq_ba = 0;
- int port;
- u8 port_num;
+ u32 port;
+ u32 port_num;
u8 *dmac;
u8 *smac;
@@ -3217,12 +3215,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
if (ibqp->uobject) {
- hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ hr_qp->rq.db_reg = hr_dev->reg_base +
hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
}
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
}
hr_qp->state = new_state;
@@ -3449,8 +3447,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
((roce_get_bit(context->qpc_bytes_4,
QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
- if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
- hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
struct ib_global_route *grh =
rdma_ah_retrieve_grh(&qp_attr->ah_attr);
@@ -3604,7 +3601,7 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
{
roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
- (req_not << eq->log_entries), eq->doorbell);
+ (req_not << eq->log_entries), eq->db_reg);
}
static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
@@ -4170,7 +4167,7 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
* Configure eq extended address 45~49 bit.
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
@@ -4234,9 +4231,9 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
ROCEE_CAEP_CEQC_SHIFT_0_REG +
CEQ_REG_OFFSET * i;
eq->type_flag = HNS_ROCE_CEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- CEQ_REG_OFFSET * i;
+ eq->db_reg = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ CEQ_REG_OFFSET * i;
eq->entries = hr_dev->caps.ceqe_depth;
eq->log_entries = ilog2(eq->entries);
eq->eqe_size = HNS_ROCE_CEQE_SIZE;
@@ -4245,8 +4242,8 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
eq_table->eqc_base[i] = hr_dev->reg_base +
ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
eq->type_flag = HNS_ROCE_AEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->db_reg = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
eq->entries = hr_dev->caps.aeqe_depth;
eq->log_entries = ilog2(eq->entries);
eq->eqe_size = HNS_ROCE_AEQE_SIZE;
@@ -4349,7 +4346,7 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.hw_init = hns_roce_v1_init,
.hw_exit = hns_roce_v1_exit,
.post_mbox = hns_roce_v1_post_mbox,
- .chk_mbox = hns_roce_v1_chk_mbox,
+ .poll_mbox_done = hns_roce_v1_chk_mbox,
.set_gid = hns_roce_v1_set_gid,
.set_mac = hns_roce_v1_set_mac,
.set_mtu = hns_roce_v1_set_mtu,
@@ -4357,12 +4354,6 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.write_cqc = hns_roce_v1_write_cqc,
.clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
- .query_qp = hns_roce_v1_query_qp,
- .destroy_qp = hns_roce_v1_destroy_qp,
- .post_send = hns_roce_v1_post_send,
- .post_recv = hns_roce_v1_post_recv,
- .req_notify_cq = hns_roce_v1_req_notify_cq,
- .poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
.init_eq = hns_roce_v1_init_eq_table,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ce26f97b2ca2..7652dafe32ec 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -48,6 +48,12 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+enum {
+ CMD_RST_PRC_OTHERS,
+ CMD_RST_PRC_SUCCESS,
+ CMD_RST_PRC_EBUSY,
+};
+
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
@@ -632,24 +638,60 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
* around the mailbox calls. Hence, use the deferred flush for
* now.
*/
- if (qp->state == IB_QPS_ERR) {
+ if (unlikely(qp->state == IB_QPS_ERR)) {
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
init_flush_work(hr_dev, qp);
} else {
struct hns_roce_v2_db sq_db = {};
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
- V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
- V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ roce_set_field(sq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ qp->doorbell_qpn);
+ roce_set_field(sq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_SQ_DB);
+
/* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S, qp->sq.head);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
- V2_DB_PARAMETER_SL_S, qp->sl);
+ roce_set_field(sq_db.parameter, V2_DB_PRODUCER_IDX_M,
+ V2_DB_PRODUCER_IDX_S, qp->sq.head);
+ roce_set_field(sq_db.parameter, V2_DB_SL_M, V2_DB_SL_S,
+ qp->sl);
+
+ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
+ }
+}
+
+static inline void update_rq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+{
+ /*
+ * Hip08 hardware cannot flush the WQEs in RQ if the QP state
+	 * gets into the error state. Hence, as a workaround to this
+	 * hardware limitation, the driver needs to assist in flushing.
+	 * But the flushing operation uses a mailbox to convey the QP
+	 * state to the hardware, which can sleep due to the mutex
+	 * protection around the mailbox calls. Hence, use the deferred
+	 * flush for now.
+ */
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ } else {
+ if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
+ *qp->rdb.db_record =
+ qp->rq.head & V2_DB_PRODUCER_IDX_M;
+ } else {
+ struct hns_roce_v2_db rq_db = {};
- hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+ roce_set_field(rq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ qp->qpn);
+ roce_set_field(rq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_RQ_DB);
+ roce_set_field(rq_db.parameter, V2_DB_PRODUCER_IDX_M,
+ V2_DB_PRODUCER_IDX_S, qp->rq.head);
+
+ hns_roce_write64(hr_dev, (__le32 *)&rq_db,
+ qp->rq.db_reg);
+ }
}
}
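
update_rq_db(), added above, picks between two doorbell styles per QP: a record doorbell writes the producer index into a DMA-visible word the device snoops, while the fallback formats a doorbell value and writes it to the device register. A toy model of that decision (the mask width and the MMIO write are placeholders for V2_DB_PRODUCER_IDX_M and hns_roce_write64()):

    #include <stdint.h>
    #include <stdio.h>

    #define PRODUCER_IDX_MASK 0x7fff  /* placeholder width, not the real field */

    static uint32_t db_record;  /* stands in for *qp->rdb.db_record */

    /* record doorbell if the QP supports it, otherwise an MMIO doorbell */
    static void ring_rq_db(unsigned head, int has_record_db)
    {
        if (has_record_db)
            db_record = head & PRODUCER_IDX_MASK;
        else
            printf("MMIO doorbell: head %u\n", head);  /* models hns_roce_write64 */
    }

    int main(void)
    {
        ring_rq_db(3, 1);
        printf("db_record = %u\n", db_record);
        ring_rq_db(4, 0);
        return 0;
    }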
@@ -681,8 +723,7 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
- hns_roce_write512(hr_dev, wqe, hr_dev->mem_base +
- HNS_ROCE_DWQE_SIZE * qp->ibqp.qp_num);
+ hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
@@ -879,22 +920,7 @@ out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
- /*
- * Hip08 hardware cannot flush the WQEs in RQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
- if (hr_qp->state == IB_QPS_ERR) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
- &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
- } else {
- *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
- }
+ update_rq_db(hr_dev, hr_qp);
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -1016,13 +1042,14 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
if (likely(nreq)) {
- srq_db.byte_4 =
- cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
- (srq->srqn & V2_DB_BYTE_4_TAG_M));
- srq_db.parameter =
- cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
+ roce_set_field(srq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ srq->srqn);
+ roce_set_field(srq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_SRQ_DB);
+ roce_set_field(srq_db.parameter, V2_DB_PRODUCER_IDX_M,
+ V2_DB_PRODUCER_IDX_S, srq->idx_que.head);
- hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
}
spin_unlock_irqrestore(&srq->lock, flags);
@@ -1030,7 +1057,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
return ret;
}
-static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
+static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
@@ -1053,7 +1080,7 @@ static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
return CMD_RST_PRC_SUCCESS;
}
-static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
@@ -1081,7 +1108,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
return CMD_RST_PRC_SUCCESS;
}
-static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
+static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
@@ -1098,10 +1125,9 @@ static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
return CMD_RST_PRC_EBUSY;
}
-static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
+static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
unsigned long reset_stage; /* the current reset stage */
@@ -1109,9 +1135,6 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
bool sw_resetting;
bool hw_resetting;
- if (hr_dev->is_reset)
- return CMD_RST_PRC_SUCCESS;
-
/* Get information about reset from NIC driver or RoCE driver itself,
* the meaning of the following variables from NIC driver are described
* as below:
@@ -1122,19 +1145,53 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_cmdq_stat(handle);
- sw_resetting = ops->ae_dev_resetting(handle);
-
if (reset_cnt != hr_dev->reset_cnt)
return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
reset_stage);
- else if (hw_resetting)
+
+ hw_resetting = ops->get_cmdq_stat(handle);
+ if (hw_resetting)
return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
reset_stage);
- else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
+
+ sw_resetting = ops->ae_dev_resetting(handle);
+ if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
return hns_roce_v2_cmd_sw_resetting(hr_dev);
- return 0;
+ return CMD_RST_PRC_OTHERS;
+}
+
+static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
+ return true;
+
+ if (ops->get_hw_reset_stat(handle))
+ return true;
+
+ if (ops->ae_dev_resetting(handle))
+ return true;
+
+ return false;
+}
+
+static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ u32 status;
+
+ if (hr_dev->is_reset)
+ status = CMD_RST_PRC_SUCCESS;
+ else
+ status = check_aedev_reset_status(hr_dev, priv->handle);
+
+ *busy = (status == CMD_RST_PRC_EBUSY);
+
+ return status == CMD_RST_PRC_OTHERS;
}
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
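
v2_chk_mbox_is_avail() collapses the old tri-state CMD_RST_PRC_* return codes into "is the mailbox usable" plus a busy flag, which lets hns_roce_cmq_send() and hns_roce_cmd_mbox() share one short-circuit: fake success if a reset already completed, -EBUSY if one is in flight. A compact model of that mapping:

    #include <stdbool.h>
    #include <stdio.h>

    enum { RST_OTHERS, RST_SUCCESS, RST_EBUSY };  /* mirrors CMD_RST_PRC_* */

    /* collapse the tri-state reset status into (usable, busy), as
     * v2_chk_mbox_is_avail() does */
    static bool chk_mbox_avail(int status, bool *busy)
    {
        *busy = (status == RST_EBUSY);
        return status == RST_OTHERS;  /* only "no reset in progress" is usable */
    }

    int main(void)
    {
        bool busy;
        for (int s = RST_OTHERS; s <= RST_EBUSY; s++) {
            if (chk_mbox_avail(s, &busy))
                printf("status %d -> issue command\n", s);
            else
                printf("status %d -> %s\n", s, busy ? "-EBUSY" : "fake success");
        }
        return 0;
    }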
@@ -1152,6 +1209,9 @@ static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
ring->desc_dma_addr = 0;
kfree(ring->desc);
ring->desc = NULL;
+
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to map cmq desc addr.\n");
return -ENOMEM;
}
@@ -1228,14 +1288,16 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
/* Init CSQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
if (ret) {
- dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to init CSQ, ret = %d.\n", ret);
return ret;
}
/* Init CRQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
if (ret) {
- dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to init CRQ, ret = %d.\n", ret);
goto err_crq;
}
@@ -1352,27 +1414,36 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
- int retval;
+ bool busy;
int ret;
- ret = hns_roce_v2_rst_process_cmd(hr_dev);
- if (ret == CMD_RST_PRC_SUCCESS)
- return 0;
- if (ret == CMD_RST_PRC_EBUSY)
- return -EBUSY;
+ if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+ return busy ? -EBUSY : 0;
ret = __hns_roce_cmq_send(hr_dev, desc, num);
if (ret) {
- retval = hns_roce_v2_rst_process_cmd(hr_dev);
- if (retval == CMD_RST_PRC_SUCCESS)
- return 0;
- else if (retval == CMD_RST_PRC_EBUSY)
- return -EBUSY;
+ if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+ return busy ? -EBUSY : 0;
}
return ret;
}
+static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+ dma_addr_t base_addr, u16 op)
+{
+ struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ int ret;
+
+ if (IS_ERR(mbox))
+ return PTR_ERR(mbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mbox);
+ return ret;
+}
+
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_version *resp;
@@ -1391,92 +1462,90 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
return 0;
}
-static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
+static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- unsigned long reset_cnt;
- bool sw_resetting;
- bool hw_resetting;
+ unsigned long end;
- reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
- sw_resetting = ops->ae_dev_resetting(handle);
+ hr_dev->dis_db = true;
- if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
- return true;
+ dev_warn(hr_dev->dev,
+ "Func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (!ops->get_hw_reset_stat(handle)) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "Func clear success after reset.\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
- return false;
+ dev_warn(hr_dev->dev, "Func clear failed.\n");
}
-static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
- int flag)
+static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- unsigned long instance_stage;
- unsigned long reset_cnt;
unsigned long end;
- bool sw_resetting;
- bool hw_resetting;
- instance_stage = handle->rinfo.instance_state;
- reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
- sw_resetting = ops->ae_dev_resetting(handle);
+ hr_dev->dis_db = true;
+
+ dev_warn(hr_dev->dev,
+ "Func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (ops->ae_dev_reset_cnt(handle) !=
+ hr_dev->reset_cnt) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "Func clear success after sw reset\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
+
+ dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+}
+
+static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
+ int flag)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- if (reset_cnt != hr_dev->reset_cnt) {
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
hr_dev->dis_db = true;
hr_dev->is_reset = true;
dev_info(hr_dev->dev, "Func clear success after reset.\n");
- } else if (hw_resetting) {
- hr_dev->dis_db = true;
+ return;
+ }
- dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
- end = HNS_ROCE_V2_HW_RST_TIMEOUT;
- while (end) {
- if (!ops->get_hw_reset_stat(handle)) {
- hr_dev->is_reset = true;
- dev_info(hr_dev->dev,
- "Func clear success after reset.\n");
- return;
- }
- msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
- end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
- }
+ if (ops->get_hw_reset_stat(handle)) {
+ func_clr_hw_resetting_state(hr_dev, handle);
+ return;
+ }
- dev_warn(hr_dev->dev, "Func clear failed.\n");
- } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
- hr_dev->dis_db = true;
+ if (ops->ae_dev_resetting(handle) &&
+ handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
+ func_clr_sw_resetting_state(hr_dev, handle);
+ return;
+ }
+ if (retval && !flag)
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
- end = HNS_ROCE_V2_HW_RST_TIMEOUT;
- while (end) {
- if (ops->ae_dev_reset_cnt(handle) !=
- hr_dev->reset_cnt) {
- hr_dev->is_reset = true;
- dev_info(hr_dev->dev,
- "Func clear success after sw reset\n");
- return;
- }
- msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
- end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
- }
-
- dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
- } else {
- if (retval && !flag)
- dev_warn(hr_dev->dev,
- "Func clear read failed, ret = %d.\n", retval);
+ "Func clear read failed, ret = %d.\n", retval);
- dev_warn(hr_dev->dev, "Func clear failed.\n");
- }
+ dev_warn(hr_dev->dev, "Func clear failed.\n");
}
-static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+
+static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
bool fclr_write_fail_flag = false;
struct hns_roce_func_clear *resp;
@@ -1484,11 +1553,12 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
unsigned long end;
int ret = 0;
- if (hns_roce_func_clr_chk_rst(hr_dev))
+ if (check_device_is_in_reset(hr_dev))
goto out;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
resp = (struct hns_roce_func_clear *)desc.data;
+ resp->rst_funcid_en = cpu_to_le32(vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
@@ -1501,7 +1571,7 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
while (end) {
- if (hns_roce_func_clr_chk_rst(hr_dev))
+ if (check_device_is_in_reset(hr_dev))
goto out;
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
@@ -1509,18 +1579,45 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
true);
+ resp->rst_funcid_en = cpu_to_le32(vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
continue;
if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
- hr_dev->is_reset = true;
+ if (vf_id == 0)
+ hr_dev->is_reset = true;
return;
}
}
out:
- hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
+ hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
+}
+
+static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *req_a;
+
+ req_a = (struct hns_roce_cmq_req *)desc[0].data;
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
+ hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
+ hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = hr_dev->func_num - 1; i >= 0; i--) {
+ __hns_roce_function_clear(hr_dev, i);
+ if (i != 0)
+ hns_roce_free_vf_resource(hr_dev, i);
+ }
}
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
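
Both func_clr_*_resetting_state() helpers above use the same bounded wait: sleep in fixed steps, re-probe the reset status, and count a millisecond budget down to zero rather than comparing jiffies. A runnable sketch of the pattern (timeout values and the status probe are invented; msleep() is elided):

    #include <stdio.h>

    #define RST_TIMEOUT_MS 1000  /* placeholder for HNS_ROCE_V2_HW_RST_TIMEOUT */
    #define RST_WAIT_MS    20    /* placeholder for the completion-wait step */

    static int hw_reset_done(int tick) { return tick >= 3; }  /* fake probe */

    int main(void)
    {
        int tick = 0;
        for (long end = RST_TIMEOUT_MS; end > 0; end -= RST_WAIT_MS) {
            if (hw_reset_done(tick++)) {
                printf("reset finished, func clear succeeded\n");
                return 0;
            }
            /* msleep(RST_WAIT_MS) in the kernel; elided here */
        }
        printf("func clear failed: device still resetting\n");
        return 1;
    }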
@@ -1540,79 +1637,107 @@ static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
return 0;
}
+static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+ hr_dev->func_num = 1;
+ return 0;
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
+ true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ hr_dev->func_num = 1;
+ return ret;
+ }
+
+ hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
+ hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
+
+ return 0;
+}
+
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_cfg_global_param *req;
struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
- req = (struct hns_roce_cfg_global_param *)desc.data;
- memset(req, 0, sizeof(*req));
- roce_set_field(req->time_cfg_udp_port,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
- roce_set_field(req->time_cfg_udp_port,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S,
- ROCE_V2_UDP_DPORT);
+ hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+ hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
struct hns_roce_cmq_desc desc[2];
- struct hns_roce_pf_res_a *req_a;
- struct hns_roce_pf_res_b *req_b;
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ enum hns_roce_opcode_type opcode;
+ u32 func_num;
int ret;
- hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_QUERY_PF_RES,
- true);
- desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ if (is_vf) {
+ opcode = HNS_ROCE_OPC_QUERY_VF_RES;
+ func_num = 1;
+ } else {
+ opcode = HNS_ROCE_OPC_QUERY_PF_RES;
+ func_num = hr_dev->func_num;
+ }
- hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_QUERY_PF_RES,
- true);
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
return ret;
- req_a = (struct hns_roce_pf_res_a *)desc[0].data;
- req_b = (struct hns_roce_pf_res_b *)desc[1].data;
-
- hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_BT_NUM_S);
- hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
- PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
- PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
- hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
- PF_RES_DATA_3_PF_CQC_BT_NUM_M,
- PF_RES_DATA_3_PF_CQC_BT_NUM_S);
- hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
- PF_RES_DATA_4_PF_MPT_BT_NUM_M,
- PF_RES_DATA_4_PF_MPT_BT_NUM_S);
-
- hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
- PF_RES_DATA_3_PF_SL_NUM_M,
- PF_RES_DATA_3_PF_SL_NUM_S);
- hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
- PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
- PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
-
- hr_dev->caps.gmv_bt_num = roce_get_field(req_b->gmv_idx_num,
- PF_RES_DATA_5_PF_GMV_BT_NUM_M,
- PF_RES_DATA_5_PF_GMV_BT_NUM_S);
+ caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
+ caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
+ caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
+ caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
+ caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
+ caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
+ caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
+ caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
+
+ if (is_vf) {
+ caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
+ caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
+ func_num;
+ } else {
+ caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
+ caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
+ func_num;
+ }
return 0;
}
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+ return load_func_res_caps(hr_dev, false);
+}
+
+static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ return load_func_res_caps(hr_dev, true);
+}
+
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_pf_timer_res_a *req_a;
struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
@@ -1622,24 +1747,17 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
- req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
-
- hr_dev->caps.qpc_timer_bt_num =
- roce_get_field(req_a->qpc_timer_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
- hr_dev->caps.cqc_timer_bt_num =
- roce_get_field(req_a->cqc_timer_bt_idx_num,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
+ caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
+ caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
return 0;
}
-static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
+static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ u32 vf_id)
{
- struct hns_roce_cmq_desc desc;
struct hns_roce_vf_switch *swt;
+ struct hns_roce_cmq_desc desc;
int ret;
swt = (struct hns_roce_vf_switch *)desc.data;
@@ -1661,153 +1779,127 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_cmq_desc desc[2];
- struct hns_roce_vf_res_a *req_a;
- struct hns_roce_vf_res_b *req_b;
+ u32 vf_id;
+ int ret;
+
+ for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+ ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
- req_a = (struct hns_roce_vf_res_a *)desc[0].data;
- req_b = (struct hns_roce_vf_res_b *)desc[1].data;
+static int __hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+ struct hns_roce_caps *caps = &hr_dev->caps;
- hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_ALLOC_VF_RES,
- false);
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
- hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_ALLOC_VF_RES,
- false);
+ hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
+
+ hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
+ hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
+ hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
+ hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);
-
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
- HNS_ROCE_VF_SRQC_BT_NUM);
-
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);
-
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);
-
- roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
- VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
- roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
- VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);
-
- roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
- VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
- roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
- VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);
-
- roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
- VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
- roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
- VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);
-
- roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
- VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
- roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
- VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);
-
- roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
- roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
- HNS_ROCE_VF_SCCC_BT_NUM);
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
+ vf_id * caps->gmv_bt_num);
+ } else {
+ hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
+ vf_id * caps->sgid_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
+ vf_id * caps->smac_bt_num);
+ }
return hns_roce_cmq_send(hr_dev, desc, 2);
}
+static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ int vf_id;
+ int ret;
+
+ for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+ ret = __hns_roce_alloc_vf_resource(hr_dev, vf_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
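For context, __hns_roce_alloc_vf_resource() above statically partitions every table: function vf_id owns the contiguous slice that begins at vf_id times the per-function count. A minimal standalone sketch of that slicing, with invented counts (the real ones come from the PF query divided by func_num):

/*
 * Sketch of the static partitioning in __hns_roce_alloc_vf_resource():
 * every function gets an equal, contiguous slice of each table. The
 * counts below are invented for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int per_func = 64;	/* e.g. caps->qpc_bt_num */
	unsigned int func_num = 4;

	for (unsigned int vf_id = 0; vf_id < func_num; vf_id++)
		printf("func %u: idx %u, num %u\n",
		       vf_id, vf_id * per_func, per_func);
	return 0;
}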
+
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
- u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
- u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
- u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
- u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
- u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
- struct hns_roce_cfg_bt_attr *req;
struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
- req = (struct hns_roce_cfg_bt_attr *)desc.data;
- memset(req, 0, sizeof(*req));
-
- roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
- hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
- hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
- CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
- qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
-
- roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
- hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
- hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
- CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
- srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
-
- roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
- hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
- hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
- CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
- cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
-
- roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
- hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
- hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
- CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
- mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
-
- roce_set_field(req->vf_sccc_cfg,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
- hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_sccc_cfg,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
- hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_sccc_cfg,
- CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
- CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
- sccc_hop_num ==
- HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
+
+ hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
+ caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
+ caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
+ to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
+
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
+ caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
+ caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
+ to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
+
+ hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
+ caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
+ caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
+ to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
+
+ hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
+ caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
+ caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
+ to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
+
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
+ caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
+ caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
+ to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_caps *caps = &hr_dev->caps;
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
@@ -1819,24 +1911,24 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
- caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
+ caps->num_comp_vectors =
+ min_t(u32, caps->eqc_bt_num - 1,
+ (u32)priv->handle->rinfo.num_vectors - 2);
caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
+ caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
- caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -1844,56 +1936,39 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
- caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
+ caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
caps->reserved_srqs = 0;
caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
- caps->qpc_ba_pg_sz = 0;
- caps->qpc_buf_pg_sz = 0;
caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->srqc_ba_pg_sz = 0;
- caps->srqc_buf_pg_sz = 0;
caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->cqc_ba_pg_sz = 0;
- caps->cqc_buf_pg_sz = 0;
caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->mpt_ba_pg_sz = 0;
- caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->mtt_ba_pg_sz = 0;
- caps->mtt_buf_pg_sz = 0;
caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
- caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
- caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
- caps->srqwqe_ba_pg_sz = 0;
- caps->srqwqe_buf_pg_sz = 0;
caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
- caps->idx_ba_pg_sz = 0;
- caps->idx_buf_pg_sz = 0;
caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
- caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
- HNS_ROCE_CAP_FLAG_RECORD_DB |
- HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
caps->pkey_table_len[0] = 1;
- caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
- caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
- caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1902,22 +1977,15 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
- HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
- caps->qpc_timer_ba_pg_sz = 0;
- caps->qpc_timer_buf_pg_sz = 0;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
- caps->cqc_timer_ba_pg_sz = 0;
- caps->cqc_timer_buf_pg_sz = 0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
- caps->sccc_ba_pg_sz = 0;
- caps->sccc_buf_pg_sz = 0;
caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
@@ -1930,10 +1998,17 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
caps->gmv_entry_sz);
caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gmv_ba_pg_sz = 0;
- caps->gmv_buf_pg_sz = 0;
caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
caps->gmv_entry_sz);
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INL_EXT;
+ } else {
+ caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+ caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+ caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
+ caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
+ caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
}
}
@@ -1979,6 +2054,70 @@ static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}
+static void set_hem_page_size(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ /* EQ */
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+
+ /* Link Table */
+ caps->tsq_buf_pg_sz = 0;
+
+ /* MR */
+ caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
+ caps->pbl_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
+ caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
+ HEM_TYPE_MTPT);
+
+ /* QP */
+ caps->qpc_timer_ba_pg_sz = 0;
+ caps->qpc_timer_buf_pg_sz = 0;
+ caps->mtt_ba_pg_sz = 0;
+ caps->mtt_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
+ caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
+ HEM_TYPE_QPC);
+
+ if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+ calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
+ caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
+ &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
+
+ /* CQ */
+ calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
+ caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
+ HEM_TYPE_CQC);
+ calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
+ 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
+
+ if (caps->cqc_timer_entry_sz)
+ calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
+ caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
+ &caps->cqc_timer_buf_pg_sz,
+ &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
+
+ /* SRQ */
+ if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
+ caps->srqc_hop_num, caps->srqc_bt_num,
+ &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
+ HEM_TYPE_SRQC);
+ calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
+ caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
+ &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
+ calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
+ caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
+ &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
+ }
+
+ /* GMV */
+ caps->gmv_ba_pg_sz = 0;
+ caps->gmv_buf_pg_sz = 0;
+}
+
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
@@ -2062,6 +2201,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
V2_QUERY_PF_CAPS_C_MAX_GID_M,
V2_QUERY_PF_CAPS_C_MAX_GID_S);
+
+ caps->gid_table_len[0] /= hr_dev->func_num;
+
caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
@@ -2079,13 +2221,18 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
+ caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
+ V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
+ V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
+
caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
+
caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
@@ -2133,8 +2280,8 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
- caps->mtt_ba_pg_sz = 0;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+ caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
+ caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
@@ -2166,99 +2313,82 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
caps->gmv_entry_sz);
caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gmv_ba_pg_sz = 0;
- caps->gmv_buf_pg_sz = 0;
caps->gid_table_len[0] = caps->gmv_bt_num *
(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
}
- calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
- caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
- HEM_TYPE_QPC);
- calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
- caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
- HEM_TYPE_MTPT);
- calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
- caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
- HEM_TYPE_CQC);
- calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
- caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
- &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
-
- caps->sccc_hop_num = ctx_hop_num;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- calc_pg_sz(caps->num_qps, caps->sccc_sz,
- caps->sccc_hop_num, caps->sccc_bt_num,
- &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
- HEM_TYPE_SCCC);
- calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
- caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
- &caps->cqc_timer_buf_pg_sz,
- &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
-
- calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
- 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
- calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
- caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
- &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
- calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
- 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
-
return 0;
}
-static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
+static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
{
struct hns_roce_cmq_desc desc;
- struct hns_roce_cfg_entry_size *cfg_size =
- (struct hns_roce_cfg_entry_size *)desc.data;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
false);
- cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
- cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
-
- return hns_roce_cmq_send(hr_dev, &desc, 1);
-}
-
-static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_cfg_entry_size *cfg_size =
- (struct hns_roce_cfg_entry_size *)desc.data;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
- false);
-
- cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
- cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
+ hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
+ hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
return 0;
- ret = hns_roce_config_qpc_size(hr_dev);
+ ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
+ caps->qpc_sz);
if (ret) {
dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
return ret;
}
- ret = hns_roce_config_sccc_size(hr_dev);
+ ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
+ caps->sccc_sz);
if (ret)
dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
return ret;
}
+static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
+ hr_dev->func_num = 1;
+
+ ret = hns_roce_query_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+			"failed to query VF resource, ret = %d.\n", ret);
+ return ret;
+ }
+
+ set_default_caps(hr_dev);
+ set_hem_page_size(hr_dev);
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+			"failed to configure VF BT attributes, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
struct hns_roce_caps *caps = &hr_dev->caps;
@@ -2278,6 +2408,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+ if (hr_dev->is_vf)
+ return hns_roce_v2_vf_profile(hr_dev);
+
+ ret = hns_roce_query_func_info(hr_dev);
+ if (ret) {
+		dev_err(hr_dev->dev, "failed to query function info, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
ret = hns_roce_config_global_param(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
@@ -2300,7 +2440,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- ret = hns_roce_set_vf_switch_param(hr_dev, 0);
+ ret = hns_roce_set_vf_switch_param(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
"failed to set function switch param, ret = %d.\n",
@@ -2311,13 +2451,8 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_part_id = hr_dev->pci_dev->device;
hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
- caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
- caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
- caps->eqe_ba_pg_sz = 0;
- caps->eqe_buf_pg_sz = 0;
caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
- caps->tsq_buf_pg_sz = 0;
ret = hns_roce_query_pf_caps(hr_dev);
if (ret)
@@ -2330,6 +2465,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+ set_hem_page_size(hr_dev);
ret = hns_roce_v2_set_bt(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
@@ -2507,6 +2643,22 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
link_tbl->table.map);
}
+static void free_dip_list(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_dip *hr_dip;
+ struct hns_roce_dip *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+
+ list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
+ list_del(&hr_dip->node);
+ kfree(hr_dip);
+ }
+
+ spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+}
+
static int get_hem_table(struct hns_roce_dev *hr_dev)
{
unsigned int qpc_count;
@@ -2515,6 +2667,17 @@ static int get_hem_table(struct hns_roce_dev *hr_dev)
int ret;
int i;
+	/* Alloc memory for GMV (GID/MAC/VLAN) table buffer space chunk */
+ for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
+ gmv_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
+ if (ret)
+ goto err_gmv_failed;
+ }
+
+ if (hr_dev->is_vf)
+ return 0;
+
/* Alloc memory for QPC Timer buffer space chunk */
for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
qpc_count++) {
@@ -2537,23 +2700,8 @@ static int get_hem_table(struct hns_roce_dev *hr_dev)
}
}
- /* Alloc memory for GMV(GID/MAC/VLAN) table buffer space chunk */
- for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
- gmv_count++) {
- ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
- if (ret) {
- dev_err(hr_dev->dev,
- "failed to get gmv table, ret = %d.\n", ret);
- goto err_gmv_failed;
- }
- }
-
return 0;
-err_gmv_failed:
- for (i = 0; i < gmv_count; i++)
- hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
-
err_cqc_timer_failed:
for (i = 0; i < cqc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
@@ -2562,19 +2710,47 @@ err_qpc_timer_failed:
for (i = 0; i < qpc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+err_gmv_failed:
+ for (i = 0; i < gmv_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
return ret;
}
+static void put_hem_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
+ if (hr_dev->is_vf)
+ return;
+
+ for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+
+ for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
+}
+
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
+ ret = get_hem_table(hr_dev);
+ if (ret)
+ return ret;
+
+ if (hr_dev->is_vf)
+ return 0;
+
/* TSQ includes SQ doorbell and ack doorbell */
ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
if (ret) {
dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
- return ret;
+ goto err_tsq_init_failed;
}
ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
@@ -2583,17 +2759,13 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
goto err_tpq_init_failed;
}
- ret = get_hem_table(hr_dev);
- if (ret)
- goto err_get_hem_table_failed;
-
return 0;
-err_get_hem_table_failed:
- hns_roce_free_link_table(hr_dev, &priv->tpq);
+err_tsq_init_failed:
+ put_hem_table(hr_dev);
err_tpq_init_failed:
- hns_roce_free_link_table(hr_dev, &priv->tsq);
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
return ret;
}
@@ -2604,38 +2776,13 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
hns_roce_function_clear(hr_dev);
- hns_roce_free_link_table(hr_dev, &priv->tpq);
- hns_roce_free_link_table(hr_dev, &priv->tsq);
-}
-
-static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_mbox_status *mb_st =
- (struct hns_roce_mbox_status *)desc.data;
- int status;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
-
- status = hns_roce_cmq_send(hr_dev, &desc, 1);
- if (status)
- return status;
-
- return le32_to_cpu(mb_st->mb_status_hw_run);
-}
-
-static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = hns_roce_query_mbox_status(hr_dev);
-
- return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
-}
-
-static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
-{
- u32 status = hns_roce_query_mbox_status(hr_dev);
+ if (!hr_dev->is_vf) {
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+ }
- return status & HNS_ROCE_HW_MB_STATUS_MASK;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+ free_dip_list(hr_dev);
}
static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -2657,58 +2804,97 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
+static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
+ u8 *complete_status)
{
- struct device *dev = hr_dev->dev;
+ struct hns_roce_mbox_status *mb_st;
+ struct hns_roce_cmq_desc desc;
unsigned long end;
- int ret;
+ int ret = -EBUSY;
+ u32 status;
+ bool busy;
+
+ mb_st = (struct hns_roce_mbox_status *)desc.data;
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ status = 0;
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
+ true);
+ ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (!ret) {
+ status = le32_to_cpu(mb_st->mb_status_hw_run);
+ /* No pending message exists in ROCEE mbox. */
+ if (!(status & MB_ST_HW_RUN_M))
+ break;
+ } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ break;
+ }
- end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (hns_roce_v2_cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
- dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
- (int)end);
- return -EAGAIN;
+ dev_err_ratelimited(hr_dev->dev,
+					    "failed to wait for mbox status 0x%x\n",
+ status);
+ return -ETIMEDOUT;
}
+
cond_resched();
+ ret = -EBUSY;
}
- ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, token, event);
- if (ret)
- dev_err(dev, "Post mailbox fail(%d)\n", ret);
+ if (!ret) {
+ *complete_status = (u8)(status & MB_ST_COMPLETE_M);
+ } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ /* Ignore all errors if the mbox is unavailable. */
+ ret = 0;
+ *complete_status = MB_ST_COMPLETE_M;
+ }
return ret;
}
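v2_wait_mbox_complete() above is a deadline-poll loop: query the status, stop when idle, back off, and give up once the deadline passes. A minimal userspace sketch of the same pattern, assuming nothing from the driver; CLOCK_MONOTONIC stands in for jiffies and check_status() is an invented placeholder for the QUERY_MB_ST query:

/*
 * Userspace analogue of the deadline-poll loop in v2_wait_mbox_complete().
 * check_status() is a made-up stand-in for the hardware query; the driver
 * itself uses jiffies and cond_resched().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool check_status(void)
{
	static int calls;

	return ++calls >= 3;	/* pretend the mbox goes idle on poll 3 */
}

static int wait_idle(unsigned int timeout_ms)
{
	struct timespec now, end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_sec += timeout_ms / 1000;
	end.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (end.tv_nsec >= 1000000000L) {
		end.tv_sec++;
		end.tv_nsec -= 1000000000L;
	}

	for (;;) {
		if (check_status())
			return 0;	/* idle: safe to post a message */

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > end.tv_sec ||
		    (now.tv_sec == end.tv_sec && now.tv_nsec >= end.tv_nsec))
			return -1;	/* deadline passed, like -ETIMEDOUT */

		/* back off briefly instead of busy-spinning */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	printf("wait_idle() = %d\n", wait_idle(100));
	return 0;
}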
-static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned int timeout)
+static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op, u16 token, int event)
{
- struct device *dev = hr_dev->dev;
- unsigned long end;
- u32 status;
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
+ u8 status = 0;
+ int ret;
- if (hns_roce_v2_cmd_pending(hr_dev)) {
- dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
- return -ETIMEDOUT;
+	/* Wait for the mbox to be idle */
+ ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
+ &status);
+ if (unlikely(ret)) {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to check post mbox status = 0x%x, ret = %d.\n",
+ status, ret);
+ return ret;
}
- status = hns_roce_v2_cmd_complete(hr_dev);
- if (status != 0x1) {
- if (status == CMD_RST_PRC_EBUSY)
- return status;
+ /* Post new message to mbox */
+ ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to post mailbox, ret = %d.\n", ret);
+
+ return ret;
+}
- dev_err(dev, "mailbox status 0x%x!\n", status);
- return -EBUSY;
+static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
+ if (!ret) {
+ if (status != MB_ST_COMPLETE_SUCC)
+ return -EBUSY;
+ } else {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to check mbox status = 0x%x, ret = %d.\n",
+ status, ret);
}
- return 0;
+ return ret;
}
static void copy_gid(void *dest, const union ib_gid *gid)
@@ -2790,7 +2976,7 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
@@ -3079,14 +3265,31 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
!!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
-static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
+static inline void update_cq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
- *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
+ *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
+ } else {
+ struct hns_roce_v2_db cq_db = {};
+
+ roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_CQ_DB);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
+ V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
+ V2_CQ_DB_CMD_SN_S, 1);
+
+ hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
+ }
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
struct hns_roce_srq *srq)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
struct hns_roce_v2_cqe *cqe, *dest;
u32 prod_index;
int nfreed = 0;
@@ -3129,7 +3332,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
- hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ update_cq_db(hr_dev, hr_cq);
}
}
@@ -3224,37 +3427,33 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- u32 notification_flag;
- __le32 doorbell[2];
+ struct hns_roce_v2_db cq_db = {};
+ u32 notify_flag;
- doorbell[0] = 0;
- doorbell[1] = 0;
-
- notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
- V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
/*
- * flags = 0; Notification Flag = 1, next
- * flags = 1; Notification Flag = 0, solocited
+	 * flags = 0, then notify_flag : next
+	 * flags = 1, then notify_flag : solicited
*/
- roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
- hr_cq->cqn);
- roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
- HNS_ROCE_V2_CQ_DB_NTR);
- roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
- roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
- V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
- roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
- notification_flag);
-
- hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
+ notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+
+ roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S, hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_CQ_DB_NOTIFY);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
+ V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
+ V2_CQ_DB_CMD_SN_S, hr_cq->arm_sn);
+ roce_set_bit(cq_db.parameter, V2_CQ_DB_NOTIFY_TYPE_S, notify_flag);
+
+ hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
return 0;
}
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
- struct hns_roce_qp **cur_qp,
- struct ib_wc *wc)
+ struct hns_roce_qp *qp,
+ struct ib_wc *wc)
{
struct hns_roce_rinl_sge *sge_list;
u32 wr_num, wr_cnt, sge_num;
@@ -3263,11 +3462,11 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
- wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+ wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
- sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
- sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
- wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
+ sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
data_len = wc->byte_len;
for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
@@ -3401,21 +3600,205 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
init_flush_work(hr_dev, qp);
}
+static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_qp *hr_qp = *cur_qp;
+ u32 qpn;
+
+ qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+ V2_CQE_BYTE_16_LCL_QPN_S) &
+ HNS_ROCE_V2_CQE_QPN_MASK;
+
+ if (!hr_qp || qpn != hr_qp->qpn) {
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ ibdev_err(&hr_dev->ib_dev,
+ "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, qpn);
+ return -EINVAL;
+ }
+ *cur_qp = hr_qp;
+ }
+
+ return 0;
+}
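get_cur_qp() above caches the QP resolved for the previous CQE, so back-to-back completions on one QP skip the table lookup. A toy sketch of that caching idea; lookup() here is an invented linear scan, not the driver's real QP table:

/*
 * Sketch of the lookup-caching idea in get_cur_qp(): only the pattern is
 * real; lookup() is a toy linear scan with invented QPNs.
 */
#include <stdio.h>

struct qp { unsigned int qpn; };

static struct qp qp_table[] = { { .qpn = 8 }, { .qpn = 9 } };

static struct qp *lookup(unsigned int qpn)
{
	for (unsigned int i = 0; i < sizeof(qp_table) / sizeof(qp_table[0]); i++)
		if (qp_table[i].qpn == qpn)
			return &qp_table[i];
	return NULL;
}

/* Reuse *cur while consecutive completions target the same QPN. */
static struct qp *get_cur(struct qp **cur, unsigned int qpn)
{
	if (!*cur || (*cur)->qpn != qpn)
		*cur = lookup(qpn);
	return *cur;
}

int main(void)
{
	unsigned int qpns[] = { 8, 8, 9 };
	struct qp *cur = NULL;

	for (int i = 0; i < 3; i++)
		printf("qpn %u -> qp %p\n", qpns[i],
		       (void *)get_cur(&cur, qpns[i]));
	return 0;
}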
+
+/*
+ * mapped-value = 1 + real-value
+ * The ib wc opcode's real value starts from 0. In order to distinguish
+ * between initialized and uninitialized map values, we add 1 to the real
+ * value when defining the mapping, so that validity can be identified by
+ * checking whether the mapped value is greater than 0.
+ */
+#define HR_WC_OP_MAP(hr_key, ib_key) \
+ [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
+
+static const u32 wc_send_op_map[] = {
+ HR_WC_OP_MAP(SEND, SEND),
+ HR_WC_OP_MAP(SEND_WITH_INV, SEND),
+ HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
+ HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
+ HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
+ HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
+ HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
+ HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
+ HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
+ HR_WC_OP_MAP(BIND_MW, REG_MR),
+};
+
+static int to_ib_wc_send_op(u32 hr_opcode)
+{
+ if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
+ return -EINVAL;
+
+ return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
+ -EINVAL;
+}
+
+static const u32 wc_recv_op_map[] = {
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
+ HR_WC_OP_MAP(SEND, RECV),
+ HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
+ HR_WC_OP_MAP(SEND_WITH_INV, RECV),
+};
+
+static int to_ib_wc_recv_op(u32 hr_opcode)
+{
+ if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
+ return -EINVAL;
+
+ return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
+ -EINVAL;
+}
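The offset-by-one scheme described in the comment above can be shown in isolation. A standalone sketch with invented opcode numbers; designated initializers zero-fill the gaps, so a stored 0 means "no mapping" and a valid entry decodes as entry minus 1:

/*
 * Standalone illustration of the "mapped-value = 1 + real-value" trick:
 * the opcode numbers here are invented for the example.
 */
#include <stdio.h>

enum hw_op { HW_OP_SEND = 0, HW_OP_WRITE = 4, HW_OP_MAX = 8 };
enum sw_op { SW_OP_SEND = 0, SW_OP_WRITE = 1 };

static const unsigned int op_map[HW_OP_MAX] = {
	[HW_OP_SEND]  = 1 + SW_OP_SEND,		/* unlisted slots stay 0 */
	[HW_OP_WRITE] = 1 + SW_OP_WRITE,
};

static int to_sw_op(unsigned int hw)
{
	if (hw >= HW_OP_MAX || !op_map[hw])
		return -1;			/* unmapped opcode */
	return (int)op_map[hw] - 1;		/* undo the +1 offset */
}

int main(void)
{
	/* prints "0 -1 1": SEND maps, 2 is unmapped, WRITE maps */
	printf("%d %d %d\n", to_sw_op(HW_OP_SEND), to_sw_op(2),
	       to_sw_op(HW_OP_WRITE));
	return 0;
}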
+
+static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+{
+ u32 hr_opcode;
+ int ib_opcode;
+
+ wc->wc_flags = 0;
+
+ hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+ switch (hr_opcode) {
+ case HNS_ROCE_V2_WQE_OP_RDMA_READ:
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
+ case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
+ wc->byte_len = 8;
+ break;
+ default:
+ break;
+ }
+
+ ib_opcode = to_ib_wc_send_op(hr_opcode);
+ if (ib_opcode < 0)
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->opcode = ib_opcode;
+}
+
+static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
+ struct hns_roce_v2_cqe *cqe)
+{
+ return wc->qp->qp_type != IB_QPT_UD &&
+ wc->qp->qp_type != IB_QPT_GSI &&
+ (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S);
+}
+
+static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+{
+ struct hns_roce_qp *qp = to_hr_qp(wc->qp);
+ u32 hr_opcode;
+ int ib_opcode;
+ int ret;
+
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+
+ hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+ switch (hr_opcode) {
+ case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
+ break;
+ default:
+ wc->wc_flags = 0;
+ }
+
+ ib_opcode = to_ib_wc_recv_op(hr_opcode);
+ if (ib_opcode < 0)
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->opcode = ib_opcode;
+
+ if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
+ ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ wc->sl = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
+ V2_CQE_BYTE_32_SL_S);
+ wc->src_qp = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M,
+ V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->slid = 0;
+ wc->wc_flags |= roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ?
+ IB_WC_GRH : 0;
+ wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M,
+ V2_CQE_BYTE_32_PORTN_S);
+ wc->pkey_index = 0;
+
+ if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+ wc->vlan_id = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M,
+ V2_CQE_BYTE_28_VID_S);
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+
+ wc->network_hdr_type = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_PORT_TYPE_M,
+ V2_CQE_BYTE_28_PORT_TYPE_S);
+
+ return 0;
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_qp *qp = *cur_qp;
struct hns_roce_srq *srq = NULL;
struct hns_roce_v2_cqe *cqe;
- struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
int is_send;
- u16 wqe_ctr;
- u32 opcode;
- u32 qpn;
+ u16 wqe_idx;
int ret;
- /* Find cqe according to consumer index */
cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
if (!cqe)
return -EAGAIN;
@@ -3424,189 +3807,50 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
/* Memory barrier */
rmb();
- /* 0->SQ, 1->RQ */
- is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
-
- qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
- V2_CQE_BYTE_16_LCL_QPN_S);
-
- if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (unlikely(!hr_qp)) {
- ibdev_err(&hr_dev->ib_dev,
- "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
- return -EINVAL;
- }
- *cur_qp = hr_qp;
- }
+ ret = get_cur_qp(hr_cq, cqe, &qp);
+ if (ret)
+ return ret;
- wc->qp = &(*cur_qp)->ibqp;
+ wc->qp = &qp->ibqp;
wc->vendor_err = 0;
+ wqe_idx = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S);
+
+ is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
if (is_send) {
- wq = &(*cur_qp)->sq;
- if ((*cur_qp)->sq_signal_bits) {
- /*
- * If sg_signal_bit is 1,
- * firstly tail pointer updated to wqe
- * which current cqe correspond to
- */
- wqe_ctr = (u16)roce_get_field(cqe->byte_4,
- V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S);
- wq->tail += (wqe_ctr - (u16)wq->tail) &
+ wq = &qp->sq;
+
+		/* If sq_signal_bits is set, the tail pointer is first updated
+		 * to the WQE corresponding to the current CQE.
+ */
+ if (qp->sq_signal_bits)
+ wq->tail += (wqe_idx - (u16)wq->tail) &
(wq->wqe_cnt - 1);
- }
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
- } else if ((*cur_qp)->ibqp.srq) {
- srq = to_hr_srq((*cur_qp)->ibqp.srq);
- wqe_ctr = (u16)roce_get_field(cqe->byte_4,
- V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S);
- wc->wr_id = srq->wrid[wqe_ctr];
- hns_roce_free_srq_wqe(srq, wqe_ctr);
- } else {
- /* Update tail pointer, record wr_id */
- wq = &(*cur_qp)->rq;
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- }
-
- get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
- if (unlikely(wc->status != IB_WC_SUCCESS))
- return 0;
- if (is_send) {
- wc->wc_flags = 0;
- /* SQ corresponding to CQE */
- switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
- V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
- case HNS_ROCE_V2_WQE_OP_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
- wc->opcode = IB_WC_SEND;
- wc->wc_flags |= IB_WC_WITH_IMM;
- break;
- case HNS_ROCE_V2_WQE_OP_RDMA_READ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- break;
- case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
- wc->opcode = IB_WC_RDMA_WRITE;
- wc->wc_flags |= IB_WC_WITH_IMM;
- break;
- case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
- wc->opcode = IB_WC_COMP_SWAP;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
- wc->opcode = IB_WC_FETCH_ADD;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
- wc->opcode = IB_WC_MASKED_COMP_SWAP;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
- wc->opcode = IB_WC_MASKED_FETCH_ADD;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
- wc->opcode = IB_WC_REG_MR;
- break;
- case HNS_ROCE_V2_WQE_OP_BIND_MW:
- wc->opcode = IB_WC_REG_MR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
+ fill_send_wc(wc, cqe);
} else {
- /* RQ correspond to CQE */
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
-
- opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
- V2_CQE_BYTE_4_OPCODE_S);
- switch (opcode & 0x1f) {
- case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immtdata));
- break;
- case HNS_ROCE_V2_OPCODE_SEND:
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
- break;
- case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immtdata));
- break;
- case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- if ((wc->qp->qp_type == IB_QPT_RC ||
- wc->qp->qp_type == IB_QPT_UC) &&
- (opcode == HNS_ROCE_V2_OPCODE_SEND ||
- opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
- opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
- (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
- ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
- if (unlikely(ret))
- return -EAGAIN;
- }
-
- wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
- V2_CQE_BYTE_32_SL_S);
- wc->src_qp = (u8)roce_get_field(cqe->byte_32,
- V2_CQE_BYTE_32_RMT_QPN_M,
- V2_CQE_BYTE_32_RMT_QPN_S);
- wc->slid = 0;
- wc->wc_flags |= (roce_get_bit(cqe->byte_32,
- V2_CQE_BYTE_32_GRH_S) ?
- IB_WC_GRH : 0);
- wc->port_num = roce_get_field(cqe->byte_32,
- V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
- wc->pkey_index = 0;
-
- if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
- wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_VID_M,
- V2_CQE_BYTE_28_VID_S);
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ if (qp->ibqp.srq) {
+ srq = to_hr_srq(qp->ibqp.srq);
+ wc->wr_id = srq->wrid[wqe_idx];
+ hns_roce_free_srq_wqe(srq, wqe_idx);
} else {
- wc->vlan_id = 0xffff;
+ wq = &qp->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
}
- wc->network_hdr_type = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_PORT_TYPE_M,
- V2_CQE_BYTE_28_PORT_TYPE_S);
+ ret = fill_recv_wc(wc, cqe);
}
- return 0;
+ get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ return 0;
+
+ return ret;
}
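The signalled-only SQ path above catches the tail up with a masked subtraction; because the ring size is a power of two, the arithmetic stays correct even after the indices wrap. A tiny standalone check with invented values:

/*
 * Sketch of the masked tail catch-up used for signalled-only SQs:
 * with a power-of-two ring, tail += (idx - tail) & (cnt - 1) lands on
 * the completed slot even when the indices have wrapped.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cnt = 8;		/* wq->wqe_cnt, power of two */
	unsigned int tail = 6;		/* free-running consumer index */
	unsigned int wqe_idx = 1;	/* completed slot, after wrap */

	tail += (wqe_idx - tail) & (cnt - 1);
	printf("tail %% cnt = %u\n", tail % cnt);	/* prints 1 */
	return 0;
}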
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
@@ -3638,7 +3882,7 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
}
if (npolled)
- hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ update_cq_db(hr_dev, hr_cq);
out:
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -3647,12 +3891,9 @@ out:
}
static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
- int step_idx)
+ int step_idx, u16 *mbox_op)
{
- int op;
-
- if (type == HEM_TYPE_SCCC && step_idx)
- return -EINVAL;
+ u16 op;
switch (type) {
case HEM_TYPE_QPC:
@@ -3677,51 +3918,49 @@ static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
break;
default:
- dev_warn(hr_dev->dev,
- "table %u not to be written by mailbox!\n", type);
+ dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
return -EINVAL;
}
- return op + step_idx;
+ *mbox_op = op + step_idx;
+
+ return 0;
}
-static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
- u32 hem_type, int step_idx)
+static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+ dma_addr_t base_addr)
{
- struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_cmq_desc desc;
- struct hns_roce_cfg_gmv_bt *gmv_bt =
- (struct hns_roce_cfg_gmv_bt *)desc.data;
- int ret;
- int op;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
+ u64 addr = to_hr_hw_page_addr(base_addr);
- if (hem_type == HEM_TYPE_GMV) {
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT,
- false);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
- gmv_bt->gmv_ba_l = cpu_to_le32(bt_ba >> HNS_HW_PAGE_SHIFT);
- gmv_bt->gmv_ba_h = cpu_to_le32(bt_ba >> (HNS_HW_PAGE_SHIFT +
- 32));
- gmv_bt->gmv_bt_idx = cpu_to_le32(obj /
- (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz));
+ hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
+ hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
- return hns_roce_cmq_send(hr_dev, &desc, 1);
- }
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
- op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
- if (op < 0)
- return 0;
+static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
+ dma_addr_t base_addr, u32 hem_type, int step_idx)
+{
+ int ret;
+ u16 op;
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (unlikely(hem_type == HEM_TYPE_GMV))
+ return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
- ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
- 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
+ return 0;
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
+ if (ret < 0)
+ return ret;
- return ret;
+ return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
}
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
@@ -3911,6 +4150,16 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
ilog2(hr_qp->rq.wqe_cnt));
}
+static inline int get_cqn(struct ib_cq *ib_cq)
+{
+ return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
+}
+
+static inline int get_pdn(struct ib_pd *ib_pd)
+{
+ return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
+}
+
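The two helpers above exist because XRC target QPs may carry a NULL ib_pd or ib_cq, so every QPC write site funnels through one NULL-tolerant accessor instead of repeating the check. The shape of the guard, reduced to a standalone sketch with placeholder types:

#include <stddef.h>

struct cq { int cqn; };

/* Return the CQ number, or 0 when the QP has no CQ attached
 * (e.g. an XRC target QP). */
static inline int get_cqn_or_zero(const struct cq *cq)
{
	return cq ? cq->cqn : 0;
}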
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -3927,13 +4176,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
* 0 at the same time, else set them to 0x1.
*/
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
@@ -3944,6 +4193,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
+ if (ibqp->qp_type == IB_QPT_XRC_TGT) {
+ context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
+
+ roce_set_bit(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_XRC_QP_TYPE_S, 1);
+ }
+
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
@@ -3954,23 +4210,27 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
((u32)hr_qp->rdb.dma) >> 1);
context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
+ if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQIE_S,
+ !!(hr_dev->caps.flags &
+ HNS_ROCE_CAP_FLAG_RQ_INLINE));
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
+
if (ibqp->srq) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 1);
roce_set_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
to_hr_srq(ibqp->srq)->srqn);
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 1);
}
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
return;
@@ -3993,22 +4253,23 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
* 0 at the same time, else set them to 0x1.
*/
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
+
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -4133,17 +4394,6 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
-
return 0;
}
@@ -4240,7 +4490,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
u64 *mtts;
u8 *dmac;
u8 *smac;
- int port;
+ u32 port;
int ret;
ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
@@ -4454,6 +4704,143 @@ static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
return rdma_flow_label_to_udp_sport(fl);
}
+static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ u32 *dip_idx)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_dip *hr_dip;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+
+ list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
+ if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
+ *dip_idx = hr_dip->dip_idx;
+ goto out;
+ }
+ }
+
+ /* If no matching dgid is found, create a new dip entry and map
+ * this dgid to its dip_idx.
+ */
+ hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
+ if (!hr_dip) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
+ list_add_tail(&hr_dip->node, &hr_dev->dip_list);
+
+out:
+ spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+ return ret;
+}
+
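get_dip_ctx_idx() is a lookup-or-insert under a spinlock: the DGID scan and the list_add_tail() share one critical section so two QPs racing on the same destination cannot both allocate an entry, and the allocation must be GFP_ATOMIC because sleeping is forbidden while the lock is held. A userspace analogue, with a pthread mutex standing in for the spinlock and all names invented:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct dip_entry {
	unsigned char dgid[16];
	unsigned int idx;
	struct dip_entry *next;
};

static struct dip_entry *dip_head;
static pthread_mutex_t dip_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find the entry for @dgid, creating it with @new_idx if absent.
 * Returns 0 on success, -1 on allocation failure. */
int dip_lookup_or_insert(const unsigned char dgid[16],
			 unsigned int new_idx, unsigned int *idx)
{
	struct dip_entry *e;
	int ret = 0;

	pthread_mutex_lock(&dip_lock);
	for (e = dip_head; e; e = e->next) {
		if (!memcmp(e->dgid, dgid, 16)) {
			*idx = e->idx;
			goto out;
		}
	}

	e = calloc(1, sizeof(*e)); /* kernel code must not sleep here */
	if (!e) {
		ret = -1;
		goto out;
	}
	memcpy(e->dgid, dgid, 16);
	e->idx = *idx = new_idx;
	e->next = dip_head;
	dip_head = e;
out:
	pthread_mutex_unlock(&dip_lock);
	return ret;
}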
+enum {
+ CONG_DCQCN,
+ CONG_WINDOW,
+};
+
+enum {
+ UNSUPPORT_CONG_LEVEL,
+ SUPPORT_CONG_LEVEL,
+};
+
+enum {
+ CONG_LDCP,
+ CONG_HC3,
+};
+
+enum {
+ DIP_INVALID,
+ DIP_VALID,
+};
+
+static int check_cong_type(struct ib_qp *ibqp,
+ struct hns_roce_congestion_algorithm *cong_alg)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+
+ /* different congestion types match different configurations */
+ switch (hr_dev->caps.cong_type) {
+ case CONG_TYPE_DCQCN:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_INVALID;
+ break;
+ case CONG_TYPE_LDCP:
+ cong_alg->alg_sel = CONG_WINDOW;
+ cong_alg->alg_sub_sel = CONG_LDCP;
+ cong_alg->dip_vld = DIP_INVALID;
+ break;
+ case CONG_TYPE_HC3:
+ cong_alg->alg_sel = CONG_WINDOW;
+ cong_alg->alg_sub_sel = CONG_HC3;
+ cong_alg->dip_vld = DIP_INVALID;
+ break;
+ case CONG_TYPE_DIP:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_VALID;
+ break;
+ default:
+ ibdev_err(&hr_dev->ib_dev,
+ "error type(%u) for congestion selection.\n",
+ hr_dev->caps.cong_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
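check_cong_type() maps the device's single congestion-control capability onto three selector fields; note that DCQCN and DIP share an algorithm selector and differ only in whether a destination-IP context is valid. Since the mapping is pure data, a table-driven variant is also possible, sketched here with illustrative names and values:

#include <stdint.h>
#include <errno.h>

enum cong_type { TYPE_DCQCN, TYPE_LDCP, TYPE_HC3, TYPE_DIP, TYPE_MAX };

struct cong_cfg {
	uint8_t alg_sel;     /* 0 = DCQCN engine, 1 = window-based */
	uint8_t alg_sub_sel; /* sub-algorithm for the window engine */
	uint8_t dip_vld;     /* 1 = per-destination-IP context in use */
};

static const struct cong_cfg cong_cfg_table[TYPE_MAX] = {
	[TYPE_DCQCN] = { 0, 0, 0 },
	[TYPE_LDCP]  = { 1, 0, 0 },
	[TYPE_HC3]   = { 1, 1, 0 },
	[TYPE_DIP]   = { 0, 0, 1 },
};

int get_cong_cfg(unsigned int type, struct cong_cfg *cfg)
{
	if (type >= TYPE_MAX)
		return -EINVAL;
	*cfg = cong_cfg_table[type];
	return 0;
}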
+static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_congestion_algorithm cong_field;
+ struct ib_device *ibdev = ibqp->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ u32 dip_idx = 0;
+ int ret;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
+ return 0;
+
+ ret = check_cong_type(ibqp, &cong_field);
+ if (ret)
+ return ret;
+
+ hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
+ hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+ hr_reg_write(qpc_mask, QPC_CONG_ALGO_TMPL_ID, 0);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
+ hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SEL, 0);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
+ cong_field.alg_sub_sel);
+ hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL, 0);
+ hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
+ hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD, 0);
+
+ /* if dip is disabled, there is no need to set dip idx */
+ if (cong_field.dip_vld == 0)
+ return 0;
+
+ ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
+ if (ret) {
+ ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
+ hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
+
+ return 0;
+}
+
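Each hr_reg_write(context, F, v) in fill_cong_field() is paired with hr_reg_write(qpc_mask, F, 0): QPC modification works on a value/mask pair where the mask starts as all ones and a cleared mask bit tells the hardware to take that field from the new context. A toy model of that convention, with an invented field layout:

#include <stdint.h>

/* Apply a masked QPC update the way the hardware would: bits still
 * set in @mask keep their old value, cleared bits come from @ctx. */
uint32_t apply_masked_update(uint32_t old, uint32_t ctx, uint32_t mask)
{
	return (old & mask) | (ctx & ~mask);
}

/* Driver-side helper: stage a field update by writing the value into
 * the context and clearing the same bits in the mask. */
void stage_field(uint32_t *ctx, uint32_t *mask,
		 uint32_t field_mask, uint32_t val, unsigned int shift)
{
	*ctx &= ~field_mask;
	*ctx |= (val << shift) & field_mask;
	*mask &= ~field_mask; /* cleared mask bit == "update this field" */
}

This is why every context write in the driver has a mask twin: a field whose mask bits stay set is left untouched by the modify operation.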
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4537,6 +4924,10 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+ ret = fill_cong_field(ibqp, attr, context, qpc_mask);
+ if (ret)
+ return ret;
+
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
@@ -4687,7 +5078,6 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
}
- /* RC&UC&UD required attr */
if (attr_mask & IB_QP_SQ_PSN) {
roce_set_field(context->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -4765,7 +5155,6 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
}
- /* RC&UC required attr */
if (attr_mask & IB_QP_RQ_PSN) {
roce_set_field(context->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
@@ -4808,6 +5197,29 @@ static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
}
}
+static void clear_qp(struct hns_roce_qp *hr_qp)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (ibqp->send_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
+ hr_qp->qpn, ibqp->srq ?
+ to_hr_srq(ibqp->srq) : NULL);
+
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ *hr_qp->rdb.db_record = 0;
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->next_sge = 0;
+}
+
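clear_qp() consolidates what the reset path used to do inline: purge stale completions for this QPN from each distinct CQ, zero the recorded doorbell, and rewind the ring indices. The recv_cq != send_cq test is the interesting part, since a QP may use one CQ for both directions; a reduced sketch with placeholder types:

#include <stdio.h>

struct cq { int id; };

static void cq_clean(struct cq *cq, int qpn)
{
	printf("purge completions of QP %d from CQ %d\n", qpn, cq->id);
}

/* Clean each distinct CQ exactly once, even when a QP uses the same
 * CQ for send and receive. */
void clean_qp_cqs(struct cq *send_cq, struct cq *recv_cq, int qpn)
{
	if (send_cq)
		cq_clean(send_cq, qpn);
	if (recv_cq && recv_cq != send_cq)
		cq_clean(recv_cq, qpn);
}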
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
@@ -4842,19 +5254,23 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* When QP state is err, SQ and RQ WQEs should be flushed */
if (new_state == IB_QPS_ERR) {
- spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
- hr_qp->state = IB_QPS_ERR;
- roce_set_field(context->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
- hr_qp->sq.head);
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
- spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
-
- if (!ibqp->srq) {
+ if (ibqp->qp_type != IB_QPT_XRC_TGT) {
+ spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ hr_qp->state = IB_QPS_ERR;
+ roce_set_field(context->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+ hr_qp->sq.head);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
+ }
+
+ if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC_INI &&
+ ibqp->qp_type != IB_QPT_XRC_TGT) {
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ hr_qp->state = IB_QPS_ERR;
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
@@ -4873,7 +5289,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
- ibqp->srq ? 1 : 0);
+ ((to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) ||
+ ibqp->srq) ? 1 : 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_INV_CREDIT_S, 0);
@@ -4894,21 +5311,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- hr_qp->next_sge = 0;
- if (hr_qp->rq.wqe_cnt)
- *hr_qp->rdb.db_record = 0;
- }
+ if (new_state == IB_QPS_RESET && !ibqp->uobject)
+ clear_qp(hr_qp);
out:
return ret;
@@ -5019,7 +5423,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
- hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
struct ib_global_route *grh =
rdma_ah_retrieve_grh(&qp_attr->ah_attr);
@@ -5051,6 +5456,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S);
+
qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S);
@@ -5068,6 +5474,7 @@ done:
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
@@ -5085,6 +5492,15 @@ out:
return ret;
}
+static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
+{
+ return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ hr_qp->state != IB_QPS_RESET);
+}
+
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_udata *udata)
@@ -5094,9 +5510,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
unsigned long flags;
int ret = 0;
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
- hr_qp->ibqp.qp_type == IB_QPT_UD) &&
- hr_qp->state != IB_QPS_RESET) {
+ if (modify_qp_is_ok(hr_qp)) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
@@ -5275,9 +5689,11 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
}
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+ hr_reg_write(ctx, SRQC_SRQ_TYPE,
+ !!(srq->ibsrq.srq_type == IB_SRQT_XRC));
hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
- hr_reg_write(ctx, SRQC_XRCD, 0);
+ hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
hr_reg_write(ctx, SRQC_RQWS,
@@ -5481,6 +5897,12 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
case HNS_ROCE_EVENT_TYPE_FLR:
ibdev_warn(ibdev, "Function level reset.\n");
break;
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ ibdev_err(ibdev, "xrc domain violation error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+ ibdev_err(ibdev, "invalid xrceth error.\n");
+ break;
default:
break;
}
@@ -5505,33 +5927,30 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
queue_work(hr_dev->irq_workq, &(irq_work->work));
}
-static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+static void update_eq_db(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- __le32 doorbell[2] = {};
+ struct hns_roce_v2_db eq_db = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
- roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
- HNS_ROCE_V2_EQ_DB_CMD_S,
+ roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
HNS_ROCE_EQ_DB_CMD_AEQ :
HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
} else {
- roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
- HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+ roce_set_field(eq_db.byte_4, V2_EQ_DB_TAG_M, V2_EQ_DB_TAG_S,
+ eq->eqn);
- roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
- HNS_ROCE_V2_EQ_DB_CMD_S,
+ roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
HNS_ROCE_EQ_DB_CMD_CEQ :
HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
}
- roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
- HNS_ROCE_V2_EQ_DB_PARA_S,
- (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+ roce_set_field(eq_db.parameter, V2_EQ_DB_CONS_IDX_M,
+ V2_EQ_DB_CONS_IDX_S, eq->cons_index);
- hns_roce_write64(hr_dev, doorbell, eq->doorbell);
+ hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}
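update_eq_db() now assembles the doorbell as a typed two-word structure, byte_4 carrying the tag and arm command and parameter carrying the consumer index, then issues it as one 64-bit write. A sketch of the packing, following the V2_EQ_DB_* bit positions defined later in the header and assuming a little-endian 64-bit store:

#include <stdint.h>

#define EQ_DB_TAG_SHIFT     0         /* bits 7:0 of word 0: EQ number */
#define EQ_DB_CMD_SHIFT     16        /* bits 17:16 of word 0: arm cmd */
#define EQ_DB_CONS_IDX_MASK 0xffffffu /* bits 23:0 of word 1 */

/* Pack an EQ doorbell: word 0 = tag + command, word 1 = consumer
 * index; word 0 lands in the low half of a little-endian store. */
uint64_t pack_eq_db(uint8_t eqn, uint8_t cmd, uint32_t cons_index)
{
	uint32_t w0 = ((uint32_t)eqn << EQ_DB_TAG_SHIFT) |
		      ((uint32_t)cmd << EQ_DB_CMD_SHIFT);
	uint32_t w1 = cons_index & EQ_DB_CONS_IDX_MASK;

	return (uint64_t)w1 << 32 | w0;
}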
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
@@ -5581,6 +6000,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
hns_roce_qp_event(hr_dev, queue_num, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
@@ -5616,7 +6037,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
aeqe = next_aeqe_sw_v2(eq);
}
- set_eq_cons_index_v2(eq);
+ update_eq_db(eq);
return aeqe_found;
}
@@ -5656,7 +6077,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
ceqe = next_ceqe_sw_v2(eq);
}
- set_eq_cons_index_v2(eq);
+ update_eq_db(eq);
return ceqe_found;
}
@@ -5710,58 +6131,34 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
- dev_err(dev, "BUS ERR!\n");
-
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
-
- int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
-
- int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
- dev_err(dev, "OTHER ERR!\n");
+ } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
+ dev_err(dev, "RAS interrupt!\n");
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
+ int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
- } else
+ } else {
dev_err(dev, "There is no abnormal irq found!\n");
+ }
return IRQ_RETVAL(int_work);
}
static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
- int eq_num, int enable_flag)
+ int eq_num, u32 enable_flag)
{
int i;
- if (enable_flag == EQ_ENABLE) {
- for (i = 0; i < eq_num; i++)
- roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
- i * EQ_REG_OFFSET,
- HNS_ROCE_V2_VF_EVENT_INT_EN_M);
-
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
- HNS_ROCE_V2_VF_ABN_INT_EN_M);
- roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
- HNS_ROCE_V2_VF_ABN_INT_CFG_M);
- } else {
- for (i = 0; i < eq_num; i++)
- roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
- i * EQ_REG_OFFSET,
- HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET, enable_flag);
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
- HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
- roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
- HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
- }
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}
static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
@@ -5786,6 +6183,16 @@ static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}
+static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->shift = ilog2((unsigned int)eq->entries);
+}
+
static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
void *mb_buf)
{
@@ -5797,13 +6204,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
- /* init eqc */
- eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
- eq->cons_index = 0;
- eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
- eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
- eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
- eq->shift = ilog2((unsigned int)eq->entries);
+ init_eq_config(hr_dev, eq);
/* if not multi-hop, the eqe buffer uses only one trunk */
count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
@@ -5813,102 +6214,34 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
return -ENOBUFS;
}
- /* set eqc state */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
- HNS_ROCE_V2_EQ_STATE_VALID);
-
- /* set eqe hop num */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
- HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
-
- /* set eqc over_ignore */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
- HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
-
- /* set eqc coalesce */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
- HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
-
- /* set eqc arm_state */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
- HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
-
- /* set eqn */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
- eq->eqn);
-
- /* set eqe_cnt */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
- HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
-
- /* set eqe_ba_pg_sz */
- roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
- HNS_ROCE_EQC_BA_PG_SZ_S,
- to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
-
- /* set eqe_buf_pg_sz */
- roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
- HNS_ROCE_EQC_BUF_PG_SZ_S,
- to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
-
- /* set eq_producer_idx */
- roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
- HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
-
- /* set eq_max_cnt */
- roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
- HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
-
- /* set eq_period */
- roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
- HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
-
- /* set eqe_report_timer */
- roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
- HNS_ROCE_EQC_REPORT_TIMER_S,
- HNS_ROCE_EQ_INIT_REPORT_TIMER);
-
- /* set bt_ba [34:3] */
- roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
- HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
-
- /* set bt_ba [64:35] */
- roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
- HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
-
- /* set eq shift */
- roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
- eq->shift);
-
- /* set eq MSI_IDX */
- roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
- HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
-
- /* set cur_eqe_ba [27:12] */
- roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
- HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
-
- /* set cur_eqe_ba [59:28] */
- roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
- HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
-
- /* set cur_eqe_ba [63:60] */
- roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
- HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
-
- /* set eq consumer idx */
- roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
- HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
-
- roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
- HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
-
- roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
- HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
-
- roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
- HNS_ROCE_EQC_EQE_SIZE_S,
- eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
+ hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
+ hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
+ hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
+ hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
+ hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
+ hr_reg_write(eqc, EQC_EQN, eq->eqn);
+ hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
+ hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
+ hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
+
+ hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
+ hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
+ hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
+ hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
+ hr_reg_write(eqc, EQC_SHIFT, eq->shift);
+ hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
+ hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
+ hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
+ hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
+ hr_reg_write(eqc, EQC_EQE_SIZE,
+ !!(eq->eqe_size == HNS_ROCE_V3_EQE_SIZE));
return 0;
}
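The rewritten config_eqc() collapses dozens of mask/shift pairs into hr_reg_write(eqc, FIELD, val), where each EQC_* macro encodes an absolute (high, low) bit range across the whole context structure. A sketch of that addressing scheme, inferred from the FIELD_LOC definitions in the header and assuming fields never straddle a 32-bit word:

#include <stdint.h>

/* Write @val into bits [h:l] of a context viewed as an array of
 * 32-bit words; fields must stay inside one word in this sketch. */
static void reg_write_field(uint32_t *ctx, unsigned int h, unsigned int l,
			    uint32_t val)
{
	unsigned int word = l / 32;
	unsigned int shift = l % 32;
	uint32_t mask = (h - l == 31) ?
			~0u : (((1u << (h - l + 1)) - 1) << shift);

	ctx[word] = (ctx[word] & ~mask) | ((val << shift) & mask);
}

/* Example: EQC_EQN is EQC_FIELD_LOC(15, 8), i.e. bits [15:8]. */
void set_eqn(uint32_t *eqc, uint8_t eqn)
{
	reg_write_field(eqc, 15, 8, eqn);
}

The real helper additionally handles little-endian conversion, which is omitted here for brevity.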
@@ -6166,6 +6499,7 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
__hns_roce_free_irq(hr_dev);
+ destroy_workqueue(hr_dev->irq_workq);
for (i = 0; i < eq_num; i++) {
hns_roce_v2_destroy_eqc(hr_dev, i);
@@ -6174,9 +6508,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
}
kfree(eq_table->eq);
-
- flush_workqueue(hr_dev->irq_workq);
- destroy_workqueue(hr_dev->irq_workq);
}
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
@@ -6205,9 +6536,9 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.hw_profile = hns_roce_v2_profile,
.hw_init = hns_roce_v2_init,
.hw_exit = hns_roce_v2_exit,
- .post_mbox = hns_roce_v2_post_mbox,
- .chk_mbox = hns_roce_v2_chk_mbox,
- .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
+ .post_mbox = v2_post_mbox,
+ .poll_mbox_done = v2_poll_mbox_done,
+ .chk_mbox_avail = v2_chk_mbox_is_avail,
.set_gid = hns_roce_v2_set_gid,
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
@@ -6218,20 +6549,10 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
.modify_qp = hns_roce_v2_modify_qp,
- .query_qp = hns_roce_v2_query_qp,
- .destroy_qp = hns_roce_v2_destroy_qp,
.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
- .modify_cq = hns_roce_v2_modify_cq,
- .post_send = hns_roce_v2_post_send,
- .post_recv = hns_roce_v2_post_recv,
- .req_notify_cq = hns_roce_v2_req_notify_cq,
- .poll_cq = hns_roce_v2_poll_cq,
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
.write_srqc = hns_roce_v2_write_srqc,
- .modify_srq = hns_roce_v2_modify_srq,
- .query_srq = hns_roce_v2_query_srq,
- .post_srq_recv = hns_roce_v2_post_srq_recv,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6243,6 +6564,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -6253,9 +6576,12 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
int i;
hr_dev->pci_dev = handle->pdev;
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+ hr_dev->is_vf = id->driver_data;
hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
@@ -6272,7 +6598,7 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
hr_dev->iboe.netdevs[0]->dev_addr);
- for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+ for (i = 0; i < handle->rinfo.num_vectors; i++)
hr_dev->irq[i] = pci_irq_vector(handle->pdev,
i + handle->rinfo.base_vector);
@@ -6356,6 +6682,9 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
if (!id)
return 0;
+ if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+ return 0;
+
ret = __hns_roce_hw_v2_init_instance(handle);
if (ret) {
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 39621fb6ec16..a2100a629859 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -40,13 +40,11 @@
#define HNS_ROCE_VF_SRQC_BT_NUM 64
#define HNS_ROCE_VF_CQC_BT_NUM 64
#define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_EQC_NUM 64
#define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SGID_NUM 32
#define HNS_ROCE_VF_SL_NUM 8
#define HNS_ROCE_VF_GMV_BT_NUM 256
-#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ 0x100000
@@ -61,6 +59,7 @@
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_MAX_SQ_INL_EXT 0x400
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -74,6 +73,8 @@
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
+#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
+#define HNS_ROCE_V2_RSV_XRCD_NUM 0
#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
@@ -121,7 +122,7 @@
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
-#define HNS_ROCE_V2_GID_INDEX_NUM 256
+#define HNS_ROCE_V2_GID_INDEX_NUM 16
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -143,6 +144,8 @@
#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
+#define HNS_ROCE_CONG_SIZE 64
+
#define check_whether_last_step(hop_num, step_idx) \
((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
(step_idx == 1 && hop_num == 1) || \
@@ -195,11 +198,11 @@ enum {
};
enum {
- HNS_ROCE_V2_SQ_DB = 0x0,
- HNS_ROCE_V2_RQ_DB = 0x1,
- HNS_ROCE_V2_SRQ_DB = 0x2,
- HNS_ROCE_V2_CQ_DB_PTR = 0x3,
- HNS_ROCE_V2_CQ_DB_NTR = 0x4,
+ HNS_ROCE_V2_SQ_DB,
+ HNS_ROCE_V2_RQ_DB,
+ HNS_ROCE_V2_SRQ_DB,
+ HNS_ROCE_V2_CQ_DB,
+ HNS_ROCE_V2_CQ_DB_NOTIFY
};
enum {
@@ -233,6 +236,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407,
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409,
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
@@ -244,6 +248,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CLR_SCCC = 0x8509,
HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
@@ -255,10 +260,20 @@ enum {
};
enum hns_roce_cmd_return_status {
- CMD_EXEC_SUCCESS = 0,
- CMD_NO_AUTH = 1,
- CMD_NOT_EXEC = 2,
- CMD_QUEUE_FULL = 3,
+ CMD_EXEC_SUCCESS,
+ CMD_NO_AUTH,
+ CMD_NOT_EXIST,
+ CMD_CRQ_FULL,
+ CMD_NEXT_ERR,
+ CMD_NOT_EXEC,
+ CMD_PARA_ERR,
+ CMD_RESULT_ERR,
+ CMD_TIMEOUT,
+ CMD_HILINK_ERR,
+ CMD_INFO_ILLEGAL,
+ CMD_INVALID,
+ CMD_ROH_CHECK_FAIL,
+ CMD_OTHER_ERR = 0xff
};
enum hns_roce_sgid_type {
@@ -399,7 +414,8 @@ struct hns_roce_srq_context {
#define SRQC_CONSUMER_IDX SRQC_FIELD_LOC(127, 112)
#define SRQC_WQE_BT_BA_L SRQC_FIELD_LOC(159, 128)
#define SRQC_WQE_BT_BA_H SRQC_FIELD_LOC(188, 160)
-#define SRQC_RSV2 SRQC_FIELD_LOC(191, 189)
+#define SRQC_RSV2 SRQC_FIELD_LOC(190, 189)
+#define SRQC_SRQ_TYPE SRQC_FIELD_LOC(191, 191)
#define SRQC_PD SRQC_FIELD_LOC(215, 192)
#define SRQC_RQWS SRQC_FIELD_LOC(219, 216)
#define SRQC_RSV3 SRQC_FIELD_LOC(223, 220)
@@ -572,6 +588,10 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_qp_context_ex ext;
};
+#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
+
+#define QPC_CONG_ALGO_TMPL_ID QPC_FIELD_LOC(455, 448)
+
#define V2_QPC_BYTE_4_TST_S 0
#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
@@ -663,9 +683,6 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
-#define V2_QPC_BYTE_60_TEMPID_S 0
-#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)
-
#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
@@ -698,6 +715,8 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
+#define V2_QPC_BYTE_80_XRC_QP_TYPE_S 24
+
#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
@@ -940,6 +959,10 @@ struct hns_roce_v2_qp_context {
#define QPCEX_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context_ex, h, l)
+#define QPCEX_CONG_ALG_SEL QPCEX_FIELD_LOC(0, 0)
+#define QPCEX_CONG_ALG_SUB_SEL QPCEX_FIELD_LOC(1, 1)
+#define QPCEX_DIP_CTX_IDX_VLD QPCEX_FIELD_LOC(2, 2)
+#define QPCEX_DIP_CTX_IDX QPCEX_FIELD_LOC(22, 3)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
#define V2_QP_RWE_S 1 /* rdma write enable */
@@ -1130,33 +1153,27 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
-#define V2_DB_BYTE_4_TAG_S 0
-#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
+#define V2_DB_TAG_S 0
+#define V2_DB_TAG_M GENMASK(23, 0)
-#define V2_DB_BYTE_4_CMD_S 24
-#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_DB_CMD_S 24
+#define V2_DB_CMD_M GENMASK(27, 24)
#define V2_DB_FLAG_S 31
-#define V2_DB_PARAMETER_IDX_S 0
-#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
+#define V2_DB_PRODUCER_IDX_S 0
+#define V2_DB_PRODUCER_IDX_M GENMASK(15, 0)
-#define V2_DB_PARAMETER_SL_S 16
-#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
+#define V2_DB_SL_S 16
+#define V2_DB_SL_M GENMASK(18, 16)
-#define V2_CQ_DB_BYTE_4_TAG_S 0
-#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
+#define V2_CQ_DB_CONS_IDX_S 0
+#define V2_CQ_DB_CONS_IDX_M GENMASK(23, 0)
-#define V2_CQ_DB_BYTE_4_CMD_S 24
-#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_CQ_DB_NOTIFY_TYPE_S 24
-#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
-#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
-
-#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
-#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
-
-#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+#define V2_CQ_DB_CMD_SN_S 25
+#define V2_CQ_DB_CMD_SN_M GENMASK(26, 25)
struct hns_roce_v2_ud_send_wqe {
__le32 byte_4;
@@ -1359,194 +1376,44 @@ struct hns_roce_cfg_llm_b {
#define CFG_LLM_TAIL_PTR_S 0
#define CFG_LLM_TAIL_PTR_M GENMASK(11, 0)
-struct hns_roce_cfg_global_param {
- __le32 time_cfg_udp_port;
- __le32 rsv[5];
-};
-
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
-
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
-
-struct hns_roce_pf_res_a {
- __le32 rsv;
- __le32 qpc_bt_idx_num;
- __le32 srqc_bt_idx_num;
- __le32 cqc_bt_idx_num;
- __le32 mpt_bt_idx_num;
- __le32 eqc_bt_idx_num;
-};
-
-#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
-#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
-
-#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
-#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
-
-#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
-#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
-#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
-#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
-#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
-#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
-#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
-#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
-#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
-
-struct hns_roce_pf_res_b {
- __le32 rsv0;
- __le32 smac_idx_num;
- __le32 sgid_idx_num;
- __le32 qid_idx_sl_num;
- __le32 sccc_bt_idx_num;
- __le32 gmv_idx_num;
-};
-
-#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
-#define PF_RES_DATA_1_PF_SMAC_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_1_PF_SMAC_NUM_S 8
-#define PF_RES_DATA_1_PF_SMAC_NUM_M GENMASK(16, 8)
-
-#define PF_RES_DATA_2_PF_SGID_IDX_S 0
-#define PF_RES_DATA_2_PF_SGID_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_2_PF_SGID_NUM_S 8
-#define PF_RES_DATA_2_PF_SGID_NUM_M GENMASK(16, 8)
-
-#define PF_RES_DATA_3_PF_QID_IDX_S 0
-#define PF_RES_DATA_3_PF_QID_IDX_M GENMASK(9, 0)
-
-#define PF_RES_DATA_3_PF_SL_NUM_S 16
-#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
-
-#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
-#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
-#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
-
-#define PF_RES_DATA_5_PF_GMV_BT_IDX_S 0
-#define PF_RES_DATA_5_PF_GMV_BT_IDX_M GENMASK(7, 0)
+/* Fields of HNS_ROCE_OPC_CFG_GLOBAL_PARAM */
+#define CFG_GLOBAL_PARAM_1US_CYCLES CMQ_REQ_FIELD_LOC(9, 0)
+#define CFG_GLOBAL_PARAM_UDP_PORT CMQ_REQ_FIELD_LOC(31, 16)
-#define PF_RES_DATA_5_PF_GMV_BT_NUM_S 8
-#define PF_RES_DATA_5_PF_GMV_BT_NUM_M GENMASK(16, 8)
-
-struct hns_roce_pf_timer_res_a {
- __le32 rsv0;
- __le32 qpc_timer_bt_idx_num;
- __le32 cqc_timer_bt_idx_num;
- __le32 rsv[3];
-};
-
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
-
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
-
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
-
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
-
-struct hns_roce_vf_res_a {
- __le32 vf_id;
- __le32 vf_qpc_bt_idx_num;
- __le32 vf_srqc_bt_idx_num;
- __le32 vf_cqc_bt_idx_num;
- __le32 vf_mpt_bt_idx_num;
- __le32 vf_eqc_bt_idx_num;
-};
-
-#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
-#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
-
-#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
-#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
-
-#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
-#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
-#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
-#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
-#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
-#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
-#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
-#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
-#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
-
-struct hns_roce_vf_res_b {
- __le32 rsv0;
- __le32 vf_smac_idx_num;
- __le32 vf_sgid_idx_num;
- __le32 vf_qid_idx_sl_num;
- __le32 vf_sccc_idx_num;
- __le32 vf_gmv_idx_num;
-};
-
-#define VF_RES_B_DATA_0_VF_ID_S 0
-#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
-#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
-#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
-
-#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
-#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
-#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
-
-#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
-#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
-
-#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
-#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
-
-#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
-#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
-#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
-
-#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_S 0
-#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_S 16
-#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_M GENMASK(24, 16)
+/*
+ * Fields of HNS_ROCE_OPC_QUERY_PF_RES, HNS_ROCE_OPC_QUERY_VF_RES
+ * and HNS_ROCE_OPC_ALLOC_VF_RES
+ */
+#define FUNC_RES_A_VF_ID CMQ_REQ_FIELD_LOC(7, 0)
+#define FUNC_RES_A_QPC_BT_IDX CMQ_REQ_FIELD_LOC(42, 32)
+#define FUNC_RES_A_QPC_BT_NUM CMQ_REQ_FIELD_LOC(59, 48)
+#define FUNC_RES_A_SRQC_BT_IDX CMQ_REQ_FIELD_LOC(72, 64)
+#define FUNC_RES_A_SRQC_BT_NUM CMQ_REQ_FIELD_LOC(89, 80)
+#define FUNC_RES_A_CQC_BT_IDX CMQ_REQ_FIELD_LOC(104, 96)
+#define FUNC_RES_A_CQC_BT_NUM CMQ_REQ_FIELD_LOC(121, 112)
+#define FUNC_RES_A_MPT_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_A_MPT_BT_NUM CMQ_REQ_FIELD_LOC(153, 144)
+#define FUNC_RES_A_EQC_BT_IDX CMQ_REQ_FIELD_LOC(168, 160)
+#define FUNC_RES_A_EQC_BT_NUM CMQ_REQ_FIELD_LOC(185, 176)
+#define FUNC_RES_B_SMAC_IDX CMQ_REQ_FIELD_LOC(39, 32)
+#define FUNC_RES_B_SMAC_NUM CMQ_REQ_FIELD_LOC(48, 40)
+#define FUNC_RES_B_SGID_IDX CMQ_REQ_FIELD_LOC(71, 64)
+#define FUNC_RES_B_SGID_NUM CMQ_REQ_FIELD_LOC(80, 72)
+#define FUNC_RES_B_QID_IDX CMQ_REQ_FIELD_LOC(105, 96)
+#define FUNC_RES_B_QID_NUM CMQ_REQ_FIELD_LOC(122, 112)
+#define FUNC_RES_V_QID_NUM CMQ_REQ_FIELD_LOC(115, 112)
+
+#define FUNC_RES_B_SCCC_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_B_SCCC_BT_NUM CMQ_REQ_FIELD_LOC(145, 137)
+#define FUNC_RES_B_GMV_BT_IDX CMQ_REQ_FIELD_LOC(167, 160)
+#define FUNC_RES_B_GMV_BT_NUM CMQ_REQ_FIELD_LOC(176, 168)
+#define FUNC_RES_V_GMV_BT_NUM CMQ_REQ_FIELD_LOC(184, 176)
+
+/* Fields of HNS_ROCE_OPC_QUERY_PF_TIMER_RES */
+#define PF_TIMER_RES_QPC_ITEM_IDX CMQ_REQ_FIELD_LOC(43, 32)
+#define PF_TIMER_RES_QPC_ITEM_NUM CMQ_REQ_FIELD_LOC(60, 48)
+#define PF_TIMER_RES_CQC_ITEM_IDX CMQ_REQ_FIELD_LOC(74, 64)
+#define PF_TIMER_RES_CQC_ITEM_NUM CMQ_REQ_FIELD_LOC(91, 80)
struct hns_roce_vf_switch {
__le32 rocee_sel;
@@ -1578,59 +1445,43 @@ struct hns_roce_mbox_status {
__le32 rsv[5];
};
-struct hns_roce_cfg_bt_attr {
- __le32 vf_qpc_cfg;
- __le32 vf_srqc_cfg;
- __le32 vf_cqc_cfg;
- __le32 vf_mpt_cfg;
- __le32 vf_sccc_cfg;
- __le32 rsv;
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+
+#define MB_ST_HW_RUN_M BIT(31)
+#define MB_ST_COMPLETE_M GENMASK(7, 0)
+
+#define MB_ST_COMPLETE_SUCC 1
+
+/* Fields of HNS_ROCE_OPC_CFG_BT_ATTR */
+#define CFG_BT_ATTR_QPC_BA_PGSZ CMQ_REQ_FIELD_LOC(3, 0)
+#define CFG_BT_ATTR_QPC_BUF_PGSZ CMQ_REQ_FIELD_LOC(7, 4)
+#define CFG_BT_ATTR_QPC_HOPNUM CMQ_REQ_FIELD_LOC(9, 8)
+#define CFG_BT_ATTR_SRQC_BA_PGSZ CMQ_REQ_FIELD_LOC(35, 32)
+#define CFG_BT_ATTR_SRQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(39, 36)
+#define CFG_BT_ATTR_SRQC_HOPNUM CMQ_REQ_FIELD_LOC(41, 40)
+#define CFG_BT_ATTR_CQC_BA_PGSZ CMQ_REQ_FIELD_LOC(67, 64)
+#define CFG_BT_ATTR_CQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(71, 68)
+#define CFG_BT_ATTR_CQC_HOPNUM CMQ_REQ_FIELD_LOC(73, 72)
+#define CFG_BT_ATTR_MPT_BA_PGSZ CMQ_REQ_FIELD_LOC(99, 96)
+#define CFG_BT_ATTR_MPT_BUF_PGSZ CMQ_REQ_FIELD_LOC(103, 100)
+#define CFG_BT_ATTR_MPT_HOPNUM CMQ_REQ_FIELD_LOC(105, 104)
+#define CFG_BT_ATTR_SCCC_BA_PGSZ CMQ_REQ_FIELD_LOC(131, 128)
+#define CFG_BT_ATTR_SCCC_BUF_PGSZ CMQ_REQ_FIELD_LOC(135, 132)
+#define CFG_BT_ATTR_SCCC_HOPNUM CMQ_REQ_FIELD_LOC(137, 136)
+
+/* Fields of HNS_ROCE_OPC_CFG_ENTRY_SIZE */
+#define CFG_HEM_ENTRY_SIZE_TYPE CMQ_REQ_FIELD_LOC(31, 0)
+enum {
+ HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+ HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
};
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
+#define CFG_HEM_ENTRY_SIZE_VALUE CMQ_REQ_FIELD_LOC(191, 160)
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
+/* Fields of HNS_ROCE_OPC_CFG_GMV_BT */
+#define CFG_GMV_BT_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
+#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
struct hns_roce_cfg_sgid_tb {
__le32 table_idx_rsv;
@@ -1641,17 +1492,6 @@ struct hns_roce_cfg_sgid_tb {
__le32 vf_sgid_type_rsv;
};
-enum {
- HNS_ROCE_CFG_QPC_SIZE = BIT(0),
- HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
-};
-
-struct hns_roce_cfg_entry_size {
- __le32 type;
- __le32 rsv[4];
- __le32 size;
-};
-
#define CFG_SGID_TB_TABLE_IDX_S 0
#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
@@ -1670,16 +1510,6 @@ struct hns_roce_cfg_smac_tb {
#define CFG_SMAC_TB_VF_SMAC_H_S 0
#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
-struct hns_roce_cfg_gmv_bt {
- __le32 gmv_ba_l;
- __le32 gmv_ba_h;
- __le32 gmv_bt_idx;
- __le32 rsv[3];
-};
-
-#define CFG_GMV_BA_H_S 0
-#define CFG_GMV_BA_H_M GENMASK(19, 0)
-
struct hns_roce_cfg_gmv_tb_a {
__le32 vf_sgid_l;
__le32 vf_sgid_ml;
@@ -1805,6 +1635,14 @@ struct hns_roce_query_pf_caps_d {
#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24
#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24)
+#define V2_QUERY_PF_CAPS_D_CONG_TYPE_S 26
+#define V2_QUERY_PF_CAPS_D_CONG_TYPE_M GENMASK(29, 26)
+
+struct hns_roce_congestion_algorithm {
+ u8 alg_sel;
+ u8 alg_sub_sel;
+ u8 dip_vld;
+};
#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0)
@@ -1859,18 +1697,27 @@ struct hns_roce_query_pf_caps_e {
#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0
#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0)
+struct hns_roce_cmq_req {
+ __le32 data[6];
+};
+
+#define CMQ_REQ_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cmq_req, h, l)
+
struct hns_roce_cmq_desc {
__le16 opcode;
__le16 flag;
__le16 retval;
__le16 rsv;
- __le32 data[6];
-};
-
-#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+ union {
+ __le32 data[6];
+ struct {
+ __le32 own_func_num;
+ __le32 own_mac_id;
+ __le32 rsv[4];
+ } func_info;
+ };
-#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
-#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
+};
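The descriptor payload is now an anonymous union, so the 24 data bytes can be read either as raw words or, for the QUERY_FUNC_INFO reply, by field name without casting. A minimal illustration of the pattern (the kernel struct uses __le32, which matters on big-endian hosts; plain uint32_t is used here for brevity):

#include <stdint.h>
#include <stdio.h>

struct cmq_desc {
	uint16_t opcode;
	uint16_t flag;
	uint16_t retval;
	uint16_t rsv;
	union {                 /* two views of the same 24 bytes */
		uint32_t data[6];
		struct {
			uint32_t own_func_num;
			uint32_t own_mac_id;
			uint32_t rsv[4];
		} func_info;
	};
};

int main(void)
{
	struct cmq_desc d = { .data = {3, 7} };

	/* The typed view aliases the raw words. */
	printf("funcs=%u mac=%u\n", d.func_info.own_func_num,
	       d.func_info.own_mac_id);
	return 0;
}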
struct hns_roce_v2_cmq_ring {
dma_addr_t desc_dma_addr;
@@ -1932,6 +1779,12 @@ struct hns_roce_eq_context {
__le32 rsv[5];
};
+struct hns_roce_dip {
+ u8 dgid[GID_LEN_V2];
+ u8 dip_idx;
+ struct list_head node; /* all dips are on a list */
+};
+
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
@@ -1966,8 +1819,7 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
-#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
-#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1
#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
@@ -1982,96 +1834,38 @@ struct hns_roce_eq_context {
#define HNS_ROCE_INT_NAME_LEN 32
#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
-#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
-
#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
-/* WORD0 */
-#define HNS_ROCE_EQC_EQ_ST_S 0
-#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
-
-#define HNS_ROCE_EQC_HOP_NUM_S 2
-#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
-
-#define HNS_ROCE_EQC_OVER_IGNORE_S 4
-#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
-
-#define HNS_ROCE_EQC_COALESCE_S 5
-#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
-
-#define HNS_ROCE_EQC_ARM_ST_S 6
-#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
-
-#define HNS_ROCE_EQC_EQN_S 8
-#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
-
-#define HNS_ROCE_EQC_EQE_CNT_S 16
-#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
-
-/* WORD1 */
-#define HNS_ROCE_EQC_BA_PG_SZ_S 0
-#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
-
-#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
-#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
-
-#define HNS_ROCE_EQC_PROD_INDX_S 8
-#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
-
-/* WORD2 */
-#define HNS_ROCE_EQC_MAX_CNT_S 0
-#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
-
-#define HNS_ROCE_EQC_PERIOD_S 16
-#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
-
-/* WORD3 */
-#define HNS_ROCE_EQC_REPORT_TIMER_S 0
-#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
-
-/* WORD4 */
-#define HNS_ROCE_EQC_EQE_BA_L_S 0
-#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
-
-/* WORD5 */
-#define HNS_ROCE_EQC_EQE_BA_H_S 0
-#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
-
-/* WORD6 */
-#define HNS_ROCE_EQC_SHIFT_S 0
-#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
-
-#define HNS_ROCE_EQC_MSI_INDX_S 8
-#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
-
-#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
-#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
-
-/* WORD7 */
-#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
-#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
-
-/* WORD8 */
-#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
-#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
-
-#define HNS_ROCE_EQC_CONS_INDX_S 8
-#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
-
-/* WORD9 */
-#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
-#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
-
-/* WORD10 */
-#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
-#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
-
-#define HNS_ROCE_EQC_EQE_SIZE_S 20
-#define HNS_ROCE_EQC_EQE_SIZE_M GENMASK(21, 20)
+#define EQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_eq_context, h, l)
+
+#define EQC_EQ_ST EQC_FIELD_LOC(1, 0)
+#define EQC_EQE_HOP_NUM EQC_FIELD_LOC(3, 2)
+#define EQC_OVER_IGNORE EQC_FIELD_LOC(4, 4)
+#define EQC_COALESCE EQC_FIELD_LOC(5, 5)
+#define EQC_ARM_ST EQC_FIELD_LOC(7, 6)
+#define EQC_EQN EQC_FIELD_LOC(15, 8)
+#define EQC_EQE_CNT EQC_FIELD_LOC(31, 16)
+#define EQC_EQE_BA_PG_SZ EQC_FIELD_LOC(35, 32)
+#define EQC_EQE_BUF_PG_SZ EQC_FIELD_LOC(39, 36)
+#define EQC_EQ_PROD_INDX EQC_FIELD_LOC(63, 40)
+#define EQC_EQ_MAX_CNT EQC_FIELD_LOC(79, 64)
+#define EQC_EQ_PERIOD EQC_FIELD_LOC(95, 80)
+#define EQC_EQE_REPORT_TIMER EQC_FIELD_LOC(127, 96)
+#define EQC_EQE_BA_L EQC_FIELD_LOC(159, 128)
+#define EQC_EQE_BA_H EQC_FIELD_LOC(188, 160)
+#define EQC_SHIFT EQC_FIELD_LOC(199, 192)
+#define EQC_MSI_INDX EQC_FIELD_LOC(207, 200)
+#define EQC_CUR_EQE_BA_L EQC_FIELD_LOC(223, 208)
+#define EQC_CUR_EQE_BA_M EQC_FIELD_LOC(255, 224)
+#define EQC_CUR_EQE_BA_H EQC_FIELD_LOC(259, 256)
+#define EQC_EQ_CONS_INDX EQC_FIELD_LOC(287, 264)
+#define EQC_NEX_EQE_BA_L EQC_FIELD_LOC(319, 288)
+#define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320)
+#define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340)
#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
@@ -2082,14 +1876,14 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
-#define HNS_ROCE_V2_EQ_DB_CMD_S 16
-#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
+#define V2_EQ_DB_TAG_S 0
+#define V2_EQ_DB_TAG_M GENMASK(7, 0)
-#define HNS_ROCE_V2_EQ_DB_TAG_S 0
-#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
+#define V2_EQ_DB_CMD_S 16
+#define V2_EQ_DB_CMD_M GENMASK(17, 16)
-#define HNS_ROCE_V2_EQ_DB_PARA_S 0
-#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
+#define V2_EQ_DB_CONS_IDX_S 0
+#define V2_EQ_DB_CONS_IDX_M GENMASK(23, 0)
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c9c0836394a2..6c6e82b11d8b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -42,7 +42,7 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr)
{
u8 phy_port;
u32 i;
@@ -63,7 +63,7 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
- u8 port = attr->port_num - 1;
+ u32 port = attr->port_num - 1;
int ret;
if (port >= hr_dev->caps.num_ports)
@@ -77,7 +77,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
- u8 port = attr->port_num - 1;
+ u32 port = attr->port_num - 1;
int ret;
if (port >= hr_dev->caps.num_ports)
@@ -88,7 +88,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
return ret;
}
-static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
+static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
unsigned long event)
{
struct device *dev = hr_dev->dev;
@@ -128,7 +128,7 @@ static int hns_roce_netdev_event(struct notifier_block *self,
struct hns_roce_ib_iboe *iboe = NULL;
struct hns_roce_dev *hr_dev = NULL;
int ret;
- u8 port;
+ u32 port;
hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
iboe = &hr_dev->iboe;
@@ -207,10 +207,13 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ props->device_cap_flags |= IB_DEVICE_XRC;
+
return 0;
}
-static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
+static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
struct ib_port_attr *props)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
@@ -218,7 +221,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct net_device *net_dev;
unsigned long flags;
enum ib_mtu mtu;
- u8 port;
+ u32 port;
port = port_num - 1;
@@ -258,12 +261,12 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
}
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
-static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
u16 *pkey)
{
*pkey = PKEY_ID;
@@ -300,12 +303,14 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return -EAGAIN;
resp.qp_tab_size = hr_dev->caps.num_qps;
+ resp.srq_tab_size = hr_dev->caps.num_srqs;
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_fail_uar_alloc;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
}
@@ -365,7 +370,7 @@ static int hns_roce_mmap(struct ib_ucontext *context,
}
}
-static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
+static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -390,6 +395,19 @@ static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
+static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
+{
+ u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
+ unsigned int major, minor, sub_minor;
+
+ major = upper_32_bits(fw_ver);
+ minor = high_16_bits(lower_32_bits(fw_ver));
+ sub_minor = low_16_bits(fw_ver);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
+ sub_minor);
+}
+
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
@@ -405,6 +423,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.uverbs_abi_ver = 1,
.uverbs_no_driver_id_binding = 1,
+ .get_dev_fw_str = hns_roce_get_fw_ver,
.add_gid = hns_roce_add_gid,
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
@@ -461,6 +480,13 @@ static const struct ib_device_ops hns_roce_dev_srq_ops = {
INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};
+static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
+ .alloc_xrcd = hns_roce_alloc_xrcd,
+ .dealloc_xrcd = hns_roce_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
+};
+
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -484,20 +510,20 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
- /* MW */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
- /* FRMR */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
- /* SRQ */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
+
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
for (i = 0; i < hr_dev->caps.num_ports; i++) {
@@ -704,7 +730,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&hr_dev->pgdir_list);
mutex_init(&hr_dev->pgdir_mutex);
}
@@ -727,10 +754,19 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_alloc_free;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
+ ret = hns_roce_init_xrcd_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to init xrcd table, ret = %d.\n",
+ ret);
+ goto err_pd_table_free;
+ }
+ }
+
ret = hns_roce_init_mr_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init memory region table.\n");
- goto err_pd_table_free;
+ goto err_xrcd_table_free;
}
hns_roce_init_cq_table(hr_dev);
@@ -759,6 +795,10 @@ err_cq_table_free:
hns_roce_cleanup_cq_table(hr_dev);
hns_roce_cleanup_mr_table(hr_dev);
+err_xrcd_table_free:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ hns_roce_cleanup_xrcd_table(hr_dev);
+
err_pd_table_free:
hns_roce_cleanup_pd_table(hr_dev);
@@ -886,6 +926,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
INIT_LIST_HEAD(&hr_dev->qp_list);
spin_lock_init(&hr_dev->qp_list_lock);
+ INIT_LIST_HEAD(&hr_dev->dip_list);
+ spin_lock_init(&hr_dev->dip_list_lock);
ret = hns_roce_register_device(hr_dev);
if (ret)
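For reference, the hns_roce_get_fw_ver() handler added above decodes caps.fw_ver as major:minor:sub_minor packed into bits 63:32, 31:16 and 15:0. A sketch of the inverse packing (hypothetical helper, for illustration only):

	/* Inverse of the decomposition in hns_roce_get_fw_ver() (sketch). */
	static u64 hns_roce_pack_fw_ver(u32 major, u16 minor, u16 sub_minor)
	{
		return ((u64)major << 32) | ((u64)minor << 16) | sub_minor;
	}

So a caps.fw_ver of 0x0000000100020003 renders as "1.2.0003" through the "%u.%u.%04u" format above.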
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index cca818d05a8f..a5813bf567b2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -137,3 +137,62 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
}
+
+static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
+{
+ unsigned long obj;
+ int ret;
+
+ ret = hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, &obj);
+ if (ret)
+ return ret;
+
+ *xrcdn = obj;
+
+ return 0;
+}
+
+static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
+ u32 xrcdn)
+{
+ hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR);
+}
+
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+ return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
+ hr_dev->caps.num_xrcds,
+ hr_dev->caps.num_xrcds - 1,
+ hr_dev->caps.reserved_xrcds, 0);
+}
+
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
+}
+
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+ struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
+ int ret;
+
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+ return -EINVAL;
+
+ ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to alloc xrcdn, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+ hns_roce_xrcd_free(to_hr_dev(ib_xrcd->device),
+ to_hr_xrcd(ib_xrcd)->xrcdn);
+
+ return 0;
+}
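The XRCD number space mirrors the PD one: a bitmap of caps.num_xrcds entries with the bottom caps.reserved_xrcds IDs held back. A sketch of the alloc/free pairing (both helpers are static above, so the call site shown is illustrative only):

	u32 xrcdn;

	if (hns_roce_xrcd_alloc(hr_dev, &xrcdn))	/* returns 0 on success */
		return -ENOMEM;
	/* ... attach xrcdn to QPs/SRQs in this domain ... */
	hns_roce_xrcd_free(hr_dev, xrcdn);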
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 004aca9086ab..230a909ba9bc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -98,7 +98,9 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
(event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
qp->state = IB_QPS_ERR;
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
init_flush_work(hr_dev, qp);
@@ -142,6 +144,8 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
event.event = IB_EVENT_QP_REQ_ERR;
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
default:
@@ -366,8 +370,13 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
unsigned long flags;
list_del(&hr_qp->node);
- list_del(&hr_qp->sq_node);
- list_del(&hr_qp->rq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->sq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
+ hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->rq_node);
xa_lock_irqsave(xa, flags);
__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
@@ -478,7 +487,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
+ hr_qp->ibqp.qp_type != IB_QPT_UD &&
+ hr_qp->ibqp.qp_type != IB_QPT_GSI)
hr_qp->rq_inl_buf.wqe_cnt = cnt;
else
hr_qp->rq_inl_buf.wqe_cnt = 0;
@@ -776,7 +787,7 @@ static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp_resp *resp,
struct hns_roce_ib_create_qp *ucmd)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_sq(init_attr) &&
udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
@@ -787,7 +798,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_udata *udata,
struct hns_roce_ib_create_qp_resp *resp)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_rq(init_attr));
}
@@ -795,7 +806,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_qp_init_attr *init_attr)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
hns_roce_qp_has_rq(init_attr));
}
@@ -840,11 +851,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
} else {
- /* QP doorbell register address */
- hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
- hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ hr_qp->sq.db_reg = hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+ else
+ hr_qp->sq.db_reg =
+ hr_dev->reg_base + hr_dev->sdb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
if (kernel_qp_has_rdb(hr_dev, init_attr)) {
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
@@ -1011,36 +1027,36 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
}
- ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
- ret);
- goto err_wrid;
- }
-
ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
if (ret) {
ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
- goto err_db;
+ goto err_buf;
}
ret = alloc_qpn(hr_dev, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
- goto err_buf;
+ goto err_qpn;
+ }
+
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+ ret);
+ goto err_db;
}
ret = alloc_qpc(hr_dev, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
ret);
- goto err_qpn;
+ goto err_qpc;
}
ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
if (ret) {
ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
- goto err_qpc;
+ goto err_store;
}
if (udata) {
@@ -1055,7 +1071,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret)
- goto err_store;
+ goto err_flow_ctrl;
}
hr_qp->ibqp.qp_num = hr_qp->qpn;
@@ -1065,17 +1081,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
return 0;
-err_store:
+err_flow_ctrl:
hns_roce_qp_remove(hr_dev, hr_qp);
-err_qpc:
+err_store:
free_qpc(hr_dev, hr_qp);
-err_qpn:
+err_qpc:
+ free_qp_db(hr_dev, hr_qp, udata);
+err_db:
free_qpn(hr_dev, hr_qp);
-err_buf:
+err_qpn:
free_qp_buf(hr_dev, hr_qp);
-err_db:
- free_qp_db(hr_dev, hr_qp, udata);
-err_wrid:
+err_buf:
free_kernel_wrid(hr_qp);
return ret;
}
@@ -1100,11 +1116,16 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
bool is_user)
{
switch (type) {
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+ goto out;
+ break;
case IB_QPT_UD:
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
- fallthrough;
+ break;
case IB_QPT_RC:
case IB_QPT_GSI:
break;
@@ -1124,8 +1145,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp;
int ret;
@@ -1137,6 +1158,15 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
if (!hr_qp)
return ERR_PTR(-ENOMEM);
+ if (init_attr->qp_type == IB_QPT_XRC_INI)
+ init_attr->recv_cq = NULL;
+
+ if (init_attr->qp_type == IB_QPT_XRC_TGT) {
+ hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
+ init_attr->recv_cq = NULL;
+ init_attr->send_cq = NULL;
+ }
+
if (init_attr->qp_type == IB_QPT_GSI) {
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1156,20 +1186,18 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
int to_hr_qp_type(int qp_type)
{
- int transport_type;
-
- if (qp_type == IB_QPT_RC)
- transport_type = SERV_TYPE_RC;
- else if (qp_type == IB_QPT_UC)
- transport_type = SERV_TYPE_UC;
- else if (qp_type == IB_QPT_UD)
- transport_type = SERV_TYPE_UD;
- else if (qp_type == IB_QPT_GSI)
- transport_type = SERV_TYPE_UD;
- else
- transport_type = -1;
-
- return transport_type;
+ switch (qp_type) {
+ case IB_QPT_RC:
+ return SERV_TYPE_RC;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return SERV_TYPE_UD;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ return SERV_TYPE_XRC;
+ default:
+ return -1;
+ }
}
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
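A sketch of the caller-side contract the hunks above establish for XRC target QPs: pd may be NULL, the device comes from the XRC domain, and both CQ pointers are cleared. Assumed usage, not part of this patch:

	struct ib_qp_init_attr attr = {
		.qp_type = IB_QPT_XRC_TGT,
		.xrcd	 = xrcd,	/* supplies both device and xrcdn */
		/* send_cq/recv_cq left NULL; create_qp clears them anyway */
	};
	struct ib_qp *qp = hns_roce_create_qp(NULL, &attr, NULL);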
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index d5a6de0e7095..546d182c577a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -314,6 +314,9 @@ static void set_srq_ext_param(struct hns_roce_srq *srq,
{
srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
to_hr_cq(init_attr->ext.cq)->cqn : 0;
+
+ srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
+ to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
}
static int set_srq_param(struct hns_roce_srq *srq,
@@ -412,7 +415,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+ srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
srq->event = hns_roce_ib_srq_event;
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
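For context, set_srq_ext_param() above picks up an xrcdn only for XRC-type SRQs; a hedged sketch of the init_attr a caller would pass (example sizes, assuming the usual ib_srq_init_attr layout):

	struct ib_srq_init_attr init = {
		.srq_type	= IB_SRQT_XRC,
		.ext.cq		= cq,	/* ib_srq_has_cq() is true for XRC */
		.ext.xrc.xrcd	= xrcd,	/* source of srq->xrcdn */
		.attr		= { .max_wr = 64, .max_sge = 1 },
	};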
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 6a79502c8b53..be4094ac4fac 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -504,15 +504,6 @@ static inline void i40iw_free_resource(struct i40iw_device *iwdev,
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
}
-/**
- * to_iwhdl - Get the handler from the device pointer
- * @iwdev: device pointer
- **/
-static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
-{
- return container_of(iw_dev, struct i40iw_handler, device);
-}
-
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index ac65c8237b2e..2450b7dd51f6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -905,7 +905,7 @@ static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
}
/**
- * recv_mpa - process an IETF MPA frame
+ * i40iw_parse_mpa - process an IETF MPA frame
* @cm_node: connection's node
* @buffer: Data pointer
* @type: to return accept or reject
@@ -4360,7 +4360,7 @@ void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
}
/**
- * i40iw_ifdown_notify - process an ifdown on an interface
+ * i40iw_if_notify - process an ifdown on an interface
* @iwdev: device pointer
* @netdev: network interface device structure
* @ipaddr: Pointer to IPv4 or IPv6 address
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
index 8bd72af9e099..b44bfc1d239b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
@@ -285,7 +285,7 @@ static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *d
}
/**
- * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects
+ * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
* @dev: pointer to the device structure
* @info: pointer to i40iw_hmc_iw_create_obj_info struct
*
@@ -434,7 +434,7 @@ static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
}
/**
- * i40iw_del_iw_hmc_obj - remove pe hmc objects
+ * i40iw_sc_del_hmc_obj - remove pe hmc objects
* @dev: pointer to the device structure
* @info: pointer to i40iw_hmc_del_obj_info struct
* @reset: true if called before reset
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index ab4cb11950dc..b496f30ce066 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -78,7 +78,7 @@ static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
static LIST_HEAD(i40iw_handlers);
-static spinlock_t i40iw_handler_lock;
+static DEFINE_SPINLOCK(i40iw_handler_lock);
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
u32 vf_id, u8 *msg, u16 len);
@@ -251,7 +251,7 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
}
/**
- * i40iw_disable_irqs - disable device interrupts
+ * i40iw_disable_irq - disable device interrupts
* @dev: hardware control device structure
* @msix_vec: msix vector to disable irq
* @dev_id: parameter to pass to free_irq (used during irq setup)
@@ -2043,7 +2043,6 @@ static int __init i40iw_init_module(void)
i40iw_client.ops = &i40e_ops;
memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
i40iw_client.type = I40E_CLIENT_IWARP;
- spin_lock_init(&i40iw_handler_lock);
ret = i40e_register_client(&i40iw_client);
i40iw_register_notifiers();
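The hunk above trades a runtime spin_lock_init() call for static initialization; for a file-scope lock the two forms are equivalent, but the static form cannot be forgotten. Sketch with hypothetical names:

	/* Old pattern: two steps, and first use before init is a bug. */
	static spinlock_t my_lock;
	/* ... spin_lock_init(&my_lock); must run before any spin_lock() ... */

	/* New pattern: initialized at compile time, valid immediately. */
	static DEFINE_SPINLOCK(my_other_lock);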
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index d474aad62a81..d938ccb195b1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -51,17 +51,6 @@ static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
}
/**
- * set_32bit_val - set 32 value to hw wqe
- * @wqe_words: wqe addr to write
- * @byte_index: index in wqe
- * @value: value to write
- **/
-static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)
-{
- wqe_words[byte_index >> 2] = value;
-}
-
-/**
* get_64bit_val - read 64 bit value from wqe
* @wqe_words: wqe addr
* @byte_index: index to read from
@@ -72,17 +61,6 @@ static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
*value = wqe_words[byte_index >> 3];
}
-/**
- * get_32bit_val - read 32 bit value from wqe
- * @wqe_words: wqe addr
- * @byte_index: index to reaad from
- * @value: return 32 bit value
- **/
-static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)
-{
- *value = wqe_words[byte_index >> 2];
-}
-
struct i40iw_dma_mem {
void *va;
dma_addr_t pa;
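The surviving 64-bit helpers treat byte_index as a byte offset into the WQE, hence the >> 3 above; a usage sketch (values hypothetical):

	u64 wqe[4] = {};	/* one 32-byte WQE quadrant */
	u64 val;

	set_64bit_val(wqe, 24, 0x1234ULL);	/* stores to wqe[24 >> 3] == wqe[3] */
	get_64bit_val(wqe, 24, &val);		/* val == 0x1234 */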
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 53e5cd1a2bd6..146a4148219b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (chunk->size >> 3);
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
- if (sd_entry->valid)
- return 0;
- if (dev->is_pf) {
+ if (dev->is_pf && !sd_entry->valid) {
ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
sd_reg_val, idx->sd_idx,
sd_entry->entry_type, true);
@@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
}
sd_entry->valid = true;
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
return 0;
error:
kfree(chunk);
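The reordering above matters on the error path: the old code put the chunk on pinfo.clist before the SD entry was programmed, so the kfree() under error: freed memory still reachable through the list. The anti-pattern in miniature (hypothetical names):

	list_add(&chunk->list, &head);	/* published first... */
	if (program_hw(chunk)) {
		kfree(chunk);		/* ...freed while still on the list */
		return -EIO;
	}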
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index d1c8cc0a6236..88fb68e866ba 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1000,7 +1000,7 @@ static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
}
/**
- * i40iw_ieq_get_fpdu - given length return fpdu length
+ * i40iw_ieq_get_fpdu_length - given length return fpdu length
* @length: length of fpdu
*/
static u16 i40iw_ieq_get_fpdu_length(u16 length)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 76f052b12c14..9ff825f7860b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -890,7 +890,7 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
}
/**
- * i40iw_terminate_imeout - timeout happened
+ * i40iw_terminate_timeout - timeout happened
* @t: points to iwarp qp
*/
static void i40iw_terminate_timeout(struct timer_list *t)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index f18d146a6079..b876d722fcc8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -94,7 +94,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
* @props: returning device attributes
*/
static int i40iw_query_port(struct ib_device *ibdev,
- u8 port,
+ u32 port,
struct ib_port_attr *props)
{
props->lid = 1;
@@ -647,7 +647,7 @@ error:
}
/**
- * i40iw_query - query qp attributes
+ * i40iw_query_qp - query qp attributes
* @ibqp: qp pointer
* @attr: attributes pointer
* @attr_mask: Not used
@@ -1846,7 +1846,7 @@ static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
}
/**
- * i40iw_del_mem_list - Deleting pbl list entries for CQ/QP
+ * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
* @iwmr: iwmr for IB's user page addresses
* @ucontext: ptr to user context
*/
@@ -2347,7 +2347,7 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
* @port_num: port number
* @immutable: immutable data for the port return
*/
-static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int i40iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -2446,7 +2446,7 @@ static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
* @port_num: port number
*/
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -2477,7 +2477,7 @@ static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
*/
static int i40iw_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -2504,7 +2504,7 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
* @gid: Global ID
*/
static int i40iw_query_gid(struct ib_device *ibdev,
- u8 port,
+ u32 port,
int index,
union ib_gid *gid)
{
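These signature changes track the rdma core's switch of port numbers from u8 to u32; a sketch of the corresponding ib_device_ops entries this file implements (consistent with include/rdma/ib_verbs.h of this series, abridged):

	int (*query_port)(struct ib_device *device, u32 port_num,
			  struct ib_port_attr *props);
	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
			 union ib_gid *gid);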
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index aca9061688ae..e34a1522132c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -333,7 +333,7 @@ static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback
}
/**
- * pf_add_hmc_obj - Callback for Add HMC Object
+ * pf_add_hmc_obj_callback - Callback for Add HMC Object
* @work_vf_dev: pointer to the VF Device
*/
static void pf_add_hmc_obj_callback(void *work_vf_dev)
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index cca414ecfcd5..571d9c542024 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -73,12 +73,12 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
int *resched_delay_sec);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
- u8 port_num, u8 *p_data)
+ u32 port_num, u8 *p_data)
{
int i;
u64 guid_indexes;
int slave_id;
- int port_index = port_num - 1;
+ u32 port_index = port_num - 1;
if (!mlx4_is_master(dev->dev))
return;
@@ -86,7 +86,7 @@ void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
- pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+ pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes);
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
/* The location of the specific index starts from bit number 4
@@ -184,7 +184,7 @@ unlock:
* port_number - 1 or 2
*/
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
- int block_num, u8 port_num,
+ int block_num, u32 port_num,
u8 *p_data)
{
int i;
@@ -206,7 +206,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
- pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+ pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes);
/*calculate the slaves and notify them*/
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -260,11 +260,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
&gen_event);
- pr_debug("slave: %d, port: %d prev_port_state: %d,"
+ pr_debug("slave: %d, port: %u prev_port_state: %d,"
" new_port_state: %d, gen_event: %d\n",
slave_id, port_num, prev_state, new_state, gen_event);
if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
- pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
+ pr_debug("sending PORT_UP event to slave: %d, port: %u\n",
slave_id, port_num);
mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
@@ -274,7 +274,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
&gen_event);
if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
- pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+ pr_debug("sending PORT DOWN event to slave: %d, port: %u\n",
slave_id, port_num);
mlx4_gen_port_state_change_eqe(dev->dev,
slave_id,
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f3ace85552f3..d13ecbdd4391 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -88,8 +88,8 @@ struct mlx4_rcv_tunnel_mad {
struct ib_mad mad;
} __packed;
-static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
-static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num);
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
int block, u32 change_bitmap);
@@ -186,7 +186,7 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
return err;
}
-static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
+static void update_sm_ah(struct mlx4_ib_dev *dev, u32 port_num, u16 lid, u8 sl)
{
struct ib_ah *new_ah;
struct rdma_ah_attr ah_attr;
@@ -217,8 +217,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
* Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
* synthesize LID change, Client-Rereg, GID change, and P_Key change events.
*/
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
- u16 prev_lid)
+static void smp_snoop(struct ib_device *ibdev, u32 port_num,
+ const struct ib_mad *mad, u16 prev_lid)
{
struct ib_port_info *pinfo;
u16 lid;
@@ -274,7 +274,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
be16_to_cpu(base[i]);
}
}
- pr_debug("PKEY Change event: port=%d, "
+ pr_debug("PKEY Change event: port=%u, "
"block=0x%x, change_bitmap=0x%x\n",
port_num, bn, pkey_change_bitmap);
@@ -380,7 +380,8 @@ static void node_desc_override(struct ib_device *dev,
}
}
-static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
+static void forward_trap(struct mlx4_ib_dev *dev, u32 port_num,
+ const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
struct ib_mad_send_buf *send_buf;
@@ -429,7 +430,7 @@ static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave
return ret;
}
-int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int i;
@@ -443,7 +444,7 @@ int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
- u8 port, u16 pkey, u16 *ix)
+ u32 port, u16 pkey, u16 *ix)
{
int i, ret;
u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
@@ -507,7 +508,7 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
return (qpn >= proxy_start && qpn <= proxy_start + 1);
}
-int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad)
{
@@ -678,7 +679,7 @@ end:
return ret;
}
-static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
+static int mlx4_ib_demux_mad(struct ib_device *ibdev, u32 port,
struct ib_wc *wc, struct ib_grh *grh,
struct ib_mad *mad)
{
@@ -818,7 +819,7 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
return 0;
}
-static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -932,9 +933,10 @@ static int iboe_process_mad_port_info(void *out_mad)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -979,7 +981,7 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return err;
}
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
@@ -1073,7 +1075,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
}
}
-static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num)
{
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
@@ -1082,7 +1084,7 @@ static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}
-static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num)
{
/* re-configure the alias-guid and mcg's */
if (mlx4_is_master(dev->dev)) {
@@ -1121,7 +1123,7 @@ static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
GET_MASK_FROM_EQE(eqe));
}
-static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
+static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u32 port_num,
u32 guid_tbl_blk_num, u32 change_bitmap)
{
struct ib_smp *in_mad = NULL;
@@ -1177,7 +1179,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
struct mlx4_ib_dev *dev = ew->ib_dev;
struct mlx4_eqe *eqe = &(ew->ib_eqe);
- u8 port = eqe->event.port_mgmt_change.port;
+ u32 port = eqe->event.port_mgmt_change.port;
u32 changed_attr;
u32 tbl_block;
u32 change_bitmap;
@@ -1274,7 +1276,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
kfree(ew);
}
-void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
enum ib_event_type type)
{
struct ib_event event;
@@ -1351,7 +1353,7 @@ static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
return ret;
}
-int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, u16 pkey_index,
u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f26a0d920842..22898d97ecbd 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -81,7 +81,7 @@ static const char mlx4_ib_version[] =
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
static struct workqueue_struct *wq;
@@ -129,7 +129,8 @@ static int num_ib_ports(struct mlx4_dev *dev)
return ib_ports;
}
-static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
+static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
+ u32 port_num)
{
struct mlx4_ib_dev *ibdev = to_mdev(device);
struct net_device *dev;
@@ -160,7 +161,7 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_n
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
struct mlx4_ib_dev *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -193,7 +194,7 @@ static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
struct mlx4_ib_dev *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -238,7 +239,7 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
static int mlx4_ib_update_gids(struct gid_entry *gids,
struct mlx4_ib_dev *ibdev,
- u8 port_num)
+ u32 port_num)
{
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
@@ -407,7 +408,7 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
int real_index = -EINVAL;
int i;
unsigned long flags;
- u8 port_num = attr->port_num;
+ u32 port_num = attr->port_num;
if (port_num > MLX4_MAX_PORTS)
return -EINVAL;
@@ -649,7 +650,7 @@ out:
}
static enum rdma_link_layer
-mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
+mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
@@ -657,7 +658,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
-static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+static int ib_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
struct ib_smp *in_mad = NULL;
@@ -753,7 +754,7 @@ static u8 state_to_phys_state(enum ib_port_state state)
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}
-static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+static int eth_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
@@ -814,7 +815,7 @@ out:
return err;
}
-int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
int err;
@@ -828,14 +829,14 @@ int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
return err;
}
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
/* returns host view */
return __mlx4_ib_query_port(ibdev, port, props, 0);
}
-int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view)
{
struct ib_smp *in_mad = NULL;
@@ -891,7 +892,7 @@ out:
return err;
}
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
if (rdma_protocol_ib(ibdev, port))
@@ -899,7 +900,8 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
+static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
+ u64 *sl2vl_tbl)
{
union sl2vl_tbl_to_u64 sl2vl64;
struct ib_smp *in_mad = NULL;
@@ -959,7 +961,7 @@ static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
}
}
-int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view)
{
struct ib_smp *in_mad = NULL;
@@ -992,7 +994,8 @@ out:
return err;
}
-static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
{
return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
@@ -1033,8 +1036,8 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return 0;
}
-static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
- u32 cap_mask)
+static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
+ int reset_qkey_viols, u32 cap_mask)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -1059,7 +1062,7 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
return err;
}
-static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx4_ib_dev *mdev = to_mdev(ibdev);
@@ -2103,7 +2106,7 @@ static const struct diag_counter diag_device_only[] = {
};
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
@@ -2118,7 +2121,7 @@ static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
@@ -2466,7 +2469,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
ibdev->eq_table = NULL;
}
-static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 78c9bb79ec75..e856cf23a0a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -429,7 +429,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
struct workqueue_struct *wq;
struct delayed_work alias_guid_work;
- u8 port;
+ u32 port;
u32 state_flags;
struct mlx4_sriov_alias_guid *parent;
struct list_head cb_list;
@@ -657,7 +657,7 @@ struct mlx4_ib_qp_tunnel_init_attr {
struct ib_qp_init_attr init_attr;
int slave;
enum ib_qp_type proxy_qp_type;
- u8 port;
+ u32 port;
};
struct mlx4_uverbs_ex_query_device {
@@ -810,24 +810,24 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
-int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view);
-int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view);
-int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view);
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
- u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
+ u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
return true;
@@ -841,7 +841,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);
-int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid);
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
struct ib_sa_mad *sa_mad);
@@ -851,16 +851,16 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid);
-void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
enum ib_event_type type);
void mlx4_ib_tunnels_update_work(struct work_struct *work);
-int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad);
-int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
u16 vlan_id, struct ib_mad *mad);
@@ -884,10 +884,10 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
int block_num,
- u8 port_num, u8 *p_data);
+ u32 port_num, u8 *p_data);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
- int block_num, u8 port_num,
+ int block_num, u32 port_num,
u8 *p_data);
int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 651785bd57f2..92ddbcc00eb2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3135,7 +3135,6 @@ static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
}
if (is_eth) {
- struct in6_addr in6;
u16 ether_type;
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -3148,8 +3147,6 @@ static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
- memcpy(&in6, sgid.raw, sizeof(in6));
-
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index b4c009bb0db6..f43380106bd0 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -6,6 +6,7 @@ mlx5_ib-y := ah.o \
cong.o \
counters.o \
cq.o \
+ dm.o \
doorbell.o \
gsi.o \
ib_virt.o \
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 234f29912ba9..a8db8a051170 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -47,107 +47,6 @@ int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
- u64 length, u32 alignment)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
- >> PAGE_SHIFT;
- u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
- u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
- u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
- u32 mlx5_alignment;
- u64 page_idx = 0;
- int ret = 0;
-
- if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
- return -EINVAL;
-
- /* mlx5 device sets alignment as 64*2^driver_value
- * so normalizing is needed.
- */
- mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
- alignment - MLX5_MEMIC_BASE_ALIGN;
- if (mlx5_alignment > max_alignment)
- return -EINVAL;
-
- MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
- MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
- MLX5_SET(alloc_memic_in, in, memic_size, length);
- MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
- mlx5_alignment);
-
- while (page_idx < num_memic_hw_pages) {
- spin_lock(&dm->lock);
- page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
- num_memic_hw_pages,
- page_idx,
- num_pages, 0);
-
- if (page_idx < num_memic_hw_pages)
- bitmap_set(dm->memic_alloc_pages,
- page_idx, num_pages);
-
- spin_unlock(&dm->lock);
-
- if (page_idx >= num_memic_hw_pages)
- break;
-
- MLX5_SET64(alloc_memic_in, in, range_start_addr,
- hw_start_addr + (page_idx * PAGE_SIZE));
-
- ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
- if (ret) {
- spin_lock(&dm->lock);
- bitmap_clear(dm->memic_alloc_pages,
- page_idx, num_pages);
- spin_unlock(&dm->lock);
-
- if (ret == -EAGAIN) {
- page_idx++;
- continue;
- }
-
- return ret;
- }
-
- *addr = dev->bar_addr +
- MLX5_GET64(alloc_memic_out, out, memic_start_addr);
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
- u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
- u64 start_page_idx;
- int err;
-
- addr -= dev->bar_addr;
- start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
-
- MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
- MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
- MLX5_SET(dealloc_memic_in, in, memic_size, length);
-
- err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
- if (err)
- return;
-
- spin_lock(&dm->lock);
- bitmap_clear(dm->memic_alloc_pages,
- start_page_idx, num_pages);
- spin_unlock(&dm->lock);
-}
-
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 88ea6ef8f2cb..66c96292ed43 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -41,9 +41,6 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out);
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
- u64 length, u32 alignment);
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index b9291e482428..0b61df52332a 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -267,7 +267,7 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
}
}
-static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
@@ -304,7 +304,7 @@ alloc_err:
return err;
}
-static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
@@ -397,7 +397,7 @@ static const struct file_operations dbg_cc_fops = {
.read = get_param,
};
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
{
if (!mlx5_debugfs_root ||
!dev->port[port_num].dbg_cc_params ||
@@ -409,7 +409,7 @@ void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
dev->port[port_num].dbg_cc_params = NULL;
}
-void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
struct mlx5_core_dev *mdev;
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 084652e2b15a..e365341057cb 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -139,7 +139,7 @@ static int mlx5_ib_create_counters(struct ib_counters *counters,
static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
- u8 port_num)
+ u32 port_num)
{
return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
&dev->port[port_num].cnts;
@@ -154,7 +154,7 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
* device port combination in switchdev and non switchdev mode of the
* parent device.
*/
-u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
{
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
@@ -162,7 +162,7 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
}
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts;
@@ -236,13 +236,13 @@ free:
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
struct mlx5_core_dev *mdev;
int ret, num_counters;
- u8 mdev_port_num;
+ u32 mdev_port_num;
if (!stats)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
index 1aa30c2f3f4d..6bcaaa52e2b2 100644
--- a/drivers/infiniband/hw/mlx5/counters.h
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -13,5 +13,5 @@ void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_clear_description(struct ib_counters *counters);
int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
struct mlx5_ib_create_flow *ucmd);
-u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num);
#endif /* _MLX5_IB_COUNTERS_H */
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 07b8350929cd..a0b677accd96 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2185,27 +2185,69 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
return 0;
}
+static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
+ unsigned long pgsz_bitmap)
+{
+ unsigned long page_size;
+
+ /* Don't bother checking larger page sizes as offset must be zero and
+ * total DEVX umem length must be equal to total umem length.
+ */
+ pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
+ PAGE_SHIFT),
+ MLX5_ADAPTER_PAGE_SHIFT);
+ if (!pgsz_bitmap)
+ return 0;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
+ if (!page_size)
+ return 0;
+
+ /* If the page_size is less than the CPU page size then we can use the
+ * offset and create a umem which is a subset of the page list.
+ * For larger page sizes we can't be sure the DMA list reflects the
+ * VA so we must ensure that the umem extent is exactly equal to the
+ * page list. Reduce the page size until one of these cases is true.
+ */
+ while ((ib_umem_dma_offset(umem, page_size) != 0 ||
+ (umem->length % page_size) != 0) &&
+ page_size > PAGE_SIZE)
+ page_size /= 2;
+
+ return page_size;
+}
+
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
struct uverbs_attr_bundle *attrs,
struct devx_umem *obj,
struct devx_umem_reg_cmd *cmd)
{
+ unsigned long pgsz_bitmap;
unsigned int page_size;
__be64 *mtt;
void *umem;
+ int ret;
/*
- * We don't know what the user intends to use this umem for, but the HW
- * restrictions must be met. MR, doorbell records, QP, WQ and CQ all
- * have different requirements. Since we have no idea how to sort this
- * out, only support PAGE_SIZE with the expectation that userspace will
- * provide the necessary alignments inside the known PAGE_SIZE and that
- * FW will check everything.
+ * If the user does not pass in pgsz_bitmap then the user promises not
+ * to use umem_offset!=0 in any commands that allocate on top of the
+ * umem.
+ *
+ * If the user wants to use a umem_offset then it must pass in
+ * pgsz_bitmap which guides the maximum page size and thus maximum
+ * object alignment inside the umem. See the PRM.
+ *
+ * Users are not allowed to use IOVA here, mkeys are not supported on
+ * umem.
*/
- page_size = ib_umem_find_best_pgoff(
- obj->umem, PAGE_SIZE,
- __mlx5_page_offset_to_bitmask(__mlx5_bit_sz(umem, page_offset),
- 0));
+ ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ GENMASK_ULL(63,
+ min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
+ if (ret)
+ return ret;
+
+ page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
if (!page_size)
return -EINVAL;
@@ -2791,6 +2833,8 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
enum ib_access_flags),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ u64),
UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY));
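A hedged usage sketch of the new attribute: a caller that intends to create objects at non-zero offsets inside the umem, and can tolerate page sizes from 4 KiB up to 2 MiB, would pass a bitmap where bit n selects a candidate page size of 2^n bytes:

	const u64 pgsz_bitmap = GENMASK_ULL(21, 12);	/* 4 KiB .. 2 MiB */
	/* supplied via MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP alongside the
	 * existing DEVX umem registration attributes */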
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
new file mode 100644
index 000000000000..094bf85589db
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "dm.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+ u64 length, u32 alignment)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
+ >> PAGE_SHIFT;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
+ u32 mlx5_alignment;
+ u64 page_idx = 0;
+ int ret = 0;
+
+ if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
+ return -EINVAL;
+
+ /* mlx5 device sets alignment as 64*2^driver_value
+ * so normalizing is needed.
+ */
+ mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
+ alignment - MLX5_MEMIC_BASE_ALIGN;
+ if (mlx5_alignment > max_alignment)
+ return -EINVAL;
+
+ MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
+ MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
+ MLX5_SET(alloc_memic_in, in, memic_size, length);
+ MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
+ mlx5_alignment);
+
+ while (page_idx < num_memic_hw_pages) {
+ spin_lock(&dm->lock);
+ page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
+ num_memic_hw_pages,
+ page_idx,
+ num_pages, 0);
+
+ if (page_idx < num_memic_hw_pages)
+ bitmap_set(dm->memic_alloc_pages,
+ page_idx, num_pages);
+
+ spin_unlock(&dm->lock);
+
+ if (page_idx >= num_memic_hw_pages)
+ break;
+
+ MLX5_SET64(alloc_memic_in, in, range_start_addr,
+ hw_start_addr + (page_idx * PAGE_SIZE));
+
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
+ if (ret) {
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ page_idx, num_pages);
+ spin_unlock(&dm->lock);
+
+ if (ret == -EAGAIN) {
+ page_idx++;
+ continue;
+ }
+
+ return ret;
+ }
+
+ *addr = dev->bar_addr +
+ MLX5_GET64(alloc_memic_out, out, memic_start_addr);
+
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
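
mlx5_cmd_alloc_memic() pairs a software bitmap with a firmware command: a candidate run of pages is reserved locally, offered to firmware, and released again if firmware reports the range busy. Below is a condensed userspace model of that loop; fw_alloc() is a hypothetical stand-in for the ALLOC_MEMIC command and its failure pattern is invented for the demo.

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8

static bool bitmap[NPAGES];

/* Linear scan for a free run, like bitmap_find_next_zero_area(). */
static int find_zero_run(int start, int len)
{
	for (int i = start; i + len <= NPAGES; i++) {
		bool ok = true;

		for (int j = 0; j < len; j++)
			ok &= !bitmap[i + j];
		if (ok)
			return i;
	}
	return NPAGES;
}

/* Hypothetical stand-in for ALLOC_MEMIC; pretend the first two
 * ranges are busy so the retry path is exercised. */
static int fw_alloc(int page)
{
	return page < 2 ? -1 /* treat as -EAGAIN */ : 0;
}

static int alloc_pages(int len)
{
	int idx = 0;

	while ((idx = find_zero_run(idx, len)) < NPAGES) {
		for (int j = 0; j < len; j++)
			bitmap[idx + j] = true;		/* reserve */
		if (fw_alloc(idx) == 0)
			return idx;
		for (int j = 0; j < len; j++)
			bitmap[idx + j] = false;	/* roll back */
		idx++;	/* -EAGAIN: retry further along; other
			 * errors would abort, elided here */
	}
	return -1;
}

int main(void)
{
	printf("allocated at page %d\n", alloc_pages(2));
	return 0;
}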
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
+ u64 start_page_idx;
+ int err;
+
+ addr -= dev->bar_addr;
+ start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
+
+ MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
+ MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
+ MLX5_SET(dealloc_memic_in, in, memic_size, length);
+
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
+ if (err)
+ return;
+
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ start_page_idx, num_pages);
+ spin_unlock(&dm->lock);
+}
+
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ mlx5_cmd_exec_in(dev, modify_memic, in);
+}
+
+static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation, phys_addr_t *op_addr)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+ int err;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
+ if (err)
+ return err;
+
+ *op_addr = dev->bar_addr +
+ MLX5_GET64(modify_memic_out, out, memic_operation_addr);
+ return 0;
+}
+
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+ struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
+ size_t size, u64 address)
+{
+ mentry->mmap_flag = mmap_flag;
+ mentry->address = address;
+
+ return rdma_user_mmap_entry_insert_range(
+ context, &mentry->rdma_entry, size,
+ MLX5_IB_MMAP_DEVICE_MEM << 16,
+ (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
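
add_dm_mmap_entry() confines all DM entries to a 2^16-wide pgoff window derived from the MLX5_IB_MMAP_DEVICE_MEM command, which is why the handlers can return just the low 16 bits of start_pgoff as a page index. A sketch of the window bounds; the command value 8 is assumed here purely for illustration (the real constant is defined in the driver headers):

#include <stdio.h>

int main(void)
{
	unsigned long cmd = 8;	/* MLX5_IB_MMAP_DEVICE_MEM, value assumed */
	unsigned long first = cmd << 16;
	unsigned long last = (cmd << 16) + (1UL << 16) - 1;

	/* Every DM mmap entry gets a pgoff in [first, last], so its low
	 * 16 bits are enough to identify it to userspace. */
	printf("pgoff window: [0x%lx, 0x%lx]\n", first, last);
	return 0;
}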
+static void mlx5_ib_dm_memic_free(struct kref *kref)
+{
+ struct mlx5_ib_dm_memic *dm =
+ container_of(kref, struct mlx5_ib_dm_memic, ref);
+ struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
+
+ mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+}
+
+static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
+ struct uverbs_attr_bundle *attrs)
+{
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ start_offset = op_entry->op_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+}
+
+static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+
+ op_entry = xa_load(&dm->ops, op);
+ if (!op_entry)
+ return -ENOENT;
+
+ return copy_op_to_user(op_entry, attrs);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct ib_dm *ibdm = uobj->object;
+ struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
+ struct mlx5_ib_dm_op_entry *op_entry;
+ int err;
+ u8 op;
+
+ err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
+ if (err)
+ return err;
+
+ if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dm->ops_xa_lock);
+ err = map_existing_op(dm, op, attrs);
+ if (!err || err != -ENOENT)
+ goto err_unlock;
+
+ op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
+ if (!op_entry)
+ goto err_unlock;
+
+ err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
+ &op_entry->op_addr);
+ if (err) {
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ op_entry->op = op;
+ op_entry->dm = dm;
+
+ err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
+ op_entry->op_addr & PAGE_MASK);
+ if (err) {
+ mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ /* From this point, entry will be freed by mmap_free */
+ kref_get(&dm->ref);
+
+ err = copy_op_to_user(op_entry, attrs);
+ if (err)
+ goto err_remove;
+
+ err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
+ if (err)
+ goto err_remove;
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return 0;
+
+err_remove:
+ rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
+err_unlock:
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return err;
+}
+
+static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ struct mlx5_ib_dm_memic *dm;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+ u64 address;
+
+ if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
+ dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
+ dm->base.ibdm.device = ctx->device;
+
+ kref_init(&dm->ref);
+ xa_init(&dm->ops);
+ mutex_init(&dm->ops_xa_lock);
+ dm->req_length = attr->length;
+
+ err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
+ dm->base.size, attr->alignment);
+ if (err) {
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ address = dm->base.dev_addr & PAGE_MASK;
+ err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
+ dm->base.size, address);
+ if (err) {
+ mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ goto err_copy;
+
+ start_offset = dm->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ goto err_copy;
+
+ return &dm->base.ibdm;
+
+err_copy:
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+ return ERR_PTR(err);
+}
+
+static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
+{
+ return uapi_type == MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM ?
+ MLX5_SW_ICM_TYPE_STEERING :
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+}
+
+static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs,
+ int type)
+{
+ struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
+ enum mlx5_sw_icm_type icm_type = get_icm_type(type);
+ struct mlx5_ib_dm_icm *dm;
+ u64 act_size;
+ int err;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = type;
+ dm->base.ibdm.device = ctx->device;
+
+ if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) {
+ err = -EPERM;
+ goto free;
+ }
+
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ /* Allocation size must be a multiple of the basic block size
+ * and a power of 2.
+ */
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ act_size = roundup_pow_of_two(act_size);
+
+ dm->base.size = act_size;
+ err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
+ to_mucontext(ctx)->devx_uid,
+ &dm->base.dev_addr, &dm->obj_id);
+ if (err)
+ goto free;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &dm->base.dev_addr, sizeof(dm->base.dev_addr));
+ if (err) {
+ mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
+ to_mucontext(ctx)->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ goto free;
+ }
+ return &dm->base.ibdm;
+free:
+ kfree(dm);
+ return ERR_PTR(err);
+}
+
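
The two rounding steps in handle_alloc_dm_sw_icm() compose as follows; this standalone sketch assumes a 64-byte basic block purely for illustration (the real value comes from MLX5_SW_ICM_BLOCK_SIZE(dev)):

#include <stdio.h>

static unsigned long long round_up_to(unsigned long long x,
				      unsigned long long a)
{
	return (x + a - 1) / a * a;	/* round_up(); a must be non-zero */
}

static unsigned long long round_pow2(unsigned long long x)
{
	unsigned long long p = 1;	/* roundup_pow_of_two() */

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long long block = 64;	/* assumed basic block size */
	unsigned long long act;

	act = round_pow2(round_up_to(100, block));
	printf("%llu\n", act);		/* 100 -> 128 -> 128 */
	return 0;
}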
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ enum mlx5_ib_uapi_dm_type type;
+ int err;
+
+ err = uverbs_get_const_default(&type, attrs,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ MLX5_IB_UAPI_DM_TYPE_MEMIC);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+ type, attr->length, attr->alignment);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ return handle_alloc_dm_memic(context, attr, attrs);
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ return handle_alloc_dm_sw_icm(context, attr, attrs, type);
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return handle_alloc_dm_sw_icm(context, attr, attrs, type);
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
+{
+ struct mlx5_ib_dm_op_entry *entry;
+ unsigned long idx;
+
+ mutex_lock(&dm->ops_xa_lock);
+ xa_for_each(&dm->ops, idx, entry) {
+ xa_erase(&dm->ops, idx);
+ rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
+ }
+ mutex_unlock(&dm->ops_xa_lock);
+}
+
+static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
+{
+ dm_memic_remove_ops(dm);
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+}
+
+static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
+ struct mlx5_ib_dm_icm *dm)
+{
+ enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
+ struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
+ int err;
+
+ err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ if (!err)
+ kfree(dm);
+ return 0;
+}
+
+static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+
+ switch (dm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ mlx5_dm_memic_dealloc(to_memic(ibdm));
+ return 0;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm *ibdm =
+ uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+ struct mlx5_ib_dm_memic *memic;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
+ return -EOPNOTSUPP;
+
+ memic = to_memic(ibdm);
+ page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ start_offset = memic->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ &memic->req_length,
+ sizeof(memic->req_length));
+}
+
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+ struct mlx5_ib_dm_memic *mdm;
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
+ mentry);
+ mdm = op_entry->dm;
+ mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
+ op_entry->op);
+ kfree(op_entry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
+
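
The kref choreography across handle_alloc_dm_memic(), the MAP_OP_ADDR handler, and mlx5_ib_dm_mmap_free() reduces to one rule: the base DM mapping holds one reference and every mapped operation address holds another, so the MEMIC region is only returned to firmware when the last mapping goes away. A toy model with a plain counter in place of struct kref:

#include <stdio.h>

struct toy_dm { int refs; };

static void toy_put(struct toy_dm *dm)
{
	if (--dm->refs == 0)
		printf("dealloc MEMIC\n");	/* mlx5_ib_dm_memic_free() */
}

int main(void)
{
	struct toy_dm dm = { .refs = 1 };	/* kref_init() at alloc */

	dm.refs++;		/* kref_get() when an op address is mapped */
	toy_put(&dm);		/* op mmap entry freed: region stays alive */
	toy_put(&dm);		/* base mmap entry freed: region released */
	return 0;
}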
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ, UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_MAP_OP_ADDR,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
+
+const struct uapi_definition mlx5_ib_dm_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
+ {},
+};
+
+const struct ib_device_ops mlx5_ib_dev_dm_ops = {
+ .alloc_dm = mlx5_ib_alloc_dm,
+ .dealloc_dm = mlx5_ib_dealloc_dm,
+ .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
diff --git a/drivers/infiniband/hw/mlx5/dm.h b/drivers/infiniband/hw/mlx5/dm.h
new file mode 100644
index 000000000000..9674a80d8d70
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DM_H
+#define _MLX5_IB_DM_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dm_ops;
+extern const struct uapi_definition mlx5_ib_dm_defs[];
+
+struct mlx5_ib_dm {
+ struct ib_dm ibdm;
+ u32 type;
+ phys_addr_t dev_addr;
+ size_t size;
+};
+
+struct mlx5_ib_dm_op_entry {
+ struct mlx5_user_mmap_entry mentry;
+ phys_addr_t op_addr;
+ struct mlx5_ib_dm_memic *dm;
+ u8 op;
+};
+
+struct mlx5_ib_dm_memic {
+ struct mlx5_ib_dm base;
+ struct mlx5_user_mmap_entry mentry;
+ struct xarray ops;
+ struct mutex ops_xa_lock;
+ struct kref ref;
+ size_t req_length;
+};
+
+struct mlx5_ib_dm_icm {
+ struct mlx5_ib_dm base;
+ u32 obj_id;
+};
+
+static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm, ibdm);
+}
+
+static inline struct mlx5_ib_dm_memic *to_memic(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_memic, base.ibdm);
+}
+
+static inline struct mlx5_ib_dm_icm *to_icm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_icm, base.ibdm);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length);
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation);
+
+#endif /* _MLX5_IB_DM_H */
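
The to_memic()/to_icm() helpers above are the usual container_of() downcast: given a pointer to the embedded base, subtract the member offset to recover the wrapper. A self-contained illustration with invented struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int type; };
struct memic { struct base base; int extra; };	/* invented names */

int main(void)
{
	struct memic m = { .base = { .type = 1 }, .extra = 42 };
	struct base *b = &m.base;	/* what the core layer hands back */

	printf("%d\n", container_of(b, struct memic, base)->extra);
	return 0;
}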
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 25da0b05b4e2..2fc6a60c4e77 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -879,7 +879,7 @@ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(esw,
+ mlx5_eswitch_get_vport_metadata_for_match(rep->esw,
rep->vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
@@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
dst_num++;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
- flow_context, flow_act,
+ handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
+ fs_matcher, flow_context, flow_act,
cmd_in, inlen, dst_num);
if (IS_ERR(handler)) {
@@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
else
*dest_id = mqp->raw_packet_qp.rq.tirn;
*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
+ !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
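
The get_dests() hunk above narrows when an egress or RDMA-TX matcher receives a PORT destination: only when the DROP flag is absent, since a drop rule needs no forwarding target. A condensed model of the predicate:

#include <stdbool.h>
#include <stdio.h>

enum ns_type { NS_EGRESS, NS_RDMA_TX, NS_OTHER };

static bool dest_is_port(enum ns_type t, bool drop_flag)
{
	return (t == NS_EGRESS || t == NS_RDMA_TX) && !drop_flag;
}

int main(void)
{
	/* Same matcher, with and without the DROP flag. */
	printf("%d %d\n", dest_is_port(NS_EGRESS, false),
	       dest_is_port(NS_EGRESS, true));	/* 1 0 */
	return 0;
}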
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 9164cc069ad4..b25e0b33a11a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -20,7 +20,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rep->rep_data[REP_IB].priv = ibdev;
write_lock(&ibdev->port[vport_index].roce.netdev_lock);
ibdev->port[vport_index].roce.netdev =
- mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
+ mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
return 0;
@@ -29,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- int num_ports = mlx5_eswitch_get_total_vports(dev);
+ u32 num_ports = mlx5_eswitch_get_total_vports(dev);
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
int vport_index;
@@ -110,7 +110,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
- u16 port)
+ u32 port)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
struct mlx5_eswitch_rep *rep;
@@ -123,8 +123,7 @@ struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
rep = dev->port[port - 1].rep;
- return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
- sq->base.mqp.qpn);
+ return mlx5_eswitch_add_send_to_vport_rule(esw, rep, sq->base.mqp.qpn);
}
static int mlx5r_rep_probe(struct auxiliary_device *adev,
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index ce1dcb105dbd..9c55e5c528b4 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -16,7 +16,7 @@ int mlx5r_rep_init(void);
void mlx5r_rep_cleanup(void);
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
- u16 port);
+ u32 port);
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
@@ -25,7 +25,7 @@ static inline void mlx5r_rep_cleanup(void) {}
static inline
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
- u16 port)
+ u32 port)
{
return NULL;
}
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 46b2d370fb3f..f2f62875d072 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -48,7 +48,7 @@ static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
}
}
-int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -91,7 +91,7 @@ static inline enum port_state_policy net_to_mlx_policy(int policy)
}
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
- u8 port, int state)
+ u32 port, int state)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -119,7 +119,7 @@ out:
}
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_stats *stats)
+ u32 port, struct ifla_vf_stats *stats)
{
int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
struct mlx5_core_dev *mdev;
@@ -149,7 +149,8 @@ ex:
return err;
}
-static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+static int set_vf_node_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -172,7 +173,8 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
return err;
}
-static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+static int set_vf_port_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -195,7 +197,7 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
return err;
}
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
u64 guid, int type)
{
if (type == IFLA_VF_IB_NODE_GUID)
@@ -206,7 +208,7 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
return -EINVAL;
}
-int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid)
{
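
The u8 to u32 widening of port arguments in this file (and the files that follow) guards against silent truncation once port numbers may exceed 255. A two-line illustration of the failure mode being removed:

#include <stdio.h>

int main(void)
{
	unsigned int port = 300;	/* valid once ports exceed 255 */
	unsigned char old_port = port;	/* u8 silently truncates to 44 */

	printf("%u %u\n", port, old_port);
	return 0;
}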
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 652c6ccf1881..ec242a5a17a3 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -42,7 +42,7 @@ enum {
MLX5_IB_VENDOR_CLASS2 = 0xa
};
-static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
+static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u32 port_num,
struct ib_mad *in_mad)
{
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
@@ -52,7 +52,7 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
}
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
- int ignore_bkey, u8 port, const struct ib_wc *in_wc,
+ int ignore_bkey, u32 port, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const void *in_mad,
void *response_mad)
{
@@ -147,12 +147,12 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx5_core_dev *mdev;
bool native_port = true;
- u8 mdev_port_num;
+ u32 mdev_port_num;
void *out_cnt;
int err;
@@ -216,7 +216,7 @@ done:
return err;
}
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
@@ -444,7 +444,7 @@ out:
return err;
}
-int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
struct ib_smp *in_mad = NULL;
@@ -473,7 +473,7 @@ out:
return err;
}
-int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
@@ -513,7 +513,7 @@ out:
return err;
}
-int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0d69a697d75f..6d1dd09a4388 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -34,6 +34,7 @@
#include "ib_rep.h"
#include "cmd.h"
#include "devx.h"
+#include "dm.h"
#include "fs.h"
#include "srq.h"
#include "qp.h"
@@ -42,6 +43,7 @@
#include "counters.h"
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>
@@ -100,7 +102,7 @@ mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
}
static enum rdma_link_layer
-mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
+mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(device);
int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
@@ -109,7 +111,7 @@ mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
}
static int get_port_state(struct ib_device *ibdev,
- u8 port_num,
+ u32 port_num,
enum ib_port_state *state)
{
struct ib_port_attr attr;
@@ -124,9 +126,8 @@ static int get_port_state(struct ib_device *ibdev,
static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
struct net_device *ndev,
- u8 *port_num)
+ u32 *port_num)
{
- struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
struct net_device *rep_ndev;
struct mlx5_ib_port *port;
int i;
@@ -137,7 +138,7 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
continue;
read_lock(&port->roce.netdev_lock);
- rep_ndev = mlx5_ib_get_rep_netdev(esw,
+ rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
port->rep->vport);
if (rep_ndev == ndev) {
read_unlock(&port->roce.netdev_lock);
@@ -155,7 +156,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
{
struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- u8 port_num = roce->native_port_num;
+ u32 port_num = roce->native_port_num;
struct mlx5_core_dev *mdev;
struct mlx5_ib_dev *ibdev;
@@ -234,7 +235,7 @@ done:
}
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
@@ -262,8 +263,8 @@ out:
}
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
- u8 ib_port_num,
- u8 *native_port_num)
+ u32 ib_port_num,
+ u32 *native_port_num)
{
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
ib_port_num);
@@ -297,7 +298,7 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
return mdev;
}
-void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
{
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
port_num);
@@ -453,7 +454,7 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
active_width);
}
-static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -463,7 +464,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
enum ib_mtu ndev_ib_mtu;
bool put_mdev = true;
u32 eth_prot_oper;
- u8 mdev_port_num;
+ u32 mdev_port_num;
bool ext;
int err;
@@ -499,7 +500,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width, ext);
- if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
+ if (!dev->is_rep && dev->mdev->roce.roce_en) {
u16 qkey_viol_cntr;
props->port_cap_flags |= IB_PORT_CM_SUP;
@@ -550,19 +551,19 @@ out:
return err;
}
-static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
+static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ enum ib_gid_type gid_type;
u16 vlan_id = 0xffff;
u8 roce_version = 0;
u8 roce_l3_type = 0;
u8 mac[ETH_ALEN];
int ret;
+ gid_type = attr->gid_type;
if (gid) {
- gid_type = attr->gid_type;
ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
if (ret)
return ret;
@@ -574,7 +575,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
break;
case IB_GID_TYPE_ROCE_UDP_ENCAP:
roce_version = MLX5_ROCE_VERSION_2;
- if (ipv6_addr_v4mapped((void *)gid))
+ if (gid && ipv6_addr_v4mapped((void *)gid))
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
else
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
@@ -601,7 +602,7 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, NULL, NULL);
+ attr->index, NULL, attr);
}
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
@@ -1268,7 +1269,7 @@ static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
return 0;
}
-static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1336,7 +1337,7 @@ out:
return err;
}
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
unsigned int count;
@@ -1381,13 +1382,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
return ret;
}
-static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
return mlx5_query_port_roce(ibdev, port, props);
}
-static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
/* Default special Pkey for representor device port as per the
@@ -1397,7 +1398,7 @@ static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return 0;
}
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1416,13 +1417,13 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev;
bool put_mdev = true;
- u8 mdev_port_num;
+ u32 mdev_port_num;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
@@ -1443,7 +1444,7 @@ static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
return err;
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
switch (mlx5_get_vport_access_method(ibdev)) {
@@ -1487,12 +1488,12 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
return err;
}
-static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
+static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
struct mlx5_core_dev *mdev;
- u8 mdev_port_num;
+ u32 mdev_port_num;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@@ -1521,7 +1522,7 @@ out:
return err;
}
-static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1930,7 +1931,7 @@ uar_done:
print_lib_caps(dev, context->lib_caps);
if (mlx5_ib_lag_should_assign_affinity(dev)) {
- u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
atomic_add_return(
@@ -2088,14 +2089,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
struct mlx5_var_table *var_table = &dev->var_table;
- struct mlx5_ib_dm *mdm;
switch (mentry->mmap_flag) {
case MLX5_IB_MMAP_TYPE_MEMIC:
- mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
- mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
- mdm->size);
- kfree(mdm);
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ mlx5_ib_dm_mmap_free(dev, mentry);
break;
case MLX5_IB_MMAP_TYPE_VAR:
mutex_lock(&var_table->bitmap_lock);
@@ -2220,19 +2218,6 @@ free_bfreg:
return err;
}
-static int add_dm_mmap_entry(struct ib_ucontext *context,
- struct mlx5_ib_dm *mdm,
- u64 address)
-{
- mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
- mdm->mentry.address = address;
- return rdma_user_mmap_entry_insert_range(
- context, &mdm->mentry.rdma_entry,
- mdm->size,
- MLX5_IB_MMAP_DEVICE_MEM << 16,
- (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
-}
-
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
unsigned long idx;
@@ -2334,206 +2319,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
- u32 type)
-{
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
- return -EOPNOTSUPP;
- break;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- if (!capable(CAP_SYS_RAWIO) ||
- !capable(CAP_NET_RAW))
- return -EPERM;
-
- if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
- return -EOPNOTSUPP;
- break;
- }
-
- return 0;
-}
-
-static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
- struct mlx5_ib_dm *dm,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
- u64 start_offset;
- u16 page_idx;
- int err;
- u64 address;
-
- dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-
- err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
- dm->size, attr->alignment);
- if (err)
- return err;
-
- address = dm->dev_addr & PAGE_MASK;
- err = add_dm_mmap_entry(ctx, dm, address);
- if (err)
- goto err_dealloc;
-
- page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- &page_idx,
- sizeof(page_idx));
- if (err)
- goto err_copy;
-
- start_offset = dm->dev_addr & ~PAGE_MASK;
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- &start_offset, sizeof(start_offset));
- if (err)
- goto err_copy;
-
- return 0;
-
-err_copy:
- rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-err_dealloc:
- mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-
- return err;
-}
-
-static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
- struct mlx5_ib_dm *dm,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs,
- int type)
-{
- struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
- u64 act_size;
- int err;
-
- /* Allocation size must a multiple of the basic block size
- * and a power of 2.
- */
- act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
- act_size = roundup_pow_of_two(act_size);
-
- dm->size = act_size;
- err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
- to_mucontext(ctx)->devx_uid, &dm->dev_addr,
- &dm->icm_dm.obj_id);
- if (err)
- return err;
-
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- &dm->dev_addr, sizeof(dm->dev_addr));
- if (err)
- mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
- to_mucontext(ctx)->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
-
- return err;
-}
-
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_dm *dm;
- enum mlx5_ib_uapi_dm_type type;
- int err;
-
- err = uverbs_get_const_default(&type, attrs,
- MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
- MLX5_IB_UAPI_DM_TYPE_MEMIC);
- if (err)
- return ERR_PTR(err);
-
- mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
- type, attr->length, attr->alignment);
-
- err = check_dm_type_support(to_mdev(ibdev), type);
- if (err)
- return ERR_PTR(err);
-
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return ERR_PTR(-ENOMEM);
-
- dm->type = type;
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- err = handle_alloc_dm_memic(context, dm,
- attr,
- attrs);
- break;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm,
- attr, attrs,
- MLX5_SW_ICM_TYPE_STEERING);
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm,
- attr, attrs,
- MLX5_SW_ICM_TYPE_HEADER_MODIFY);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
- if (err)
- goto err_free;
-
- return &dm->ibdm;
-
-err_free:
- kfree(dm);
- return ERR_PTR(err);
-}
-
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
- &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
- struct mlx5_ib_dm *dm = to_mdm(ibdm);
- int ret;
-
- switch (dm->type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
- return 0;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
- dm->size, ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
- if (ret)
- return ret;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
- dm->size, ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
- if (ret)
- return ret;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- kfree(dm);
-
- return 0;
-}
-
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx5_ib_pd *pd = to_mpd(ibpd);
@@ -2780,7 +2565,7 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
- u8 port = (eqe->data.port.port >> 4) & 0xf;
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
@@ -2796,7 +2581,7 @@ static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe
static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
- u8 port = (eqe->data.port.port >> 4) & 0xf;
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
ibev->element.port_num = port;
@@ -3153,7 +2938,7 @@ static u32 get_core_cap_flags(struct ib_device *ibdev,
return ret;
}
-static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -3181,7 +2966,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -3253,7 +3038,7 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
int err;
@@ -3267,7 +3052,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
@@ -3301,7 +3086,7 @@ static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
+static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
{
@@ -3353,7 +3138,7 @@ static const struct file_operations fops_delay_drop_timeout = {
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
- u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
struct mlx5_ib_port *port = &ibdev->port[port_num];
int comps;
int err;
@@ -3399,7 +3184,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
- mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+ mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
/* Log an error, still needed to cleanup the pointers and add
* it back to the list.
*/
@@ -3413,14 +3198,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
- u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
int err;
lockdep_assert_held(&mlx5_ib_multiport_mutex);
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
- mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
+ mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
port_num + 1);
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
return false;
@@ -3456,12 +3241,12 @@ unbind:
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
- int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
port_num + 1);
struct mlx5_ib_multiport_info *mpi;
int err;
- int i;
+ u32 i;
if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
return 0;
@@ -3524,10 +3309,10 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
- int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
port_num + 1);
- int i;
+ u32 i;
if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
return;
@@ -3540,7 +3325,8 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
kfree(dev->port[i].mp.mpi);
dev->port[i].mp.mpi = NULL;
} else {
- mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
+ i + 1);
mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
}
}
@@ -3817,20 +3603,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
- mlx5_ib_dm,
- UVERBS_OBJECT_DM,
- UVERBS_METHOD_DM_ALLOC,
- UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_MANDATORY),
- UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- UVERBS_ATTR_TYPE(u16),
- UA_OPTIONAL),
- UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
- enum mlx5_ib_uapi_dm_type,
- UA_OPTIONAL));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_flow_action,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
@@ -3852,10 +3624,10 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
- UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
@@ -3892,8 +3664,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
- mlx5_ib_internal_fill_odp_caps(dev);
-
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -4033,12 +3803,6 @@ static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
};
-static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
- .alloc_dm = mlx5_ib_alloc_dm,
- .dealloc_dm = mlx5_ib_dealloc_dm,
- .reg_dm_mr = mlx5_ib_reg_dm_mr,
-};
-
static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -4161,7 +3925,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num = 0;
+ u32 port_num = 0;
int err;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
@@ -4174,7 +3938,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
/* Register only for native ports */
err = mlx5_add_netdev_notifier(dev, port_num);
- if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
+ if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
/*
* We don't enable ETH interface for
* 1. IB representors
@@ -4198,7 +3962,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
+ u32 port_num;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -4711,7 +4475,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 88cc26e008fc..e9a3f34a30b8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -166,6 +166,7 @@ enum mlx5_ib_mmap_type {
MLX5_IB_MMAP_TYPE_VAR = 2,
MLX5_IB_MMAP_TYPE_UAR_WC = 3,
MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};
struct mlx5_bfreg_info {
@@ -406,7 +407,7 @@ struct mlx5_ib_qp_base {
struct mlx5_ib_qp_trans {
struct mlx5_ib_qp_base base;
u16 xrcdn;
- u8 alt_port;
+ u32 alt_port;
u8 atomic_rd_en;
u8 resp_depth;
};
@@ -453,7 +454,7 @@ struct mlx5_ib_dct {
struct mlx5_ib_gsi_qp {
struct ib_qp *rx_qp;
- u8 port_num;
+ u32 port_num;
struct ib_qp_cap cap;
struct ib_cq *cq;
struct mlx5_ib_gsi_wr *outstanding_wrs;
@@ -490,7 +491,7 @@ struct mlx5_ib_qp {
struct mutex mutex;
/* cached variant of create_flags from struct ib_qp_init_attr */
u32 flags;
- u8 port;
+ u32 port;
u8 state;
int max_inline_data;
struct mlx5_bf bf;
@@ -547,11 +548,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
return container_of(wr, struct mlx5_umr_wr, wr);
}
-struct mlx5_shared_mr_info {
- int mr_id;
- struct ib_umem *umem;
-};
-
enum mlx5_ib_cq_pr_flags {
MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
};
@@ -623,20 +619,6 @@ struct mlx5_user_mmap_entry {
u32 page_idx;
};
-struct mlx5_ib_dm {
- struct ib_dm ibdm;
- phys_addr_t dev_addr;
- u32 type;
- size_t size;
- union {
- struct {
- u32 obj_id;
- } icm_dm;
- /* other dm types specific params should be added here */
- };
- struct mlx5_user_mmap_entry mentry;
-};
-
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
@@ -654,47 +636,69 @@ struct mlx5_ib_dm {
atomic64_add(value, &((mr)->odp_stats.counter_name))
struct mlx5_ib_mr {
- struct ib_mr ibmr;
- void *descs;
- dma_addr_t desc_map;
- int ndescs;
- int data_length;
- int meta_ndescs;
- int meta_length;
- int max_descs;
- int desc_size;
- int access_mode;
- unsigned int page_shift;
- struct mlx5_core_mkey mmkey;
- struct ib_umem *umem;
- struct mlx5_shared_mr_info *smr_info;
- struct list_head list;
- struct mlx5_cache_ent *cache_ent;
- u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
- struct mlx5_core_sig_ctx *sig;
- void *descs_alloc;
- int access_flags; /* Needed for rereg MR */
-
- struct mlx5_ib_mr *parent;
- /* Needed for IB_MR_TYPE_INTEGRITY */
- struct mlx5_ib_mr *pi_mr;
- struct mlx5_ib_mr *klm_mr;
- struct mlx5_ib_mr *mtt_mr;
- u64 data_iova;
- u64 pi_iova;
-
- /* For ODP and implicit */
- struct xarray implicit_children;
+ struct ib_mr ibmr;
+ struct mlx5_core_mkey mmkey;
+
+ /* User MR data */
+ struct mlx5_cache_ent *cache_ent;
+ struct ib_umem *umem;
+
+ /* This is zero'd when the MR is allocated */
union {
- struct list_head elm;
- struct work_struct work;
- } odp_destroy;
- struct ib_odp_counters odp_stats;
- bool is_odp_implicit;
+ /* Used only while the MR is in the cache */
+ struct {
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ struct mlx5_async_work cb_work;
+ /* Cache list element */
+ struct list_head list;
+ };
- struct mlx5_async_work cb_work;
+ /* Used only by kernel MRs (umem == NULL) */
+ struct {
+ void *descs;
+ void *descs_alloc;
+ dma_addr_t desc_map;
+ int max_descs;
+ int ndescs;
+ int desc_size;
+ int access_mode;
+
+ /* For Kernel IB_MR_TYPE_INTEGRITY */
+ struct mlx5_core_sig_ctx *sig;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr *klm_mr;
+ struct mlx5_ib_mr *mtt_mr;
+ u64 data_iova;
+ u64 pi_iova;
+ int meta_ndescs;
+ int meta_length;
+ int data_length;
+ };
+
+ /* Used only by User MRs (umem != NULL) */
+ struct {
+ unsigned int page_shift;
+ /* Current access_flags */
+ int access_flags;
+
+ /* For User ODP */
+ struct mlx5_ib_mr *parent;
+ struct xarray implicit_children;
+ union {
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+ };
+ };
};
+/* Zero the fields in the mr that are variant depending on usage */
+static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
+{
+ memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
+}
+
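
mlx5_clear_mr() uses the zero-from-member-to-end idiom: everything from 'out' onward is variant state, so a single memset resets the union while preserving the fields declared before it. A self-contained illustration with an invented struct layout:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_mr {			/* invented layout */
	int keep_a;		/* stable across reuse */
	void *keep_b;		/* stable across reuse */
	int out[4];		/* first variant field */
	int more;		/* ...and everything after it */
};

static void clear_variant(struct toy_mr *mr)
{
	/* Zero from 'out' to the end of the struct, as mlx5_clear_mr()
	 * does with its own 'out' member. */
	memset(mr->out, 0, sizeof(*mr) - offsetof(struct toy_mr, out));
}

int main(void)
{
	struct toy_mr mr = { .keep_a = 1, .out = { 9, 9, 9, 9 }, .more = 9 };

	clear_variant(&mr);
	printf("%d %d %d\n", mr.keep_a, mr.out[0], mr.more);	/* 1 0 0 */
	return 0;
}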
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
@@ -822,7 +826,7 @@ struct mlx5_roce {
atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
- u8 native_port_num;
+ u32 native_port_num;
};
struct mlx5_ib_port {
@@ -837,7 +841,7 @@ struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
- u8 port_num;
+ u32 port_num;
};
enum mlx5_ib_dbg_cc_types {
@@ -1063,6 +1067,7 @@ struct mlx5_ib_dev {
struct mutex slow_path_mutex;
struct ib_odp_caps odp_caps;
u64 odp_max_size;
+ struct mutex odp_eq_mutex;
struct mlx5_ib_pf_eq odp_pf_eq;
struct xarray odp_mkeys;
@@ -1170,11 +1175,6 @@ static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
return container_of(msrq, struct mlx5_ib_srq, msrq);
}
-static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
-{
- return container_of(ibdm, struct mlx5_ib_dm, ibdm);
-}
-
static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mlx5_ib_mr, ibmr);
@@ -1268,8 +1268,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
-void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
-void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1285,7 +1284,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
int data_sg_nents, unsigned int *data_sg_offset,
struct scatterlist *meta_sg, int meta_sg_nents,
unsigned int *meta_sg_offset);
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -1300,13 +1299,13 @@ int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
-int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey);
-int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
-int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
@@ -1317,8 +1316,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
unsigned int entry, int access_flags);
-void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -1332,18 +1329,13 @@ int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
@@ -1357,12 +1349,12 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pf_eq *eq)
{
- return;
+ return 0;
}
-
-static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
@@ -1397,22 +1389,22 @@ int __mlx5_ib_add(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile);
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_info *info);
+ u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
- u8 port, int state);
+ u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_stats *stats);
-int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ u32 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
u64 guid, int type);
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
const struct ib_gid_attr *attr);
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
-void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
@@ -1435,10 +1427,10 @@ void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
- u8 ib_port_num,
- u8 *native_port_num);
+ u32 ib_port_num,
+ u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
- u8 port_num);
+ u32 port_num);
extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
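
The change running through this header, and through every driver below, is the widening of port-number parameters from u8 to u32 so the verbs layer can address more than 255 ports. A minimal standalone C sketch of the truncation a u8 prototype permits; the function names here are hypothetical and not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: a u8 port argument silently drops the high bits. */
static void query_port_u8(uint8_t port)   { printf("u8  sees port %u\n", port); }
static void query_port_u32(uint32_t port) { printf("u32 sees port %u\n", port); }

int main(void)
{
	uint32_t port = 260;		/* hypothetical port index above 255 */

	query_port_u8(port);		/* prints 4: 260 truncated mod 256 */
	query_port_u32(port);		/* prints 260: value preserved */
	return 0;
}
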
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index db05b0e0a8d7..4388afeff251 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
+#include "dm.h"
#include "mlx5_ib.h"
/*
@@ -119,8 +120,6 @@ mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
create_mkey_callback, context);
}
-static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
@@ -590,6 +589,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
ent->available_mrs--;
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
+
+ mlx5_clear_mr(mr);
}
mr->access_flags = access_flags;
return mr;
@@ -615,42 +616,20 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
ent->available_mrs--;
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
- break;
+ mlx5_clear_mr(mr);
+ return mr;
}
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
}
-
- if (!mr)
- req_ent->miss++;
-
- return mr;
-}
-
-static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
-{
- struct mlx5_cache_ent *ent = mr->cache_ent;
-
- mr->cache_ent = NULL;
- spin_lock_irq(&ent->lock);
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
+ req_ent->miss++;
+ return NULL;
}
-void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
- if (!ent)
- return;
-
- if (mlx5_mr_cache_invalidate(mr)) {
- detach_mr_from_cache(mr);
- destroy_mkey(dev, mr);
- kfree(mr);
- return;
- }
-
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;
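
With this hunk, mlx5_mr_cache_free() becomes static and only re-lists the MR on its cache entry, while get_cache_mr() returns as soon as it pops one and counts a miss only after every bucket came up empty. A userspace pthreads sketch of the same spinlock-guarded free-list discipline; the types and counters are hypothetical, and the spinlock is assumed already initialized:

#include <pthread.h>

struct obj { struct obj *next; };

struct cache_ent {
	pthread_spinlock_t lock;
	struct obj *head;
	unsigned long available, miss;
};

static struct obj *cache_get(struct cache_ent *ent)
{
	struct obj *o = NULL;

	pthread_spin_lock(&ent->lock);
	if (ent->head) {		/* pop the first free object */
		o = ent->head;
		ent->head = o->next;
		ent->available--;
	} else {
		ent->miss++;		/* caller falls back to a fresh allocation */
	}
	pthread_spin_unlock(&ent->lock);
	return o;
}

static void cache_put(struct cache_ent *ent, struct obj *o)
{
	pthread_spin_lock(&ent->lock);
	o->next = ent->head;		/* push back on the free list */
	ent->head = o;
	ent->available++;
	pthread_spin_unlock(&ent->lock);
}

The kernel code takes spin_lock_irq() rather than a plain lock, presumably because the entry lists are also reached from interrupt-driven completion paths.
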
@@ -993,8 +972,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
- mr->access_flags = access_flags;
- mr->desc_size = sizeof(struct mlx5_mtt);
mr->mmkey.iova = iova;
mr->mmkey.size = umem->length;
mr->mmkey.pd = to_mpd(pd)->pdn;
@@ -1028,7 +1005,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
*/
might_sleep();
- gfp_mask |= __GFP_ZERO;
+ gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
/*
* If the system already has a suitable high order page then just use
@@ -1505,7 +1482,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
*/
err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
if (err) {
- dereg_mr(dev, mr);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
}
@@ -1524,6 +1501,9 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
return ERR_PTR(-EOPNOTSUPP);
+ err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
+ if (err)
+ return ERR_PTR(err);
if (!start && length == U64_MAX) {
if (iova != 0)
return ERR_PTR(-EINVAL);
@@ -1562,7 +1542,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
err_dereg_mr:
- dereg_mr(dev, mr);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
@@ -1659,19 +1639,19 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
return &mr->ibmr;
err_dereg_mr:
- dereg_mr(dev, mr);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
/**
- * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * revoke_mr - Fence all DMA on the MR
* @mr: The MR to fence
*
* Upon return the NIC will not be doing any DMA to the pages under the MR,
- * and any DMA inprogress will be completed. Failure of this function
+ * and any DMA in progress will be completed. Failure of this function
* indicates the HW has failed catastrophically.
*/
-int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
+static int revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_umr_wr umrwr = {};
@@ -1765,7 +1745,7 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
 * with it. This ensures the change is atomic relative to any use of the
* MR.
*/
- err = mlx5_mr_cache_invalidate(mr);
+ err = revoke_mr(mr);
if (err)
return err;
@@ -1844,7 +1824,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* Only one active MR can refer to a umem at one time, revoke
* the old MR before assigning the umem to the new one.
*/
- err = mlx5_mr_cache_invalidate(mr);
+ err = revoke_mr(mr);
if (err)
return ERR_PTR(err);
umem = mr->umem;
@@ -1931,7 +1911,7 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (mr->descs) {
+ if (!mr->umem && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -1943,69 +1923,82 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
}
}
-static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
- if (mr->sig) {
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ int rc;
+
+ /*
+ * Any async use of the MR must hold the refcount; once the refcount
+ * goes to zero no other thread (such as ODP page faults, prefetch, or
+ * UMR activity) can touch the mkey, so it is safe to destroy it.
+ */
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ refcount_read(&mr->mmkey.usecount) != 0 &&
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
+ NULL, GFP_KERNEL);
+
+ if (mr->mtt_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->mtt_mr = NULL;
+ }
+ if (mr->klm_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->klm_mr = NULL;
+ }
+
if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_wire.psv_idx))
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
- xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
kfree(mr->sig);
mr->sig = NULL;
}
+ /* Stop DMA */
+ if (mr->cache_ent) {
+ if (revoke_mr(mr)) {
+ spin_lock_irq(&mr->cache_ent->lock);
+ mr->cache_ent->total_mrs--;
+ spin_unlock_irq(&mr->cache_ent->lock);
+ mr->cache_ent = NULL;
+ }
+ }
if (!mr->cache_ent) {
- destroy_mkey(dev, mr);
- mlx5_free_priv_descs(mr);
+ rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
+ if (rc)
+ return rc;
}
-}
-static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
- struct ib_umem *umem = mr->umem;
+ if (mr->umem) {
+ bool is_odp = is_odp_mr(mr);
- /* Stop all DMA */
- if (is_odp_mr(mr))
- mlx5_ib_fence_odp_mr(mr);
- else if (is_dmabuf_mr(mr))
- mlx5_ib_fence_dmabuf_mr(mr);
- else
- clean_mr(dev, mr);
-
- if (umem) {
- if (!is_odp_mr(mr))
- atomic_sub(ib_umem_num_pages(umem),
+ if (!is_odp)
+ atomic_sub(ib_umem_num_pages(mr->umem),
&dev->mdev->priv.reg_pages);
- ib_umem_release(umem);
+ ib_umem_release(mr->umem);
+ if (is_odp)
+ mlx5_ib_free_odp_mr(mr);
}
- if (mr->cache_ent)
+ if (mr->cache_ent) {
mlx5_mr_cache_free(dev, mr);
- else
+ } else {
+ mlx5_free_priv_descs(mr);
kfree(mr);
-}
-
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-{
- struct mlx5_ib_mr *mmr = to_mmr(ibmr);
-
- if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
- dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
- dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
}
-
- if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
- mlx5_ib_free_implicit_mr(mmr);
- return 0;
- }
-
- dereg_mr(to_mdev(ibmr->device), mmr);
-
return 0;
}
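
The comment near the top of mlx5_ib_dereg_mr() is the heart of this unified teardown: destruction may only proceed once the mkey's use count has drained. A small C11 sketch of refcount-gated destruction; the names are hypothetical, and where this sketch spins, the kernel waits on a completion inside mlx5r_deref_wait_odp_mkey():

#include <stdatomic.h>
#include <sched.h>
#include <stdlib.h>

struct mkey { atomic_int usecount; };

static void mkey_get(struct mkey *k) { atomic_fetch_add(&k->usecount, 1); }
static void mkey_put(struct mkey *k) { atomic_fetch_sub(&k->usecount, 1); }

static void mkey_destroy(struct mkey *k)
{
	/* The key was already unpublished (xa_erase in the patch), so no
	 * new users can appear; wait out the in-flight ones. */
	while (atomic_load(&k->usecount) != 0)
		sched_yield();
	free(k);
}
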
@@ -2177,10 +2170,10 @@ err_free_descs:
destroy_mkey(dev, mr);
mlx5_free_priv_descs(mr);
err_free_mtt_mr:
- dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
+ mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
mr->mtt_mr = NULL;
err_free_klm_mr:
- dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
+ mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
mr->klm_mr = NULL;
err_destroy_psv:
if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b103555b1f5d..782b2af8f211 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -181,64 +181,29 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
}
}
-static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
-{
- struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
-
- /* Ensure mlx5_ib_invalidate_range() will not touch the MR any more */
- mutex_lock(&odp->umem_mutex);
- if (odp->npages) {
- mlx5_mr_cache_invalidate(mr);
- ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),
- ib_umem_end(odp));
- WARN_ON(odp->npages);
- }
- odp->private = NULL;
- mutex_unlock(&odp->umem_mutex);
-
- if (!mr->cache_ent) {
- mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
- WARN_ON(mr->descs);
- }
-}
-
/*
* This must be called after the mr has been removed from implicit_children.
 * NOTE: The MR does not necessarily have to be empty here; parallel page
 * faults could have raced with the free process and added pages to it.
*/
-static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
+static void free_implicit_child_mr_work(struct work_struct *work)
{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
struct mlx5_ib_mr *imr = mr->parent;
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
mlx5r_deref_wait_odp_mkey(&mr->mmkey);
- if (need_imr_xlt) {
- mutex_lock(&odp_imr->umem_mutex);
- mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ATOMIC);
- mutex_unlock(&odp_imr->umem_mutex);
- }
-
- dma_fence_odp_mr(mr);
-
- mr->parent = NULL;
- mlx5_mr_cache_free(mr_to_mdev(mr), mr);
- ib_umem_odp_release(odp);
-}
-
-static void free_implicit_child_mr_work(struct work_struct *work)
-{
- struct mlx5_ib_mr *mr =
- container_of(work, struct mlx5_ib_mr, odp_destroy.work);
- struct mlx5_ib_mr *imr = mr->parent;
+ mutex_lock(&odp_imr->umem_mutex);
+ mlx5_ib_update_xlt(mr->parent, ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT,
+ 1, 0,
+ MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
- free_implicit_child_mr(mr, true);
mlx5r_deref_odp_mkey(&imr->mmkey);
}
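
free_implicit_child_mr_work() recovers its MR from the embedded work item via container_of() and then funnels into the common mlx5_ib_dereg_mr() path. A self-contained sketch of that embedding pattern, with hypothetical struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*fn)(struct work *w); };

struct child_mr {
	int key;
	struct work destroy_work;	/* embedded, queued for deferred free */
};

static void destroy_fn(struct work *w)
{
	struct child_mr *mr = container_of(w, struct child_mr, destroy_work);

	printf("destroying child mr with key %d\n", mr->key);
}

int main(void)
{
	struct child_mr mr = { .key = 42, .destroy_work = { .fn = destroy_fn } };

	mr.destroy_work.fn(&mr.destroy_work);	/* a real workqueue would run this later */
	return 0;
}
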
@@ -352,7 +317,7 @@ const struct mmu_interval_notifier_ops mlx5_mn_ops = {
.invalidate = mlx5_ib_invalidate_range,
};
-void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
struct ib_odp_caps *caps = &dev->odp_caps;
@@ -455,8 +420,10 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
ret = mr = mlx5_mr_cache_alloc(
mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
- if (IS_ERR(mr))
- goto out_umem;
+ if (IS_ERR(mr)) {
+ ib_umem_odp_release(odp);
+ return mr;
+ }
mr->ibmr.pd = imr->ibmr.pd;
mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
@@ -506,9 +473,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
out_lock:
xa_unlock(&imr->implicit_children);
out_mr:
- mlx5_mr_cache_free(mr_to_mdev(imr), mr);
-out_umem:
- ib_umem_odp_release(odp);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ret;
}
@@ -531,8 +496,8 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
if (IS_ERR(imr)) {
- err = PTR_ERR(imr);
- goto out_umem;
+ ib_umem_odp_release(umem_odp);
+ return imr;
}
imr->ibmr.pd = &pd->ibpd;
@@ -562,93 +527,22 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
return imr;
out_mr:
mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
- mlx5_mr_cache_free(dev, imr);
-out_umem:
- ib_umem_odp_release(umem_odp);
+ mlx5_ib_dereg_mr(&imr->ibmr, NULL);
return ERR_PTR(err);
}
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{
- struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
- struct mlx5_ib_dev *dev = mr_to_mdev(imr);
struct mlx5_ib_mr *mtt;
unsigned long idx;
- xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
- /*
- * All work on the prefetch list must be completed, xa_erase() prevented
- * new work from being created.
- */
- mlx5r_deref_wait_odp_mkey(&imr->mmkey);
- /*
- * At this point it is forbidden for any other thread to enter
- * pagefault_mr() on this imr. It is already forbidden to call
- * pagefault_mr() on an implicit child. Due to this additions to
- * implicit_children are prevented.
- * In addition, any new call to destroy_unused_implicit_child_mr()
- * may return immediately.
- */
-
/*
- * Fence the imr before we destroy the children. This allows us to
- * skip updating the XLT of the imr during destroy of the child mkey
- * the imr points to.
+ * If this is an implicit MR it is already invalidated, so we can just
+ * delete the child mkeys.
*/
- mlx5_mr_cache_invalidate(imr);
-
- xa_for_each(&imr->implicit_children, idx, mtt) {
- xa_erase(&imr->implicit_children, idx);
- free_implicit_child_mr(mtt, false);
- }
-
- mlx5_mr_cache_free(dev, imr);
- ib_umem_odp_release(odp_imr);
-}
-
-/**
- * mlx5_ib_fence_odp_mr - Stop all access to the ODP MR
- * @mr: to fence
- *
- * On return no parallel threads will be touching this MR and no DMA will be
- * active.
- */
-void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
-{
- /* Prevent new page faults and prefetch requests from succeeding */
- xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
-
- /* Wait for all running page-fault handlers to finish. */
- mlx5r_deref_wait_odp_mkey(&mr->mmkey);
-
- dma_fence_odp_mr(mr);
-}
-
-/**
- * mlx5_ib_fence_dmabuf_mr - Stop all access to the dmabuf MR
- * @mr: to fence
- *
- * On return no parallel threads will be touching this MR and no DMA will be
- * active.
- */
-void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr)
-{
- struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
-
- /* Prevent new page faults and prefetch requests from succeeding */
- xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
-
- mlx5r_deref_wait_odp_mkey(&mr->mmkey);
-
- dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
- mlx5_mr_cache_invalidate(mr);
- umem_dmabuf->private = NULL;
- ib_umem_dmabuf_unmap_pages(umem_dmabuf);
- dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
-
- if (!mr->cache_ent) {
- mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
- WARN_ON(mr->descs);
+ xa_for_each(&mr->implicit_children, idx, mtt) {
+ xa_erase(&mr->implicit_children, idx);
+ mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
}
}
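
What survives of mlx5_ib_free_odp_mr() erases each child from the index map before deregistering it, so no concurrent lookup can observe a half-destroyed child. The same erase-then-free ordering over a plain array, as a sketch:

#include <stdlib.h>

#define NCHILD 16

static void free_children(void *children[NCHILD])
{
	for (unsigned long idx = 0; idx < NCHILD; idx++) {
		void *mtt = children[idx];

		if (!mtt)
			continue;
		children[idx] = NULL;	/* erase first: no new lookup finds it */
		free(mtt);		/* then release, as mlx5_ib_dereg_mr() does */
	}
}
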
@@ -1637,20 +1531,24 @@ enum {
MLX5_IB_NUM_PF_DRAIN = 64,
};
-static int
-mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
struct mlx5_eq_param param = {};
- int err;
+ int err = 0;
+ mutex_lock(&dev->odp_eq_mutex);
+ if (eq->core)
+ goto unlock;
INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
spin_lock_init(&eq->lock);
eq->dev = dev;
eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
sizeof(struct mlx5_pagefault));
- if (!eq->pool)
- return -ENOMEM;
+ if (!eq->pool) {
+ err = -ENOMEM;
+ goto unlock;
+ }
eq->wq = alloc_workqueue("mlx5_ib_page_fault",
WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
@@ -1661,7 +1559,7 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
}
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
- param = (struct mlx5_eq_param) {
+ param = (struct mlx5_eq_param){
.irq_index = 0,
.nent = MLX5_IB_NUM_PF_EQE,
};
@@ -1677,21 +1575,27 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
goto err_eq;
}
+ mutex_unlock(&dev->odp_eq_mutex);
return 0;
err_eq:
mlx5_eq_destroy_generic(dev->mdev, eq->core);
err_wq:
+ eq->core = NULL;
destroy_workqueue(eq->wq);
err_mempool:
mempool_destroy(eq->pool);
+unlock:
+ mutex_unlock(&dev->odp_eq_mutex);
return err;
}
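
mlx5r_odp_create_eq() turns page-fault EQ setup into create-on-first-use: callers serialize on the new odp_eq_mutex, eq->core doubles as the "already created" flag, and the failure path nulls it so a later registration can retry. A pthreads sketch of that pattern:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t eq_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *eq_core;			/* NULL until the first successful create */

static int create_eq_once(void)
{
	int err = 0;

	pthread_mutex_lock(&eq_mutex);
	if (eq_core)			/* another caller already created it */
		goto unlock;

	eq_core = malloc(64);		/* stand-in for the real EQ setup */
	if (!eq_core)
		err = -1;		/* eq_core stays NULL, so a retry is possible */
unlock:
	pthread_mutex_unlock(&eq_mutex);
	return err;
}

The mempool created earlier in the function serves the same robustness goal: MLX5_IB_NUM_PF_DRAIN pagefault records are reserved up front so the fault path never depends on a fresh allocation succeeding.
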
static int
-mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
int err;
+ if (!eq->core)
+ return 0;
mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
cancel_work_sync(&eq->work);
@@ -1735,6 +1639,8 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret = 0;
+ internal_fill_odp_caps(dev);
+
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return ret;
@@ -1748,8 +1654,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
}
}
- ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
-
+ mutex_init(&dev->odp_eq_mutex);
return ret;
}
@@ -1758,7 +1663,7 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return;
- mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
+ mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
}
int mlx5_ib_odp_init(void)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f5a52a6fae43..9282eb10bfae 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -67,7 +67,7 @@ struct mlx5_modify_raw_qp_param {
struct mlx5_rate_limit rl;
u8 rq_q_ctr_id;
- u16 port;
+ u32 port;
};
static void get_cqs(enum ib_qp_type qp_type,
@@ -3146,6 +3146,19 @@ enum {
MLX5_PATH_FLAG_COUNTER = 1 << 2,
};
+static int mlx5_to_ib_rate_map(u8 rate)
+{
+ static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
+ IB_RATE_25_GBPS, IB_RATE_100_GBPS,
+ IB_RATE_200_GBPS, IB_RATE_50_GBPS,
+ IB_RATE_400_GBPS };
+
+ if (rate < ARRAY_SIZE(rates))
+ return rates[rate];
+
+ return rate - MLX5_STAT_RATE_OFFSET;
+}
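
The old decode in the next hunk applied `static_rate - 5` to every value, which mangles the low rate codes that are really enum indices; mlx5_to_ib_rate_map() looks those up in a table and keeps the offset form only for larger values. A standalone sketch of the lookup-with-fallback shape, with placeholder rate values:

#include <stdio.h>

#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
#define STAT_RATE_OFFSET	5	/* placeholder for MLX5_STAT_RATE_OFFSET */

static int rate_map(unsigned char rate)
{
	static const int rates[] = { 0, 56, 25, 100, 200, 50, 400 };

	if (rate < ARRAY_SIZE(rates))
		return rates[rate];		/* low codes index an enum table */

	return rate - STAT_RATE_OFFSET;		/* higher codes are offset-encoded */
}

int main(void)
{
	printf("%d %d\n", rate_map(3), rate_map(8));	/* 100 and 3 */
	return 0;
}
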
+
static int ib_to_mlx5_rate_map(u8 rate)
{
switch (rate) {
@@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
static_rate = MLX5_GET(ads, path, stat_rate);
- rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
+ rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
if (MLX5_GET(ads, path, grh) ||
ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index 16145fda68d0..c0ddf7b3c6e2 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -7,6 +7,8 @@
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#define UVERBS_MODULE_NAME mlx5_ib
@@ -23,6 +25,174 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
&mpd->pdn, sizeof(mpd->pdn));
}
+static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+ bool sw_owner_supp;
+ u64 icm_rx;
+ u64 icm_tx;
+ int err;
+
+ sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ icm_rx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_rx);
+ icm_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_tx);
+ } else {
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_esw_vport_context_in, in, other_vport, true);
+
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
+ out);
+
+ if (err)
+ return err;
+
+ icm_rx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_rx);
+
+ icm_tx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_tx);
+ }
+
+ if (sw_owner_supp && icm_rx) {
+ info->vport_steering_icm_rx = icm_rx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
+ }
+
+ if (sw_owner_supp && icm_tx) {
+ info->vport_steering_icm_tx = icm_tx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
+ }
+
+ return 0;
+}
+
+static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ void *out;
+ int err;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ MLX5_SET(query_hca_cap_in, in, function_id, vport);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+out:
+ kfree(out);
+ return err;
+}
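
fill_vport_vhca_id() shows the usual mlx5 mailbox shape: a small zeroed input box on the stack, a heap-allocated output box sized from the interface layout, and a single cleanup label for every exit. A sketch of that shape with a stubbed command executor; all names here are hypothetical:

#include <stdlib.h>
#include <string.h>

/* Stub standing in for mlx5_cmd_exec(); always succeeds here. */
static int cmd_exec(const void *in, size_t in_sz, void *out, size_t out_sz)
{
	(void)in; (void)in_sz;
	memset(out, 0, out_sz);
	return 0;
}

static int query_vhca_id(unsigned short vport, unsigned int *vhca_id)
{
	unsigned char in[16] = { 0 };	/* input mailbox is small: stack */
	size_t out_sz = 4096;
	unsigned char *out;
	int err;

	out = calloc(1, out_sz);	/* output mailbox is large: heap */
	if (!out)
		return -1;

	memcpy(in, &vport, sizeof(vport));	/* stand-in for MLX5_SET() */

	err = cmd_exec(in, sizeof(in), out, out_sz);
	if (err)
		goto out;		/* every exit funnels through one label */

	memcpy(vhca_id, out, sizeof(*vhca_id));	/* stand-in for MLX5_GET() */
out:
	free(out);
	return err;
}
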
+
+static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_eswitch_rep *rep;
+ int err;
+
+ rep = dev->port[port_num - 1].rep;
+ if (!rep)
+ return -EOPNOTSUPP;
+
+ info->vport = rep->vport;
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ err = fill_vport_vhca_id(mdev, rep->vport, info);
+ if (err)
+ return err;
+ }
+
+ info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;
+
+ err = fill_vport_icm_addr(mdev, rep->vport, info);
+ if (err)
+ return err;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(mdev->priv.eswitch)) {
+ info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
+ mdev->priv.eswitch, rep->vport);
+ info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
+ }
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_uapi_query_port info = {};
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 port_num;
+ int ret;
+
+ if (uverbs_copy_from(&port_num, attrs,
+ MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
+ return -EFAULT;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!rdma_is_port_valid(&dev->ib_dev, port_num))
+ return -EINVAL;
+
+ if (mlx5_eswitch_mode(dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
+ ret = fill_switchdev_info(dev, port_num, &info);
+ if (ret)
+ return ret;
+ }
+
+ return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
+ sizeof(info));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_QUERY_PORT,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_QUERY_PORT,
+ UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
+ reg_c0),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_device,
+ UVERBS_OBJECT_DEVICE,
+ &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT));
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_PD_QUERY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
@@ -41,5 +211,8 @@ const struct uapi_definition mlx5_ib_std_types_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE(
UVERBS_OBJECT_PD,
&mlx5_ib_pd),
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_DEVICE,
+ &mlx5_ib_device),
{},
};
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index f051f4e06b53..3df1f5ff7932 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -91,7 +91,7 @@ static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
}
}
-enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port)
{
if (mthca_is_memfree(dev)) {
/* Handle old Arbel FW */
@@ -131,7 +131,7 @@ static u8 ib_rate_to_tavor(u8 static_rate)
}
}
-u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port)
{
u8 rate;
@@ -293,7 +293,7 @@ int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
struct mthca_ah *ah = to_mah(ibah);
struct mthca_dev *dev = to_mdev(ibah->device);
- u8 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
+ u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
/* Only implement for MAD and memfree ah for now. */
if (ah->type == MTHCA_AH_ON_HCA)
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index a445160de3e1..a4a9d871d00e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -546,7 +546,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
int qpn,
- int port,
+ u32 port,
struct mthca_qp *qp,
struct ib_udata *udata);
void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
@@ -559,13 +559,13 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
struct ib_ud_header *header);
int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr);
int mthca_ah_grh_present(struct mthca_ah *ah);
-u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port);
-enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port);
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 99aa8183a7f2..04252700790e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -162,7 +162,7 @@ static void node_desc_override(struct ib_device *dev,
}
static void forward_trap(struct mthca_dev *dev,
- u8 port_num,
+ u32 port_num,
const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
@@ -196,7 +196,7 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1a3dd07f993b..522bb606120e 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -127,7 +127,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
}
static int mthca_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
+ u32 port, struct ib_port_attr *props)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -194,7 +194,7 @@ static int mthca_modify_device(struct ib_device *ibdev,
}
static int mthca_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
+ u32 port, int port_modify_mask,
struct ib_port_modify *props)
{
struct mthca_set_ib_param set_ib;
@@ -223,7 +223,7 @@ out:
}
static int mthca_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 *pkey)
+ u32 port, u16 index, u16 *pkey)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -251,7 +251,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
return err;
}
-static int mthca_query_gid(struct ib_device *ibdev, u8 port,
+static int mthca_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
@@ -1051,7 +1051,7 @@ out:
return err;
}
-static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 07cfc0934b17..69bba0ef4a5d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1370,7 +1370,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
int qpn,
- int port,
+ u32 port,
struct mthca_qp *qp,
struct ib_udata *udata)
{
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 699a8b719ed6..88c45928301f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -250,7 +250,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
}
int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 35cf2e2ff391..2626679df31d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -57,7 +57,7 @@ int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 9a834a9cca0e..4882b3156edb 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -77,12 +77,12 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
guid[7] = mac_addr[5];
}
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
-static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int ocrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 3acb5c10b155..58619ce64d0d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -54,7 +54,7 @@
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>
-int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index > 0)
return -EINVAL;
@@ -150,7 +150,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
}
int ocrdma_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
+ u32 port, struct ib_port_attr *props)
{
enum ib_port_state port_state;
struct ocrdma_dev *dev;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 425d554e7f3f..b1c5fad81603 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -53,13 +53,14 @@ int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
-int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int ocrdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
enum rdma_protocol_type
-ocrdma_query_protocol(struct ib_device *device, u8 port_num);
+ocrdma_query_protocol(struct ib_device *device, u32 port_num);
void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
-int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 8e7c069e1a2d..8334a9850220 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -53,7 +53,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define QEDR_WQ_MULTIPLIER_DFT (3)
-static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
enum ib_event_type type)
{
struct ib_event ibev;
@@ -66,7 +66,7 @@ static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
}
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -81,7 +81,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
-static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -100,7 +100,7 @@ static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index c4bc58736e48..1715fbe0719d 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
- &qp->iwarp_cm_flags))
+ &qp->iwarp_cm_flags)) {
+ rc = -ENODEV;
goto err; /* QP already being destroyed */
+ }
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
if (rc) {
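
The qedr_iw_connect() hunk above fixes a classic goto-err slip: the early bail-out jumped to the error label before rc was assigned, returning stale data (possibly 0, which would mask the failure). A minimal reproduction of the bug class and the fix:

#include <errno.h>

static int connect_example(int already_destroying)
{
	int rc;

	if (already_destroying) {
		rc = -ENODEV;	/* the added assignment: fail explicitly */
		goto err;
	}

	rc = 0;			/* the normal connect path */
err:
	return rc;		/* without the assignment, rc was uninitialized here */
}
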
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9ea542270ed4..fdc47ef7d861 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -72,7 +72,7 @@ static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
return ib_copy_to_udata(udata, src, min_len);
}
-int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
return -EINVAL;
@@ -81,7 +81,7 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
-int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *sgid)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -210,7 +210,8 @@ static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
}
}
-int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
{
struct qedr_dev *dev;
struct qed_rdma_port *rdma_port;
@@ -4483,7 +4484,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 2672c32bc2f7..34ad47515861 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -34,12 +34,13 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
-int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
-int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid);
-int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
@@ -92,11 +93,11 @@ int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in_mad,
struct ib_mad *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index);
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+int qedr_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
#endif
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ee211423058a..88497739029e 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -630,7 +630,7 @@ struct qib_pportdata {
u8 rx_pol_inv;
u8 hw_pidx; /* physical port index */
- u8 port; /* IB port number and index into dd->pports - 1 */
+ u32 port; /* IB port number and index into dd->pports - 1 */
u8 delay_mult;
@@ -1200,10 +1200,10 @@ static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
return container_of(ibp, struct qib_pportdata, ibport_data);
}
-static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u32 port)
{
struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+ u32 pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
WARN_ON(pidx >= dd->num_pports);
return &dd->pport[pidx].ibport_data;
@@ -1303,11 +1303,6 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
-static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
-{
- return ppd->sdma_descq_added == ppd->sdma_descq_removed;
-}
-
/* must be called under qib_sdma_lock */
static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
{
@@ -1364,27 +1359,6 @@ static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
}
-static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
-{
- const struct qib_devdata *dd = rcd->dd;
- u32 hdrqtail;
-
- if (dd->flags & QIB_NODMA_RTAIL) {
- __le32 *rhf_addr;
- u32 seq;
-
- rhf_addr = (__le32 *) rcd->rcvhdrq +
- rcd->head + dd->rhf_offset;
- seq = qib_hdrget_seq(rhf_addr);
- hdrqtail = rcd->head;
- if (seq == rcd->seq_cnt)
- hdrqtail++;
- } else
- hdrqtail = qib_get_rcvhdrtail(rcd);
-
- return hdrqtail;
-}
-
/*
* sysfs interface.
*/
@@ -1395,7 +1369,7 @@ extern const struct attribute_group qib_attr_group;
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
-int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj);
void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index f91f23e02283..cf652831d8e7 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -795,11 +795,4 @@ static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
{
return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
}
-
-static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
-{
- return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
- QLOGIC_IB_I_VERS_MASK;
-}
-
#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ff87a67dd7b7..c60e79d214a1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1758,7 +1758,8 @@ bail:
}
/**
- * unlock_exptid - unlock any expected TID entries context still had in use
+ * unlock_expected_tids - unlock any expected TID entries the context
+ * still had in use
* @rcd: ctxt
*
* We don't actually update the chip here, because we do a bulk update
@@ -2247,7 +2248,7 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!iter_is_iovec(from) || !from->nr_segs || !pq)
return -EINVAL;
-
+
return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index e336d778e076..a0c5f3bdc324 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -427,79 +427,21 @@ bail:
return ret;
}
-static int remove_file(struct dentry *parent, char *name)
-{
- struct dentry *tmp;
- int ret;
-
- tmp = lookup_one_len(name, parent, strlen(name));
-
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto bail;
- }
-
- spin_lock(&tmp->d_lock);
- if (simple_positive(tmp)) {
- __d_drop(tmp);
- spin_unlock(&tmp->d_lock);
- simple_unlink(d_inode(parent), tmp);
- } else {
- spin_unlock(&tmp->d_lock);
- }
- dput(tmp);
-
- ret = 0;
-bail:
- /*
- * We don't expect clients to care about the return value, but
- * it's there if they need it.
- */
- return ret;
-}
-
static int remove_device_files(struct super_block *sb,
struct qib_devdata *dd)
{
- struct dentry *dir, *root;
+ struct dentry *dir;
char unit[10];
- int ret, i;
- root = dget(sb->s_root);
- inode_lock(d_inode(root));
snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_one_len(unit, root, strlen(unit));
+ dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit));
if (IS_ERR(dir)) {
- ret = PTR_ERR(dir);
pr_err("Lookup of %s failed\n", unit);
- goto bail;
+ return PTR_ERR(dir);
}
-
- inode_lock(d_inode(dir));
- remove_file(dir, "counters");
- remove_file(dir, "counter_names");
- remove_file(dir, "portcounter_names");
- for (i = 0; i < dd->num_pports; i++) {
- char fname[24];
-
- sprintf(fname, "port%dcounters", i + 1);
- remove_file(dir, fname);
- if (dd->flags & QIB_HAS_QSFP) {
- sprintf(fname, "qsfp%d", i + 1);
- remove_file(dir, fname);
- }
- }
- remove_file(dir, "flash");
- inode_unlock(d_inode(dir));
- ret = simple_rmdir(d_inode(root), dir);
- d_drop(dir);
- dput(dir);
-
-bail:
- inode_unlock(d_inode(root));
- dput(root);
- return ret;
+ simple_recursive_removal(dir, NULL);
+ return 0;
}
/*
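
remove_device_files() sheds the hand-rolled remove_file() loop in favor of one unlocked lookup plus simple_recursive_removal(), which handles locking and post-order deletion itself. The same consolidation in userspace terms, with nftw(3) standing in as the recursive helper:

#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <stdio.h>

static int rm_one(const char *path, const struct stat *sb, int type,
		  struct FTW *ftwbuf)
{
	(void)sb; (void)type; (void)ftwbuf;
	return remove(path);	/* unlinks files; rmdirs on the post-order visit */
}

static int remove_tree(const char *dir)
{
	/* FTW_DEPTH: visit children first, so directories are empty
	 * by the time remove() sees them. */
	return nftw(dir, rm_one, 16, FTW_DEPTH | FTW_PHYS);
}
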
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index b35e1174be22..a9b83bc13f4a 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2609,7 +2609,7 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
}
/**
- * qib_get_faststats - get word counters from chip before they overflow
+ * qib_get_6120_faststats - get word counters from chip before they overflow
* @t: contains a pointer to the qlogic_ib device qib_devdata
*
* This needs more work; in particular, decision on whether we really
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 229dcd6ead95..d1c0bc31869f 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2236,7 +2236,7 @@ static void qib_7220_tidtemplate(struct qib_devdata *dd)
}
/**
- * qib_init_7220_get_base_info - set chip-specific flags for user code
+ * qib_7220_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
* @kinfo: qib_base_info pointer
*
@@ -4411,7 +4411,7 @@ static void writescratch(struct qib_devdata *dd, u32 val)
#define VALID_TS_RD_REG_MASK 0xBF
/**
- * qib_7220_tempsense_read - read register of temp sensor via TWSI
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
* @dd: the qlogic_ib device
* @regnum: register to read from
*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9fe6ea75b45e..ab98b6a3ae1e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -791,28 +791,6 @@ static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
}
/**
- * qib_read_ureg - read virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns -1 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u64 qib_read_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
-
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(regno + (u64 __iomem *)(
- (dd->ureg_align * ctxt) + (dd->userbase ?
- (char __iomem *)dd->userbase :
- (char __iomem *)dd->kregbase + dd->uregbase)));
-}
-
-/**
* qib_write_ureg - write virtualized per-context register
* @dd: device
* @regno: register number
@@ -2513,7 +2491,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
}
/**
- * qib_7322_quiet_serdes - set serdes to txidle
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
* @ppd: the qlogic_ib device
* Called when driver is being unloaded
*/
@@ -3859,7 +3837,7 @@ static void qib_7322_tidtemplate(struct qib_devdata *dd)
}
/**
- * qib_init_7322_get_base_info - set chip-specific flags for user code
+ * qib_7322_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
* @kinfo: qib_base_info pointer
*
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 43c8ee1f46e0..b5a78576c48b 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1609,7 +1609,7 @@ bail:
}
/**
- * allocate eager buffers, both kernel and user contexts.
+ * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
* @rcd: the context we are setting up.
*
 * Allocate the eager TID buffers and program them into the chip.
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 44e2f813024a..ef02f2bfddb2 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -203,7 +203,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
struct qib_devdata *dd = dd_from_dev(ibdev);
@@ -2360,7 +2360,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
*
* This is called by the ib_mad module.
*/
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index ca39a029e4af..1974ceb9d405 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -125,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port)
+ enum ib_qp_type type, u32 port)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
@@ -136,7 +136,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
u16 qpt_mask = dd->qpn_mask;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
- unsigned n;
+ u32 n;
ret = type == IB_QPT_GSI;
n = 1 << (ret + 2 * (port - 1));
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 4f4a09c2dbcd..81b810d006c0 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -687,7 +687,6 @@ static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
- ret = 0;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 62c179fc764b..5e9e66f27064 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -728,7 +728,7 @@ const struct attribute_group qib_attr_group = {
.attrs = qib_attributes,
};
-int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj)
{
struct qib_pportdata *ppd;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 8e0de265ad57..d17d034ecdfd 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1188,7 +1188,7 @@ full:
}
}
-static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
+static int qib_query_port(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
@@ -1273,7 +1273,7 @@ bail:
return ret;
}
-static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
+static int qib_shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
struct qib_devdata *dd = dd_from_dev(ibdev);
@@ -1342,7 +1342,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
struct rvt_qp *qp0;
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
- u8 port_num = ppd->port;
+ u32 port_num = ppd->port;
memset(&attr, 0, sizeof(attr));
attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index dc0e81f3b6f4..07548fac1d8e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -239,10 +239,10 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -273,7 +273,7 @@ void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_notify_qp_reset(struct rvt_qp *qp);
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port);
+ enum ib_qp_type type, u32 port);
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 1b63a491fa72..ff6a40e259d5 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -303,7 +303,7 @@ static struct notifier_block usnic_ib_inetaddr_notifier = {
};
/* End of inet section*/
-static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 3705c6b8b223..57d210ca855a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -270,7 +270,7 @@ static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -332,7 +332,7 @@ int usnic_ib_query_device(struct ib_device *ibdev,
return 0;
}
-int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
@@ -420,7 +420,7 @@ err_out:
return err;
}
-int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 11fe1ba6bbc9..6b82d0f2d184 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -37,16 +37,16 @@
#include "usnic_ib.h"
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
int usnic_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw);
-int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index de57f2fed743..763ddc6f25d1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -344,11 +344,6 @@ static inline enum ib_port_state pvrdma_port_state_to_ib(
return (enum ib_port_state)state;
}
-static inline int ib_port_cap_flags_to_pvrdma(int flags)
-{
- return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
-}
-
static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
return flags;
@@ -410,11 +405,6 @@ static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
return (enum pvrdma_qp_type)type;
}
-static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
-{
- return (enum ib_qp_type)type;
-}
-
static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
return (enum pvrdma_qp_state)state;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 4b6019e7de67..6bf2d2e47d07 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -121,7 +121,7 @@ static int pvrdma_init_device(struct pvrdma_dev *dev)
return 0;
}
-static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct pvrdma_dev *dev = to_vdev(ibdev);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 1d3bdd7bb51d..67769b715126 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -882,7 +882,7 @@ out:
}
/**
- * pvrdma_post_receive - post receive work request entries on a QP
+ * pvrdma_post_recv - post receive work request entries on a QP
* @ibqp: the QP
* @wr: the work request list to post
* @bad_wr: the first bad WR returned
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index fc412cbfd042..19176583dbde 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -125,7 +125,7 @@ int pvrdma_query_device(struct ib_device *ibdev,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+int pvrdma_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct pvrdma_dev *dev = to_vdev(ibdev);
@@ -183,7 +183,7 @@ int pvrdma_query_port(struct ib_device *ibdev, u8 port,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
+int pvrdma_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct pvrdma_dev *dev = to_vdev(ibdev);
@@ -205,7 +205,7 @@ int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int pvrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
int err = 0;
@@ -232,7 +232,7 @@ int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
}
enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
- u8 port)
+ u32 port)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -274,7 +274,7 @@ int pvrdma_modify_device(struct ib_device *ibdev, int mask,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+int pvrdma_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct ib_port_attr attr;
@@ -516,7 +516,7 @@ int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct pvrdma_dev *dev = to_vdev(ibah->device);
struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
- u8 port_num = rdma_ah_get_port_num(ah_attr);
+ u32 port_num = rdma_ah_get_port_num(ah_attr);
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return -EINVAL;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 97ed8f952f6e..544b94d97c3a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -70,30 +70,6 @@ enum pvrdma_mtu {
PVRDMA_MTU_4096 = 5,
};
-static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
-{
- switch (mtu) {
- case PVRDMA_MTU_256: return 256;
- case PVRDMA_MTU_512: return 512;
- case PVRDMA_MTU_1024: return 1024;
- case PVRDMA_MTU_2048: return 2048;
- case PVRDMA_MTU_4096: return 4096;
- default: return -1;
- }
-}
-
-static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
-{
- switch (mtu) {
- case 256: return PVRDMA_MTU_256;
- case 512: return PVRDMA_MTU_512;
- case 1024: return PVRDMA_MTU_1024;
- case 2048: return PVRDMA_MTU_2048;
- case 4096:
- default: return PVRDMA_MTU_4096;
- }
-}
-
enum pvrdma_port_state {
PVRDMA_PORT_NOP = 0,
PVRDMA_PORT_DOWN = 1,
@@ -138,17 +114,6 @@ enum pvrdma_port_width {
PVRDMA_WIDTH_12X = 8,
};
-static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
-{
- switch (width) {
- case PVRDMA_WIDTH_1X: return 1;
- case PVRDMA_WIDTH_4X: return 4;
- case PVRDMA_WIDTH_8X: return 8;
- case PVRDMA_WIDTH_12X: return 12;
- default: return -1;
- }
-}
-
enum pvrdma_port_speed {
PVRDMA_SPEED_SDR = 1,
PVRDMA_SPEED_DDR = 2,
@@ -383,17 +348,17 @@ enum pvrdma_access_flags {
int pvrdma_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata);
-int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+int pvrdma_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-int pvrdma_query_gid(struct ib_device *ibdev, u8 port,
+int pvrdma_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid);
-int pvrdma_query_pkey(struct ib_device *ibdev, u8 port,
+int pvrdma_query_pkey(struct ib_device *ibdev, u32 port,
u16 index, u16 *pkey);
enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
- u8 port);
+ u32 port);
int pvrdma_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props);
-int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
+int pvrdma_modify_port(struct ib_device *ibdev, u32 port,
int mask, struct ib_port_modify *props);
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
index fa5be13a4394..207bc0ed96ff 100644
--- a/drivers/infiniband/sw/rdmavt/mad.c
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -70,7 +70,7 @@
*
* Return: IB_MAD_RESULT_SUCCESS or error
*/
-int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
@@ -82,9 +82,6 @@ int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
* future may choose to implement this but it should not be made into a
* requirement.
*/
- if (ibport_num_to_idx(ibdev, port_num) < 0)
- return -EINVAL;
-
return IB_MAD_RESULT_FAILURE;
}
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
index a9d6eecc3723..1eae5efea4be 100644
--- a/drivers/infiniband/sw/rdmavt/mad.h
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -50,7 +50,7 @@
#include <rdma/rdma_vt.h>
-int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 8fd0128a9336..12ebe041a5da 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -151,15 +151,12 @@ static int rvt_modify_device(struct ib_device *device,
*
* Return: 0 on success
*/
-static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
+static int rvt_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *props)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct rvt_ibport *rvp;
- int port_index = ibport_num_to_idx(ibdev, port_num);
-
- if (port_index < 0)
- return -EINVAL;
+ u32 port_index = ibport_num_to_idx(ibdev, port_num);
rvp = rdi->ports[port_index];
/* props being zeroed by the caller, avoid zeroing it here */
@@ -186,16 +183,13 @@ static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
*
* Return: 0 on success
*/
-static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
+static int rvt_modify_port(struct ib_device *ibdev, u32 port_num,
int port_modify_mask, struct ib_port_modify *props)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct rvt_ibport *rvp;
int ret = 0;
- int port_index = ibport_num_to_idx(ibdev, port_num);
-
- if (port_index < 0)
- return -EINVAL;
+ u32 port_index = ibport_num_to_idx(ibdev, port_num);
rvp = rdi->ports[port_index];
if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
@@ -225,7 +219,7 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
*
* Return: 0 on failure pkey otherwise
*/
-static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
+static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index,
u16 *pkey)
{
/*
@@ -235,11 +229,9 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
* no way to protect against that anyway.
*/
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
- int port_index;
+ u32 port_index;
port_index = ibport_num_to_idx(ibdev, port_num);
- if (port_index < 0)
- return -EINVAL;
if (index >= rvt_get_npkeys(rdi))
return -EINVAL;
@@ -257,12 +249,12 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
*
* Return: 0 on success
*/
-static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
+static int rvt_query_gid(struct ib_device *ibdev, u32 port_num,
int guid_index, union ib_gid *gid)
{
struct rvt_dev_info *rdi;
struct rvt_ibport *rvp;
- int port_index;
+ u32 port_index;
/*
* Driver is responsible for updating the guid table. Which will be used
@@ -270,8 +262,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
* is being done.
*/
port_index = ibport_num_to_idx(ibdev, port_num);
- if (port_index < 0)
- return -EINVAL;
rdi = ib_to_rvt(ibdev);
rvp = rdi->ports[port_index];
@@ -301,16 +291,12 @@ static void rvt_dealloc_ucontext(struct ib_ucontext *context)
return;
}
-static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct ib_port_attr attr;
- int err, port_index;
-
- port_index = ibport_num_to_idx(ibdev, port_num);
- if (port_index < 0)
- return -EINVAL;
+ int err;
immutable->core_cap_flags = rdi->dparms.core_cap_flags;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index d19ff817c2c7..c0fed6510f0b 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -96,16 +96,9 @@
#define __rvt_pr_err_ratelimited(pdev, name, fmt, ...) \
dev_err_ratelimited(&(pdev)->dev, "%s: " fmt, name, ##__VA_ARGS__)
-static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num)
+static inline u32 ibport_num_to_idx(struct ib_device *ibdev, u32 port_num)
{
- struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
- int port_index;
-
- port_index = port_num - 1; /* IB ports start at 1 our arrays at 0 */
- if ((port_index < 0) || (port_index >= rdi->dparms.nports))
- return -EINVAL;
-
- return port_index;
+ return port_num - 1; /* IB ports start at 1, our arrays at 0 */
}
#endif /* DEF_RDMAVT_H */
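The deleted -EINVAL paths assume the caller has already validated port_num, so ibport_num_to_idx() reduces to the 1-based-to-0-based subtraction. A minimal sketch of that division of labor, where rdma_port_ok() is a hypothetical stand-in for the core-side check:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical core-side validation (assumption: the ib core
 * rejects out-of-range ports before driver callbacks run, which
 * is what lets rdmavt drop its per-callback -EINVAL tests). */
static bool rdma_port_ok(uint32_t port_num, uint32_t nports)
{
	return port_num >= 1 && port_num <= nports;
}

/* What remains in the driver: map a 1-based IB port number to a
 * 0-based array index. */
static uint32_t port_to_idx(uint32_t port_num)
{
	return port_num - 1;
}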
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index df0d173d6acb..da2e867a1ed9 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
type = RXE_NETWORK_TYPE_IPV4;
break;
case RDMA_NETWORK_IPV6:
- type = RXE_NETWORK_TYPE_IPV4;
+ type = RXE_NETWORK_TYPE_IPV6;
break;
default:
/* not reached - checked in rxe_av_chk_attr */
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 17a361b8dbb1..2af26737d32d 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -345,7 +345,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
- payload_size(pkt), to_mem_obj, NULL);
+ payload_size(pkt), to_mr_obj, NULL);
if (ret)
return COMPST_ERROR;
@@ -365,7 +365,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
- sizeof(u64), to_mem_obj, NULL);
+ sizeof(u64), to_mr_obj, NULL);
if (ret)
return COMPST_ERROR;
else
@@ -676,7 +676,6 @@ int rxe_completer(void *arg)
/* there is nothing to retry in this case */
if (!wqe || (wqe->state == wqe_state_posted)) {
- pr_warn("Retry attempted without a valid wqe\n");
ret = -EAGAIN;
goto done;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index ac9154f0593d..f469fd1c753d 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -26,7 +26,7 @@ static const char * const rxe_counter_name[] = {
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct rxe_dev *dev = to_rdev(ibdev);
unsigned int cnt;
@@ -41,7 +41,7 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
}
struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS);
/* We support only per port stats */
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 49ee6f96656d..2f369acb46d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -30,8 +30,8 @@ enum rxe_counters {
};
struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num);
+ u32 port_num);
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index);
+ u32 port, int index);
#endif /* RXE_HW_COUNTERS_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0d758760b9ae..ef8061d2fbe0 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
enum copy_direction {
- to_mem_obj,
- from_mem_obj,
+ to_mr_obj,
+ from_mr_obj,
};
-void rxe_mem_init_dma(struct rxe_pd *pd,
- int access, struct rxe_mem *mem);
+void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
- u64 length, u64 iova, int access, struct ib_udata *udata,
- struct rxe_mem *mr);
+int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+ int access, struct ib_udata *udata, struct rxe_mr *mr);
-int rxe_mem_init_fast(struct rxe_pd *pd,
- int max_pages, struct rxe_mem *mem);
+int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
- int length, enum copy_direction dir, u32 *crcp);
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum copy_direction dir, u32 *crcp);
int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
+void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
enum lookup_type {
lookup_local,
lookup_remote,
};
-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
- enum lookup_type type);
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type);
-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
-void rxe_mem_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_entry *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
@@ -116,7 +113,6 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
-struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6e8c41567ba0..9f63947bab12 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
return key;
}
-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- switch (mem->type) {
- case RXE_MEM_TYPE_DMA:
+ switch (mr->type) {
+ case RXE_MR_TYPE_DMA:
return 0;
- case RXE_MEM_TYPE_MR:
- if (iova < mem->iova ||
- length > mem->length ||
- iova > mem->iova + mem->length - length)
+ case RXE_MR_TYPE_MR:
+ if (iova < mr->iova || length > mr->length ||
+ iova > mr->iova + mr->length - length)
return -EFAULT;
return 0;
@@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
| IB_ACCESS_REMOTE_WRITE \
| IB_ACCESS_REMOTE_ATOMIC)
-static void rxe_mem_init(int access, struct rxe_mem *mem)
+static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mem->pelem.index << 8 | rxe_get_key();
+ u32 lkey = mr->pelem.index << 8 | rxe_get_key();
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
- mem->ibmr.lkey = lkey;
- mem->ibmr.rkey = rkey;
- mem->state = RXE_MEM_STATE_INVALID;
- mem->type = RXE_MEM_TYPE_NONE;
- mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+ mr->ibmr.lkey = lkey;
+ mr->ibmr.rkey = rkey;
+ mr->state = RXE_MR_STATE_INVALID;
+ mr->type = RXE_MR_TYPE_NONE;
+ mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-void rxe_mem_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
- struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
+ struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
int i;
- ib_umem_release(mem->umem);
+ ib_umem_release(mr->umem);
- if (mem->map) {
- for (i = 0; i < mem->num_map; i++)
- kfree(mem->map[i]);
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
- kfree(mem->map);
+ kfree(mr->map);
}
}
-static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
int num_map;
- struct rxe_map **map = mem->map;
+ struct rxe_map **map = mr->map;
num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
- mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
- if (!mem->map)
+ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mr->map)
goto err1;
for (i = 0; i < num_map; i++) {
- mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
- if (!mem->map[i])
+ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mr->map[i])
goto err2;
}
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
- mem->map_shift = ilog2(RXE_BUF_PER_MAP);
- mem->map_mask = RXE_BUF_PER_MAP - 1;
+ mr->map_shift = ilog2(RXE_BUF_PER_MAP);
+ mr->map_mask = RXE_BUF_PER_MAP - 1;
- mem->num_buf = num_buf;
- mem->num_map = num_map;
- mem->max_buf = num_map * RXE_BUF_PER_MAP;
+ mr->num_buf = num_buf;
+ mr->num_map = num_map;
+ mr->max_buf = num_map * RXE_BUF_PER_MAP;
return 0;
err2:
for (i--; i >= 0; i--)
- kfree(mem->map[i]);
+ kfree(mr->map[i]);
- kfree(mem->map);
+ kfree(mr->map);
err1:
return -ENOMEM;
}
-void rxe_mem_init_dma(struct rxe_pd *pd,
- int access, struct rxe_mem *mem)
+void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
{
- rxe_mem_init(access, mem);
+ rxe_mr_init(access, mr);
- mem->ibmr.pd = &pd->ibpd;
- mem->access = access;
- mem->state = RXE_MEM_STATE_VALID;
- mem->type = RXE_MEM_TYPE_DMA;
+ mr->ibmr.pd = &pd->ibpd;
+ mr->access = access;
+ mr->state = RXE_MR_STATE_VALID;
+ mr->type = RXE_MR_TYPE_DMA;
}
-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
- u64 length, u64 iova, int access, struct ib_udata *udata,
- struct rxe_mem *mem)
+int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+ int access, struct ib_udata *udata, struct rxe_mr *mr)
{
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
@@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
goto err1;
}
- mem->umem = umem;
+ mr->umem = umem;
num_buf = ib_umem_num_pages(umem);
- rxe_mem_init(access, mem);
+ rxe_mr_init(access, mr);
- err = rxe_mem_alloc(mem, num_buf);
+ err = rxe_mr_alloc(mr, num_buf);
if (err) {
- pr_warn("err %d from rxe_mem_alloc\n", err);
+ pr_warn("err %d from rxe_mr_alloc\n", err);
ib_umem_release(umem);
goto err1;
}
- mem->page_shift = PAGE_SHIFT;
- mem->page_mask = PAGE_SIZE - 1;
+ mr->page_shift = PAGE_SHIFT;
+ mr->page_mask = PAGE_SIZE - 1;
num_buf = 0;
- map = mem->map;
+ map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
}
}
- mem->ibmr.pd = &pd->ibpd;
- mem->umem = umem;
- mem->access = access;
- mem->length = length;
- mem->iova = iova;
- mem->va = start;
- mem->offset = ib_umem_offset(umem);
- mem->state = RXE_MEM_STATE_VALID;
- mem->type = RXE_MEM_TYPE_MR;
+ mr->ibmr.pd = &pd->ibpd;
+ mr->umem = umem;
+ mr->access = access;
+ mr->length = length;
+ mr->iova = iova;
+ mr->va = start;
+ mr->offset = ib_umem_offset(umem);
+ mr->state = RXE_MR_STATE_VALID;
+ mr->type = RXE_MR_TYPE_MR;
return 0;
@@ -201,24 +198,23 @@ err1:
return err;
}
-int rxe_mem_init_fast(struct rxe_pd *pd,
- int max_pages, struct rxe_mem *mem)
+int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
{
int err;
- rxe_mem_init(0, mem);
+ rxe_mr_init(0, mr);
/* In fastreg, we also set the rkey */
- mem->ibmr.rkey = mem->ibmr.lkey;
+ mr->ibmr.rkey = mr->ibmr.lkey;
- err = rxe_mem_alloc(mem, max_pages);
+ err = rxe_mr_alloc(mr, max_pages);
if (err)
goto err1;
- mem->ibmr.pd = &pd->ibpd;
- mem->max_buf = max_pages;
- mem->state = RXE_MEM_STATE_FREE;
- mem->type = RXE_MEM_TYPE_MR;
+ mr->ibmr.pd = &pd->ibpd;
+ mr->max_buf = max_pages;
+ mr->state = RXE_MR_STATE_FREE;
+ mr->type = RXE_MR_TYPE_MR;
return 0;
@@ -226,28 +222,24 @@ err1:
return err;
}
-static void lookup_iova(
- struct rxe_mem *mem,
- u64 iova,
- int *m_out,
- int *n_out,
- size_t *offset_out)
+static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
+ size_t *offset_out)
{
- size_t offset = iova - mem->iova + mem->offset;
+ size_t offset = iova - mr->iova + mr->offset;
int map_index;
int buf_index;
u64 length;
- if (likely(mem->page_shift)) {
- *offset_out = offset & mem->page_mask;
- offset >>= mem->page_shift;
- *n_out = offset & mem->map_mask;
- *m_out = offset >> mem->map_shift;
+ if (likely(mr->page_shift)) {
+ *offset_out = offset & mr->page_mask;
+ offset >>= mr->page_shift;
+ *n_out = offset & mr->map_mask;
+ *m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- length = mem->map[map_index]->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -257,7 +249,7 @@ static void lookup_iova(
map_index++;
buf_index = 0;
}
- length = mem->map[map_index]->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
}
*m_out = map_index;
@@ -266,49 +258,49 @@ static void lookup_iova(
}
}
-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
+void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
{
size_t offset;
int m, n;
void *addr;
- if (mem->state != RXE_MEM_STATE_VALID) {
- pr_warn("mem not in valid state\n");
+ if (mr->state != RXE_MR_STATE_VALID) {
+ pr_warn("mr not in valid state\n");
addr = NULL;
goto out;
}
- if (!mem->map) {
+ if (!mr->map) {
addr = (void *)(uintptr_t)iova;
goto out;
}
- if (mem_check_range(mem, iova, length)) {
+ if (mr_check_range(mr, iova, length)) {
pr_warn("range violation\n");
addr = NULL;
goto out;
}
- lookup_iova(mem, iova, &m, &n, &offset);
+ lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mem->map[m]->buf[n].size) {
+ if (offset + length > mr->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
out:
return addr;
}
/* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mem object starting at iova. Compute incremental value of
- * crc32 if crcp is not zero. caller must hold a reference to mem
+ * a mr object starting at iova. Compute incremental value of
+ * crc32 if crcp is not zero. caller must hold a reference to mr
*/
-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
- enum copy_direction dir, u32 *crcp)
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum copy_direction dir, u32 *crcp)
{
int err;
int bytes;
@@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
if (length == 0)
return 0;
- if (mem->type == RXE_MEM_TYPE_DMA) {
+ if (mr->type == RXE_MR_TYPE_DMA) {
u8 *src, *dest;
- src = (dir == to_mem_obj) ?
- addr : ((void *)(uintptr_t)iova);
+ src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
- dest = (dir == to_mem_obj) ?
- ((void *)(uintptr_t)iova) : addr;
+ dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
memcpy(dest, src, length);
if (crcp)
- *crcp = rxe_crc32(to_rdev(mem->ibmr.device),
- *crcp, dest, length);
+ *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
+ length);
return 0;
}
- WARN_ON_ONCE(!mem->map);
+ WARN_ON_ONCE(!mr->map);
- err = mem_check_range(mem, iova, length);
+ err = mr_check_range(mr, iova, length);
if (err) {
err = -EFAULT;
goto err1;
}
- lookup_iova(mem, iova, &m, &i, &offset);
+ lookup_iova(mr, iova, &m, &i, &offset);
- map = mem->map + m;
+ map = mr->map + m;
buf = map[0]->buf + i;
while (length > 0) {
u8 *src, *dest;
va = (u8 *)(uintptr_t)buf->addr + offset;
- src = (dir == to_mem_obj) ? addr : va;
- dest = (dir == to_mem_obj) ? va : addr;
+ src = (dir == to_mr_obj) ? addr : va;
+ dest = (dir == to_mr_obj) ? va : addr;
bytes = buf->size - offset;
@@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
memcpy(dest, src, bytes);
if (crcp)
- crc = rxe_crc32(to_rdev(mem->ibmr.device),
- crc, dest, bytes);
+ crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
+ bytes);
length -= bytes;
addr += bytes;
@@ -411,7 +401,7 @@ int copy_data(
struct rxe_sge *sge = &dma->sge[dma->cur_sge];
int offset = dma->sge_offset;
int resid = dma->resid;
- struct rxe_mem *mem = NULL;
+ struct rxe_mr *mr = NULL;
u64 iova;
int err;
@@ -424,8 +414,8 @@ int copy_data(
}
if (sge->length && (offset < sge->length)) {
- mem = lookup_mem(pd, access, sge->lkey, lookup_local);
- if (!mem) {
+ mr = lookup_mr(pd, access, sge->lkey, lookup_local);
+ if (!mr) {
err = -EINVAL;
goto err1;
}
@@ -435,9 +425,9 @@ int copy_data(
bytes = length;
if (offset >= sge->length) {
- if (mem) {
- rxe_drop_ref(mem);
- mem = NULL;
+ if (mr) {
+ rxe_drop_ref(mr);
+ mr = NULL;
}
sge++;
dma->cur_sge++;
@@ -449,9 +439,9 @@ int copy_data(
}
if (sge->length) {
- mem = lookup_mem(pd, access, sge->lkey,
- lookup_local);
- if (!mem) {
+ mr = lookup_mr(pd, access, sge->lkey,
+ lookup_local);
+ if (!mr) {
err = -EINVAL;
goto err1;
}
@@ -466,7 +456,7 @@ int copy_data(
if (bytes > 0) {
iova = sge->addr + offset;
- err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
+ err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
if (err)
goto err2;
@@ -480,14 +470,14 @@ int copy_data(
dma->sge_offset = offset;
dma->resid = resid;
- if (mem)
- rxe_drop_ref(mem);
+ if (mr)
+ rxe_drop_ref(mr);
return 0;
err2:
- if (mem)
- rxe_drop_ref(mem);
+ if (mr)
+ rxe_drop_ref(mr);
err1:
return err;
}
@@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
return 0;
}
-/* (1) find the mem (mr or mw) corresponding to lkey/rkey
+/* (1) find the mr corresponding to lkey/rkey
* depending on lookup_type
- * (2) verify that the (qp) pd matches the mem pd
- * (3) verify that the mem can support the requested access
- * (4) verify that mem state is valid
+ * (2) verify that the (qp) pd matches the mr pd
+ * (3) verify that the mr can support the requested access
+ * (4) verify that mr state is valid
*/
-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
- enum lookup_type type)
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type)
{
- struct rxe_mem *mem;
+ struct rxe_mr *mr;
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
int index = key >> 8;
- mem = rxe_pool_get_index(&rxe->mr_pool, index);
- if (!mem)
+ mr = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mr)
return NULL;
- if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
- (type == lookup_remote && mr_rkey(mem) != key) ||
- mr_pd(mem) != pd ||
- (access && !(access & mem->access)) ||
- mem->state != RXE_MEM_STATE_VALID)) {
- rxe_drop_ref(mem);
- mem = NULL;
+ if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
+ (type == lookup_remote && mr_rkey(mr) != key) ||
+ mr_pd(mr) != pd || (access && !(access & mr->access)) ||
+ mr->state != RXE_MR_STATE_VALID)) {
+ rxe_drop_ref(mr);
+ mr = NULL;
}
- return mem;
+ return mr;
}
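The three-clause bounds test in mr_check_range() above is compact; restated standalone with userspace types (same arithmetic):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Same test as the RXE_MR_TYPE_MR branch of mr_check_range():
 * the access window [iova, iova + length) must lie inside the
 * registered region [mr_iova, mr_iova + mr_length). The last
 * clause is phrased to avoid computing iova + length, which
 * could overflow. */
static int range_ok(uint64_t iova, size_t length,
		    uint64_t mr_iova, size_t mr_length)
{
	if (iova < mr_iova || length > mr_length ||
	    iova > mr_iova + mr_length - length)
		return -EFAULT;
	return 0;
}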
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 307d8986e7c9..d24901f2af3f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -8,8 +8,6 @@
#include "rxe_loc.h"
/* info about object pools
- * note that mr and mw share a single index space
- * so that one can map an lkey to the correct type of object
*/
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
- .size = sizeof(struct rxe_mem),
- .elem_offset = offsetof(struct rxe_mem, pelem),
- .cleanup = rxe_mem_cleanup,
+ .size = sizeof(struct rxe_mr),
+ .elem_offset = offsetof(struct rxe_mr, pelem),
+ .cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MR_INDEX,
.min_index = RXE_MIN_MR_INDEX,
},
[RXE_TYPE_MW] = {
.name = "rxe-mw",
- .size = sizeof(struct rxe_mem),
- .elem_offset = offsetof(struct rxe_mem, pelem),
- .flags = RXE_POOL_INDEX,
+ .size = sizeof(struct rxe_mw),
+ .elem_offset = offsetof(struct rxe_mw, pelem),
+ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
},
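lookup_mr() derives the pool index as key >> 8 because rxe_mr_init() packs the index into the upper 24 bits and an 8-bit random key into the low byte. The encoding, restated standalone:

#include <stdint.h>

/* Key layout used by rxe_mr_init() and lookup_mr(): pool index
 * in bits [31:8], per-registration random key in bits [7:0]. */
static uint32_t make_lkey(uint32_t pool_index, uint8_t key)
{
	return pool_index << 8 | key;
}

static uint32_t lkey_to_index(uint32_t lkey)
{
	return lkey >> 8;
}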
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 889290793d75..3664cdae7e1f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
} else {
err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
- from_mem_obj,
+ from_mr_obj,
&crc);
if (err)
return err;
@@ -596,7 +596,7 @@ next_wqe:
if (wqe->mask & WR_REG_MASK) {
if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- struct rxe_mem *rmr;
+ struct rxe_mr *rmr;
rmr = rxe_pool_get_index(&rxe->mr_pool,
wqe->wr.ex.invalidate_rkey >> 8);
@@ -607,14 +607,14 @@ next_wqe:
wqe->status = IB_WC_MW_BIND_ERR;
goto exit;
}
- rmr->state = RXE_MEM_STATE_FREE;
+ rmr->state = RXE_MR_STATE_FREE;
rxe_drop_ref(rmr);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else if (wqe->wr.opcode == IB_WR_REG_MR) {
- struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
+ struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
- rmr->state = RXE_MEM_STATE_VALID;
+ rmr->state = RXE_MR_STATE_VALID;
rmr->access = wqe->wr.wr.reg.access;
rmr->ibmr.lkey = wqe->wr.wr.reg.key;
rmr->ibmr.rkey = wqe->wr.wr.reg.key;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 142f3d8014d8..2b220659bddb 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
- struct rxe_mem *mem = NULL;
+ struct rxe_mr *mr = NULL;
u64 va;
u32 rkey;
u32 resid;
@@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
resid = qp->resp.resid;
pktlen = payload_size(pkt);
- mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
- if (!mem) {
+ mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
+ if (!mr) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
- if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
+ if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
- if (mem_check_range(mem, va, resid)) {
+ if (mr_check_range(mr, va, resid)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
WARN_ON_ONCE(qp->resp.mr);
- qp->resp.mr = mem;
+ qp->resp.mr = mr;
return RESPST_EXECUTE;
err:
- if (mem)
- rxe_drop_ref(mem);
+ if (mr)
+ rxe_drop_ref(mr);
return state;
}
@@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int err;
err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
- data_addr, data_len, to_mem_obj, NULL);
+ data_addr, data_len, to_mr_obj, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
: RESPST_ERR_MALFORMED_WQE;
@@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
int err;
int data_len = payload_size(pkt);
- err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
- data_len, to_mem_obj, NULL);
+ err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
+ to_mr_obj, NULL);
if (err) {
rc = RESPST_ERR_RKEY_VIOLATION;
goto out;
@@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
u64 iova = atmeth_va(pkt);
u64 *vaddr;
enum resp_states ret;
- struct rxe_mem *mr = qp->resp.mr;
+ struct rxe_mr *mr = qp->resp.mr;
- if (mr->state != RXE_MEM_STATE_VALID) {
+ if (mr->state != RXE_MR_STATE_VALID) {
ret = RESPST_ERR_RKEY_VIOLATION;
goto out;
}
@@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
- payload, from_mem_obj, &icrc);
+ err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+ payload, from_mr_obj, &icrc);
if (err)
pr_err("Failed copying memory\n");
@@ -816,8 +816,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
struct rxe_recv_wqe *wqe = qp->resp.wqe;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- if (unlikely(!wqe))
- return RESPST_CLEANUP;
+ if (!wqe)
+ goto finish;
memset(&cqe, 0, sizeof(cqe));
@@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
if (pkt->mask & RXE_IETH_MASK) {
- struct rxe_mem *rmr;
+ struct rxe_mr *rmr;
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = ieth_rkey(pkt);
@@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
wc->ex.invalidate_rkey);
return RESPST_ERROR;
}
- rmr->state = RXE_MEM_STATE_FREE;
+ rmr->state = RXE_MR_STATE_FREE;
rxe_drop_ref(rmr);
}
@@ -917,12 +917,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
return RESPST_ERR_CQ_OVERFLOW;
- if (qp->resp.state == QP_STATE_ERROR)
+finish:
+ if (unlikely(qp->resp.state == QP_STATE_ERROR))
return RESPST_CHK_RESOURCE;
-
- if (!pkt)
+ if (unlikely(!pkt))
return RESPST_DONE;
- else if (qp_type(qp) == IB_QPT_RC)
+ if (qp_type(qp) == IB_QPT_RC)
return RESPST_ACKNOWLEDGE;
else
return RESPST_CLEANUP;
@@ -1056,10 +1056,8 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
- if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
- rc = RESPST_CLEANUP;
- goto out;
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+ return RESPST_CLEANUP;
} else if (pkt->mask & RXE_READ_MASK) {
struct resp_res *res;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index dee5e0e919d2..aeb5e232c195 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -26,7 +26,7 @@ static int rxe_query_device(struct ib_device *dev,
}
static int rxe_query_port(struct ib_device *dev,
- u8 port_num, struct ib_port_attr *attr)
+ u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port;
@@ -54,7 +54,7 @@ static int rxe_query_port(struct ib_device *dev,
}
static int rxe_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
+ u32 port_num, u16 index, u16 *pkey)
{
if (index > 0)
return -EINVAL;
@@ -84,7 +84,7 @@ static int rxe_modify_device(struct ib_device *dev,
}
static int rxe_modify_port(struct ib_device *dev,
- u8 port_num, int mask, struct ib_port_modify *attr)
+ u32 port_num, int mask, struct ib_port_modify *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port;
@@ -101,7 +101,7 @@ static int rxe_modify_port(struct ib_device *dev,
}
static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -121,7 +121,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
rxe_drop_ref(uc);
}
-static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
struct ib_port_immutable *immutable)
{
int err;
@@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr)
@@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_index(mr);
rxe_add_ref(pd);
- rxe_mem_init_dma(pd, access, mr);
+ rxe_mr_init_dma(pd, access, mr);
return &mr->ibmr;
}
@@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr) {
@@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mem_init_user(pd, start, length, iova,
- access, udata, mr);
+ err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
if (err)
goto err3;
@@ -916,9 +915,9 @@ err2:
static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
- mr->state = RXE_MEM_STATE_ZOMBIE;
+ mr->state = RXE_MR_STATE_ZOMBIE;
rxe_drop_ref(mr_pd(mr));
rxe_drop_index(mr);
rxe_drop_ref(mr);
@@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
int err;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
rxe_add_ref(pd);
- err = rxe_mem_init_fast(pd, max_num_sg, mr);
+ err = rxe_mr_init_fast(pd, max_num_sg, mr);
if (err)
goto err2;
@@ -962,7 +961,7 @@ err1:
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
struct rxe_map *map;
struct rxe_phys_buf *buf;
@@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
int n;
mr->nbuf = 0;
@@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+ INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 79e0a5a878da..11eba7a3ba8f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -156,7 +156,7 @@ struct resp_res {
struct sk_buff *skb;
} atomic;
struct {
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
u64 va_org;
u32 rkey;
u32 length;
@@ -183,7 +183,7 @@ struct rxe_resp_info {
/* RDMA read / atomic only */
u64 va;
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
u32 resid;
u32 rkey;
u32 length;
@@ -262,18 +262,18 @@ struct rxe_qp {
struct execute_work cleanup_work;
};
-enum rxe_mem_state {
- RXE_MEM_STATE_ZOMBIE,
- RXE_MEM_STATE_INVALID,
- RXE_MEM_STATE_FREE,
- RXE_MEM_STATE_VALID,
+enum rxe_mr_state {
+ RXE_MR_STATE_ZOMBIE,
+ RXE_MR_STATE_INVALID,
+ RXE_MR_STATE_FREE,
+ RXE_MR_STATE_VALID,
};
-enum rxe_mem_type {
- RXE_MEM_TYPE_NONE,
- RXE_MEM_TYPE_DMA,
- RXE_MEM_TYPE_MR,
- RXE_MEM_TYPE_MW,
+enum rxe_mr_type {
+ RXE_MR_TYPE_NONE,
+ RXE_MR_TYPE_DMA,
+ RXE_MR_TYPE_MR,
+ RXE_MR_TYPE_MW,
};
#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
@@ -287,17 +287,14 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
-struct rxe_mem {
+struct rxe_mr {
struct rxe_pool_entry pelem;
- union {
- struct ib_mr ibmr;
- struct ib_mw ibmw;
- };
+ struct ib_mr ibmr;
struct ib_umem *umem;
- enum rxe_mem_state state;
- enum rxe_mem_type type;
+ enum rxe_mr_state state;
+ enum rxe_mr_type type;
u64 va;
u64 iova;
size_t length;
@@ -318,6 +315,17 @@ struct rxe_mem {
struct rxe_map **map;
};
+enum rxe_mw_state {
+ RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
+ RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
+ RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
+};
+
+struct rxe_mw {
+ struct ib_mw ibmw;
+ struct rxe_pool_entry pelem;
+};
+
struct rxe_mc_grp {
struct rxe_pool_entry pelem;
spinlock_t mcg_lock; /* guard group */
@@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}
-static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
+static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
- return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
+ return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}
-static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
+static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
- return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
+ return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
-static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
return to_rpd(mr->ibmr.pd);
}
-static inline u32 mr_lkey(struct rxe_mem *mr)
+static inline u32 mr_lkey(struct rxe_mr *mr)
{
return mr->ibmr.lkey;
}
-static inline u32 mr_rkey(struct rxe_mem *mr)
+static inline u32 mr_rkey(struct rxe_mr *mr)
{
return mr->ibmr.rkey;
}
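After the split, to_rmr() and to_rmw() target different structs, but both rely on the same embedded-member walk-back. A compilable userspace rendering of the pattern (struct names here are illustrative, not the driver's):

#include <stddef.h>

/* Standard container_of(): step back from a pointer to an
 * embedded member to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_mr_sketch { int lkey; };

struct rxe_mr_sketch {
	int pelem;                 /* pool bookkeeping, first member */
	struct ib_mr_sketch ibmr;  /* embedded core object */
};

/* The same move to_rmr() makes: recover the driver's rxe_mr from
 * the ib_mr embedded inside it. */
static struct rxe_mr_sketch *to_rmr_sketch(struct ib_mr_sketch *ibmr)
{
	return ibmr ? container_of(ibmr, struct rxe_mr_sketch, ibmr) : NULL;
}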
diff --git a/drivers/infiniband/sw/siw/iwarp.h b/drivers/infiniband/sw/siw/iwarp.h
index e8a04d9c89cb..3f1dedb50a0d 100644
--- a/drivers/infiniband/sw/siw/iwarp.h
+++ b/drivers/infiniband/sw/siw/iwarp.h
@@ -114,13 +114,6 @@ static inline u8 __ddp_get_version(struct iwarp_ctrl *ctrl)
return be16_to_cpu(ctrl->ddp_rdmap_ctrl & DDP_MASK_VERSION) >> 8;
}
-static inline void __ddp_set_version(struct iwarp_ctrl *ctrl, u8 version)
-{
- ctrl->ddp_rdmap_ctrl =
- (ctrl->ddp_rdmap_ctrl & ~DDP_MASK_VERSION) |
- (cpu_to_be16((u16)version << 8) & DDP_MASK_VERSION);
-}
-
static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
{
__be16 ver = ctrl->ddp_rdmap_ctrl & RDMAP_MASK_VERSION;
@@ -128,12 +121,6 @@ static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
return be16_to_cpu(ver) >> 6;
}
-static inline void __rdmap_set_version(struct iwarp_ctrl *ctrl, u8 version)
-{
- ctrl->ddp_rdmap_ctrl = (ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_VERSION) |
- (cpu_to_be16(version << 6) & RDMAP_MASK_VERSION);
-}
-
static inline u8 __rdmap_get_opcode(struct iwarp_ctrl *ctrl)
{
return be16_to_cpu(ctrl->ddp_rdmap_ctrl & RDMAP_MASK_OPCODE);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 1f9e15b71504..7a5ed86ffc9f 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1300,7 +1300,7 @@ static void siw_cm_llp_state_change(struct sock *sk)
}
static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
- struct sockaddr *raddr)
+ struct sockaddr *raddr, bool afonly)
{
int rv, flags = 0;
size_t size = laddr->sa_family == AF_INET ?
@@ -1311,6 +1311,12 @@ static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
*/
sock_set_reuseaddr(s->sk);
+ if (afonly) {
+ rv = ip6_sock_set_v6only(s->sk);
+ if (rv)
+ return rv;
+ }
+
rv = s->ops->bind(s, laddr, size);
if (rv < 0)
return rv;
@@ -1371,7 +1377,7 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
* mode. Might be reconsidered for async connection setup at
* TCP level.
*/
- rv = kernel_bindconnect(s, laddr, raddr);
+ rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
if (rv != 0) {
siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
goto error;
@@ -1786,6 +1792,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
} else {
struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+ if (id->afonly) {
+ rv = ip6_sock_set_v6only(s->sk);
+ if (rv) {
+ siw_dbg(id->device,
+ "ip6_sock_set_v6only erro: %d\n", rv);
+ goto error;
+ }
+ }
+
/* For wildcard addr, limit binding to current device only */
if (ipv6_addr_any(&laddr->sin6_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 34a910cf0edb..61c17db70d65 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
mem->perms = rights & IWARP_ACCESS_MASK;
kref_init(&mem->ref);
- mr->mem = mem;
-
get_random_bytes(&next, 4);
next &= 0x00ffffff;
@@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
kfree(mem);
return -ENOMEM;
}
+
+ mr->mem = mem;
/* Set the STag index part */
mem->stag = id << 8;
mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index db138c8423da..f911287576d1 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -29,11 +29,6 @@ static inline void siw_mem_put(struct siw_mem *mem)
kref_put(&mem->ref, siw_free_mem);
}
-static inline struct siw_mr *siw_mem2mr(struct siw_mem *m)
-{
- return container_of(m, struct siw_mr, mem);
-}
-
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{
while (num_sge) {
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e389d44e5591..d2313efb26db 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -160,7 +160,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
return 0;
}
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr)
{
struct siw_device *sdev = to_siw_dev(base_dev);
@@ -194,7 +194,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
return rv;
}
-int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
struct ib_port_immutable *port_immutable)
{
struct ib_port_attr attr;
@@ -209,7 +209,7 @@ int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
return 0;
}
-int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid)
{
struct siw_device *sdev = to_siw_dev(base_dev);
@@ -1848,7 +1848,7 @@ void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
}
}
-void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype)
+void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
{
struct ib_event event;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 637454529357..67ac08886a70 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -36,17 +36,17 @@ static inline void siw_copy_sgl(struct ib_sge *sge, struct siw_sge *siw_sge,
int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata);
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx);
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr);
-int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
struct ib_port_immutable *port_immutable);
int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
struct ib_udata *udata);
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr);
-int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
@@ -86,6 +86,6 @@ void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
-void siw_port_event(struct siw_device *dev, u8 port, enum ib_event_type type);
+void siw_port_event(struct siw_device *dev, u32 port, enum ib_event_type type);
#endif
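ip6_sock_set_v6only() applies the same policy as the IPV6_V6ONLY socket option; a userspace sketch of what siw now arranges when the connection manager sets id->afonly:

#include <netinet/in.h>
#include <sys/socket.h>

/* Userspace analogue of ip6_sock_set_v6only(): restrict an
 * AF_INET6 socket to IPv6 traffic only (no v4-mapped addresses)
 * before bind(), mirroring the afonly handling added above. */
static int set_v6only(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
			  &one, sizeof(one));
}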
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 179ff1d068e5..75cd44789661 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -501,9 +501,9 @@ void ipoib_reap_ah(struct work_struct *work);
struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
-struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *format);
-int ipoib_intf_init(struct ib_device *hca, u8 port, const char *format,
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
struct net_device *dev);
void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
@@ -677,8 +677,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
#else
-struct ipoib_cm_tx;
-
#define ipoib_max_conn_qp 0
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index d5d592bdab35..9dbc85a6b702 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1122,12 +1122,8 @@ static int ipoib_cm_modify_tx_init(struct net_device *dev,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
- ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
- if (ret) {
- ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
- return ret;
- }
+ qp_attr.pkey_index = priv->pkey_index;
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr.port_num = priv->port;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 494f413dc3c6..ceabfb0b0a83 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1060,7 +1060,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
union ib_gid *netdev_gid;
int err;
u16 index;
- u8 port;
+ u32 port;
bool ret = false;
netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e16b40c09f82..bbb18087fdab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -90,7 +90,7 @@ static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
- struct ib_device *dev, u8 port, u16 pkey,
+ struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
@@ -164,8 +164,13 @@ int ipoib_open(struct net_device *dev)
dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
}
up_read(&priv->vlan_rwsem);
- }
+ } else if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
+ ipoib_dbg(priv, "parent device %s is not up, so child device may be not functioning.\n",
+ ppriv->dev->name);
+ }
netif_start_queue(dev);
return 0;
@@ -438,7 +443,7 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
/* Returns the number of matching net_devs found (between 0 and 2). Also
* return the matching net_device in the @net_dev parameter, holding a
* reference to the net_device, if the number of matches >= 1 */
-static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
+static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
u16 pkey_index,
const union ib_gid *gid,
const struct sockaddr *addr,
@@ -463,7 +468,7 @@ static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
}
static struct net_device *ipoib_get_net_dev_by_params(
- struct ib_device *dev, u8 port, u16 pkey,
+ struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data)
{
@@ -1181,7 +1186,12 @@ unref:
static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ if (rn->tx_timeout) {
+ rn->tx_timeout(dev, txqueue);
+ return;
+ }
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
ipoib_warn(priv,
@@ -2145,7 +2155,7 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
const char *name)
{
struct net_device *dev;
@@ -2162,7 +2172,7 @@ static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
return dev;
}
-int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
struct net_device *dev)
{
struct rdma_netdev *rn = netdev_priv(dev);
@@ -2213,7 +2223,7 @@ out:
return rc;
}
-struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *name)
{
struct net_device *dev;
@@ -2456,7 +2466,7 @@ static int ipoib_intercept_dev_id_attr(struct net_device *dev)
}
static struct net_device *ipoib_add_port(const char *format,
- struct ib_device *hca, u8 port)
+ struct ib_device *hca, u32 port)
{
struct rtnl_link_ops *ops = ipoib_get_link_ops();
struct rdma_netdev_alloc_params params;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 78ee9445f801..9f6ac0a09a78 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -297,7 +297,6 @@ struct iser_login_desc {
struct iser_conn;
struct ib_conn;
-struct iscsi_iser_task;
/**
* struct iser_device - iSER device handle
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 7305ed8976c2..18266f07c58d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_init_conn(isert_conn);
isert_conn->cm_id = cma_id;
- ret = isert_alloc_login_buf(isert_conn, cma_id->device);
- if (ret)
- goto out;
-
device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
- goto out_rsp_dma_map;
+ goto out;
}
isert_conn->device = device;
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+ if (ret)
+ goto out_conn_dev;
+
isert_set_nego_params(isert_conn, &event->param.conn);
isert_conn->qp = isert_create_qp(isert_conn, cma_id);
if (IS_ERR(isert_conn->qp)) {
ret = PTR_ERR(isert_conn->qp);
- goto out_conn_dev;
+ goto out_rsp_dma_map;
}
ret = isert_login_post_recv(isert_conn);
@@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
out_destroy_qp:
isert_destroy_qp(isert_conn);
-out_conn_dev:
- isert_device_put(device);
out_rsp_dma_map:
isert_free_login_buf(isert_conn);
+out_conn_dev:
+ isert_device_put(device);
out:
kfree(isert_conn);
rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
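The swapped labels above keep the unwind a mirror image of the new acquisition order: the device reference is now taken before the login buffer, so it must be dropped after the buffer is freed. A minimal sketch of the goto-unwind pattern, with hypothetical helper names rather than the isert API:

struct ctx;
int acquire_device(struct ctx *c);
void release_device(struct ctx *c);
int alloc_login_buf(struct ctx *c);
void free_login_buf(struct ctx *c);
int create_qp(struct ctx *c);

int setup(struct ctx *c)
{
	int ret;

	ret = acquire_device(c);		/* acquired first ... */
	if (ret)
		goto out;

	ret = alloc_login_buf(c);
	if (ret)
		goto out_put_device;

	ret = create_qp(c);
	if (ret)
		goto out_free_buf;

	return 0;

out_free_buf:
	free_login_buf(c);			/* ... released in reverse order */
out_put_device:
	release_device(c);
out:
	return ret;
}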
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index b6a0abf40589..7d53d18a5004 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -101,6 +101,9 @@ static ssize_t mpath_policy_show(struct device *dev,
case MP_POLICY_MIN_INFLIGHT:
return sysfs_emit(page, "min-inflight (MI: %d)\n",
clt->mp_policy);
+ case MP_POLICY_MIN_LATENCY:
+ return sysfs_emit(page, "min-latency (ML: %d)\n",
+ clt->mp_policy);
default:
return sysfs_emit(page, "Unknown (%d)\n", clt->mp_policy);
}
@@ -114,22 +117,32 @@ static ssize_t mpath_policy_store(struct device *dev,
struct rtrs_clt *clt;
int value;
int ret;
+ size_t len = 0;
clt = container_of(dev, struct rtrs_clt, dev);
ret = kstrtoint(buf, 10, &value);
if (!ret && (value == MP_POLICY_RR ||
- value == MP_POLICY_MIN_INFLIGHT)) {
+ value == MP_POLICY_MIN_INFLIGHT ||
+ value == MP_POLICY_MIN_LATENCY)) {
clt->mp_policy = value;
return count;
}
+ /* use the token length to distinguish the short aliases ("mi", "ml") from the full names */
+ len = strnlen(buf, NAME_MAX);
+ if (buf[len - 1] == '\n')
+ len--;
+
if (!strncasecmp(buf, "round-robin", 11) ||
- !strncasecmp(buf, "rr", 2))
+ (len == 2 && !strncasecmp(buf, "rr", 2)))
clt->mp_policy = MP_POLICY_RR;
else if (!strncasecmp(buf, "min-inflight", 12) ||
- !strncasecmp(buf, "mi", 2))
+ (len == 2 && !strncasecmp(buf, "mi", 2)))
clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else if (!strncasecmp(buf, "min-latency", 11) ||
+ (len == 2 && !strncasecmp(buf, "ml", 2)))
+ clt->mp_policy = MP_POLICY_MIN_LATENCY;
else
return -EINVAL;
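Because strncasecmp() compares only a prefix, a bare "mi" would otherwise also match the start of "min-inflight" and "min-latency"; the handler therefore trims a trailing newline and accepts the two-letter aliases only as exact-length tokens. A standalone userspace sketch of the same parsing (the policy constants are placeholders; unlike the kernel handler, this sketch also guards len == 0):

#include <stdio.h>
#include <string.h>
#include <strings.h>

enum { POLICY_RR, POLICY_MIN_INFLIGHT, POLICY_MIN_LATENCY, POLICY_INVALID };

static int parse_policy(const char *buf)
{
	size_t len = strnlen(buf, 255);

	if (len && buf[len - 1] == '\n')	/* sysfs writes usually end in '\n' */
		len--;

	if (!strncasecmp(buf, "round-robin", 11) ||
	    (len == 2 && !strncasecmp(buf, "rr", 2)))
		return POLICY_RR;
	if (!strncasecmp(buf, "min-inflight", 12) ||
	    (len == 2 && !strncasecmp(buf, "mi", 2)))
		return POLICY_MIN_INFLIGHT;
	if (!strncasecmp(buf, "min-latency", 11) ||
	    (len == 2 && !strncasecmp(buf, "ml", 2)))
		return POLICY_MIN_LATENCY;
	return POLICY_INVALID;
}

int main(void)
{
	printf("%d %d %d\n", parse_policy("mi\n"),
	       parse_policy("min-latency"), parse_policy("bogus"));
	return 0;
}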
@@ -342,6 +355,21 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
static struct kobj_attribute rtrs_clt_hca_name_attr =
__ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ return sysfs_emit(page, "%lld ns\n",
+ ktime_to_ns(sess->s.hb_cur_latency));
+}
+
+static struct kobj_attribute rtrs_clt_cur_latency_attr =
+ __ATTR(cur_latency, 0444, rtrs_clt_cur_latency_show, NULL);
+
static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
@@ -385,6 +413,7 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
&rtrs_clt_reconnect_attr.attr,
&rtrs_clt_disconnect_attr.attr,
&rtrs_clt_remove_path_attr.attr,
+ &rtrs_clt_cur_latency_attr.attr,
NULL,
};
@@ -396,14 +425,13 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
{
struct rtrs_clt *clt = sess->clt;
char str[NAME_MAX];
- int err, cnt;
-
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
- str, sizeof(str));
- cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
- sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
- str + cnt, sizeof(str) - cnt);
+ int err;
+ struct rtrs_addr path = {
+ .src = &sess->s.src_addr,
+ .dst = &sess->s.dst_addr,
+ };
+ rtrs_addr_to_str(&path, str, sizeof(str));
err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
"%s", str);
if (err) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 934a2ff18e7f..0a794d748a7a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -325,7 +325,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
@@ -345,7 +345,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_io_req *req =
container_of(wc->wr_cqe, typeof(*req), inv_cqe);
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
@@ -437,6 +437,13 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->in_use = false;
req->con = NULL;
+ if (errno) {
+ rtrs_err_rl(con->c.sess,
+ "IO request failed: error=%d path=%s [%s:%u]\n",
+ errno, kobject_name(&sess->kobj), sess->hca_name,
+ sess->hca_port);
+ }
+
if (notify)
req->conf(req->priv, errno);
}
@@ -586,7 +593,7 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
u32 imm_type, imm_payload;
bool w_inval = false;
@@ -628,6 +635,8 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
sess->s.hb_missed_cnt = 0;
+ sess->s.hb_cur_latency =
+ ktime_sub(ktime_get(), sess->s.hb_last_sent);
if (sess->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
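The ack path turns the periodic heartbeat into a cheap latency probe: hb_last_sent is stamped when the heartbeat is posted (see the hb_work() hunk in rtrs.c below) and subtracted from the current time when the HB ACK immediate arrives. The measurement in isolation, as a sketch using only the ktime helpers:

#include <linux/ktime.h>

struct hb_sketch {	/* hypothetical, mirrors the two new rtrs_sess fields */
	ktime_t last_sent;
	ktime_t cur_latency;
};

static void hb_on_send(struct hb_sketch *hb)
{
	hb->last_sent = ktime_get();	/* stamp right before posting the WR */
}

static void hb_on_ack(struct hb_sketch *hb)
{
	/* round-trip time of the most recent heartbeat */
	hb->cur_latency = ktime_sub(ktime_get(), hb->last_sent);
}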
@@ -826,6 +835,57 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
return min_path;
}
+/**
+ * get_next_path_min_latency() - Returns the path with minimal latency.
+ * @it: the path iterator
+ *
+ * Return: a path with the lowest latency or NULL if all paths have been tried
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ *
+ * Related to @MP_POLICY_MIN_LATENCY
+ *
+ * This DOES skip an already-tried path.
+ * A per-CPU skip list records paths that have already been tried and failed,
+ * so the function returns the path with the lowest latency first, the one
+ * with the second-lowest latency next, and so on. Once every path has been
+ * tried it returns NULL, and the caller MUST check for a NULL return and
+ * trigger an I/O error.
+ */
+static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
+{
+ struct rtrs_clt_sess *min_path = NULL;
+ struct rtrs_clt *clt = it->clt;
+ struct rtrs_clt_sess *sess;
+ ktime_t min_latency = KTIME_MAX;
+ ktime_t latency;
+
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
+ if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ continue;
+
+ latency = sess->s.hb_cur_latency;
+
+ if (latency < min_latency) {
+ min_latency = latency;
+ min_path = sess;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
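Seeding the scan with KTIME_MAX rather than a narrower constant such as INT_MAX matters because ktime_t is a signed 64-bit nanosecond count: INT_MAX nanoseconds is only about 2.1 seconds, so a path slower than that could never win the comparison even if it were the only one connected. A userspace illustration:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;	/* ktime_t is a signed 64-bit nanosecond count */

int main(void)
{
	ktime_t latency = 3000000000LL;	/* a 3 s heartbeat round trip */

	/* INT_MAX ns is ~2.147 s, so this path could never be selected */
	printf("%d\n", latency < (ktime_t)INT_MAX);	/* 0 */
	printf("%d\n", latency < INT64_MAX);		/* 1 */
	return 0;
}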
static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
{
INIT_LIST_HEAD(&it->skip_list);
@@ -834,8 +894,10 @@ static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
if (clt->mp_policy == MP_POLICY_RR)
it->next_path = get_next_path_rr;
- else
+ else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
it->next_path = get_next_path_min_inflight;
+ else
+ it->next_path = get_next_path_min_latency;
}
static inline void path_it_deinit(struct path_it *it)
@@ -1020,7 +1082,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->usr_len + sizeof(*msg),
imm);
if (unlikely(ret)) {
- rtrs_err(s, "Write request failed: %d\n", ret);
+ rtrs_err_rl(s,
+ "Write request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&sess->kobj), sess->hca_name,
+ sess->hca_port);
if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
if (req->sg_cnt)
@@ -1052,7 +1117,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
struct rtrs_sess *s = con->c.sess;
struct rtrs_clt_sess *sess = to_clt_sess(s);
struct rtrs_msg_rdma_read *msg;
- struct rtrs_ib_dev *dev;
+ struct rtrs_ib_dev *dev = sess->s.dev;
struct ib_reg_wr rwr;
struct ib_send_wr *wr = NULL;
@@ -1062,9 +1127,6 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- s = &sess->s;
- dev = sess->s.dev;
-
if (unlikely(tsize > sess->chunk_size)) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
@@ -1141,7 +1203,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
req->data_len, imm, wr);
if (unlikely(ret)) {
- rtrs_err(s, "Read request failed: %d\n", ret);
+ rtrs_err_rl(s,
+ "Read request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&sess->kobj), sess->hca_name,
+ sess->hca_port);
if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
req->need_inv = false;
@@ -1863,12 +1928,14 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- rtrs_wrn(s, "CM error event %d\n", ev->event);
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
- rtrs_wrn(s, "CM error event %d\n", ev->event);
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
cm_err = -EHOSTUNREACH;
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
@@ -1878,7 +1945,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
rtrs_clt_close_conns(sess, false);
return 0;
default:
- rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+ rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
}
@@ -2251,7 +2319,7 @@ destroy:
static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
struct rtrs_iu *iu;
@@ -2333,7 +2401,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
struct rtrs_msg_info_rsp *msg;
enum rtrs_clt_state state;
@@ -2464,16 +2532,28 @@ out:
static int init_sess(struct rtrs_clt_sess *sess)
{
int err;
+ char str[NAME_MAX];
+ struct rtrs_addr path = {
+ .src = &sess->s.src_addr,
+ .dst = &sess->s.dst_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
mutex_lock(&sess->init_mutex);
err = init_conns(sess);
if (err) {
- rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+ rtrs_err(sess->clt,
+ "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
+ str, sess->hca_name, sess->hca_port);
goto out;
}
err = rtrs_send_sess_info(sess);
if (err) {
- rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+ rtrs_err(
+ sess->clt,
+ "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, sess->hca_name, sess->hca_port);
goto out;
}
rtrs_clt_sess_up(sess);
@@ -2791,8 +2871,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
} while (!changed && old_state != RTRS_CLT_DEAD);
if (likely(changed)) {
- rtrs_clt_destroy_sess_files(sess, sysfs_self);
rtrs_clt_remove_path_from_arr(sess);
+ rtrs_clt_destroy_sess_files(sess, sysfs_self);
kobject_put(&sess->kobj);
}
@@ -2934,9 +3014,9 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
return -ECOMM;
attr->queue_depth = clt->queue_depth;
- attr->max_io_size = clt->max_io_size;
- attr->sess_kobj = &clt->dev.kobj;
- strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+ /* Cap max_io_size to the minimum of the remote buffer size and the FR page list size */
+ attr->max_io_size = min_t(int, clt->max_io_size,
+ clt->max_segments * SZ_4K);
return 0;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 98ba5d0a48b8..4c52f30e4da1 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -29,6 +29,7 @@ enum rtrs_clt_state {
enum rtrs_mp_policy {
MP_POLICY_RR,
MP_POLICY_MIN_INFLIGHT,
+ MP_POLICY_MIN_LATENCY,
};
/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 00eb45053339..86e65cf30cab 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -91,6 +91,7 @@ struct rtrs_con {
struct ib_cq *cq;
struct rdma_cm_id *cm_id;
unsigned int cid;
+ u16 cq_size;
};
struct rtrs_sess {
@@ -112,6 +113,8 @@ struct rtrs_sess {
unsigned int hb_interval_ms;
unsigned int hb_missed_cnt;
unsigned int hb_missed_max;
+ ktime_t hb_last_sent;
+ ktime_t hb_cur_latency;
};
/* rtrs information unit */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 126a96e75c62..a9288175fbb5 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -176,7 +176,8 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
err = device_add(&srv->dev);
if (err) {
pr_err("device_add(): %d\n", err);
- goto put;
+ put_device(&srv->dev);
+ goto unlock;
}
srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
if (!srv->kobj_paths) {
@@ -188,10 +189,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
}
dev_set_uevent_suppress(&srv->dev, false);
kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
- goto unlock;
-
-put:
- put_device(&srv->dev);
unlock:
mutex_unlock(&srv->paths_mutex);
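Once device_add() has been attempted on an initialized struct device, the object may only be released through put_device(), never freed directly, which is why the failure path now calls put_device() inline before jumping to the unlock. The general shape, as a sketch with a hypothetical caller:

#include <linux/device.h>

/* Sketch: a device_add() failure is unwound with put_device(), not kfree(). */
static int register_child(struct device *dev)
{
	int err;

	device_initialize(dev);
	/* ... dev_set_name(), parent, type ... */

	err = device_add(dev);
	if (err) {
		put_device(dev);	/* drops the device_initialize() reference */
		return err;
	}

	return 0;
}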
@@ -262,14 +259,13 @@ int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
struct rtrs_srv *srv = sess->srv;
struct rtrs_sess *s = &sess->s;
char str[NAME_MAX];
- int err, cnt;
-
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
- str, sizeof(str));
- cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
- sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
- str + cnt, sizeof(str) - cnt);
+ int err;
+ struct rtrs_addr path = {
+ .src = &sess->s.dst_addr,
+ .dst = &sess->s.src_addr,
+ };
+ rtrs_addr_to_str(&path, str, sizeof(str));
err = rtrs_srv_create_once_sysfs_root_folders(sess);
if (err)
return err;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index f7aa2a7e7442..0fa116cabc44 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -199,7 +199,7 @@ static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
@@ -518,8 +518,9 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
rtrs_err_rl(s,
- "Sending I/O response failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ "Sending I/O response failed, session %s is disconnected, sess state %s\n",
+ kobject_name(&sess->kobj),
+ rtrs_srv_state_str(sess->state));
goto out;
}
if (always_invalidate) {
@@ -529,7 +530,9 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
}
if (unlikely(atomic_sub_return(1,
&con->sq_wr_avail) < 0)) {
- pr_err("IB send queue full\n");
+ rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
+ kobject_name(&sess->kobj),
+ con->c.cid);
atomic_add(1, &con->sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
@@ -543,7 +546,8 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
err = rdma_write_sg(id);
if (unlikely(err)) {
- rtrs_err_rl(s, "IO response failed: %d\n", err);
+ rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
+ kobject_name(&sess->kobj));
close_sess(sess);
}
out:
@@ -720,7 +724,7 @@ static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_iu *iu;
@@ -862,7 +866,7 @@ rwr_free:
static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_msg_info_req *msg;
@@ -1110,7 +1114,7 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_mr *mr =
container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_srv *srv = sess->srv;
@@ -1167,7 +1171,7 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_srv *srv = sess->srv;
@@ -1683,6 +1687,8 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
{
struct rtrs_srv_sess *sess;
int err = -ENOMEM;
+ char str[NAME_MAX];
+ struct rtrs_addr path;
if (srv->paths_num >= MAX_PATHS_NUM) {
err = -ECONNRESET;
@@ -1717,6 +1723,13 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
sess->cur_cq_vector = -1;
sess->s.dst_addr = cm_id->route.addr.dst_addr;
sess->s.src_addr = cm_id->route.addr.src_addr;
+
+ /* temporary name, used until the session name is received from the client */
+ path.src = &sess->s.src_addr;
+ path.dst = &sess->s.dst_addr;
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ strlcpy(sess->s.sessname, str, sizeof(sess->s.sessname));
+
sess->s.con_num = con_num;
sess->s.recon_cnt = recon_cnt;
uuid_copy(&sess->s.uuid, uuid);
@@ -1908,13 +1921,10 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_UNREACHABLE:
rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
- close_sess(sess);
- break;
+ fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- close_sess(sess);
- break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
close_sess(sess);
break;
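Collapsing the error arms onto one close_sess() relies on the kernel's fallthrough pseudo-keyword, which tells the compiler (and the reader) that the missing break is intentional. The same shape in miniature, with hypothetical event names:

#include <linux/compiler_attributes.h>

struct sess;	/* hypothetical */
void log_cm_error(struct sess *s, int ev);
void close_sess(struct sess *s);

enum { EV_CONNECT_ERROR, EV_DISCONNECTED, EV_TIMEWAIT_EXIT };

static void handle_event(int ev, struct sess *s)
{
	switch (ev) {
	case EV_CONNECT_ERROR:
		log_cm_error(s, ev);	/* only the first arm logs */
		fallthrough;
	case EV_DISCONNECTED:
	case EV_TIMEWAIT_EXIT:
		close_sess(s);		/* shared teardown for every arm */
		break;
	default:
		break;
	}
}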
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index d13aff0aa816..a7847282a2eb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -218,14 +218,14 @@ static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
struct rdma_cm_id *cm_id = con->cm_id;
struct ib_cq *cq;
- cq = ib_alloc_cq(cm_id->device, con, cq_size,
- cq_vector, poll_ctx);
+ cq = ib_cq_pool_get(cm_id->device, cq_size, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
con->cq = cq;
+ con->cq_size = cq_size;
return 0;
}
@@ -273,7 +273,7 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
- ib_free_cq(con->cq);
+ ib_cq_pool_put(con->cq, con->cq_size);
con->cq = NULL;
return err;
}
@@ -290,7 +290,7 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
con->qp = NULL;
}
if (con->cq) {
- ib_free_cq(con->cq);
+ ib_cq_pool_put(con->cq, con->cq_size);
con->cq = NULL;
}
}
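Switching from ib_alloc_cq() to the CQ pool lets connections share completion queues allocated per device; the only extra bookkeeping is remembering cq_size so the matching ib_cq_pool_put() can return the CQEs to the pool. A sketch of the pairing, assuming a hypothetical container type:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

struct con_sketch {	/* hypothetical, stands in for rtrs_con */
	struct ib_cq *cq;
	u16 cq_size;
};

static int con_create_cq(struct con_sketch *con, struct ib_device *dev,
			 u16 cq_size, int cq_vector)
{
	struct ib_cq *cq;

	cq = ib_cq_pool_get(dev, cq_size, cq_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	con->cq = cq;
	con->cq_size = cq_size;	/* remembered for ib_cq_pool_put() */
	return 0;
}

static void con_destroy_cq(struct con_sketch *con)
{
	if (con->cq) {
		ib_cq_pool_put(con->cq, con->cq_size);
		con->cq = NULL;
	}
}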
@@ -337,6 +337,9 @@ static void hb_work(struct work_struct *work)
schedule_hb(sess);
return;
}
+
+ sess->hb_last_sent = ktime_get();
+
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
0, NULL);
@@ -464,6 +467,30 @@ int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
EXPORT_SYMBOL(sockaddr_to_str);
/**
+ * rtrs_addr_to_str() - convert rtrs_addr to a string "src@dst"
+ * @addr: the rtrs_addr structure to be converted
+ * @buf: output buffer for the source and destination addresses of the
+ * path, separated by '@', e.g. "ip:1.1.1.1@ip:1.1.1.2".
+ * @len: string length
+ *
+ * The return value is the number of characters written into buf not
+ * including the trailing '\0'.
+ */
+int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len)
+{
+ int cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)addr->src,
+ buf, len);
+ cnt += scnprintf(buf + cnt, len - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)addr->dst,
+ buf + cnt, len - cnt);
+ return cnt;
+}
+EXPORT_SYMBOL(rtrs_addr_to_str);
+
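Both sysfs call sites above now delegate to this helper. A userspace stand-in showing the same compose-and-append pattern (the kernel helper uses scnprintf(), which returns the number of characters actually stored and so cannot run past the buffer):

#include <stdio.h>

/* Userspace stand-in: compose "src@dst" the way rtrs_addr_to_str() does. */
static int addr_to_str(const char *src, const char *dst, char *buf, size_t len)
{
	/* assumes buf is large enough, as the kernel side does with NAME_MAX */
	int cnt = snprintf(buf, len, "%s", src);

	cnt += snprintf(buf + cnt, len - cnt, "@");
	snprintf(buf + cnt, len - cnt, "%s", dst);
	return cnt;
}

int main(void)
{
	char str[64];

	addr_to_str("ip:1.1.1.1", "ip:1.1.1.2", str, sizeof(str));
	puts(str);	/* prints: ip:1.1.1.1@ip:1.1.1.2 */
	return 0;
}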
+/**
* rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
* to sockaddresses
* @str: string containing source and destination addr of a path
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index bebaa94c4728..dc3e1af1a85b 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -110,8 +110,6 @@ int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
struct rtrs_attrs {
u32 queue_depth;
u32 max_io_size;
- u8 sessname[NAME_MAX];
- struct kobject *sess_kobj;
};
int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
@@ -185,4 +183,5 @@ int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
struct rtrs_addr *addr);
int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len);
#endif
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 51c386a215f5..ea447805d4ea 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2382,6 +2382,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
dev_name(&sdev->device->dev), port_num);
mutex_unlock(&sport->mutex);
+ ret = -EINVAL;
goto reject;
}
@@ -3109,7 +3110,8 @@ static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
- int i, ret;
+ int ret;
+ u32 i;
pr_debug("device = %p\n", device);
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index d8f5310e22ba..037cc595106c 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_INPUT) += input-core.o
input-core-y := input.o input-compat.o input-mt.o input-poller.o ff-core.o
+input-core-y += touchscreen.o
obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 9f0d07dcbf06..d69d7657ab12 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -268,6 +268,7 @@ static const struct xpad_device {
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -440,6 +441,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 77bac4ddf324..8dbf1e69c90a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
+#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -36,10 +37,11 @@ struct gpio_button_data {
unsigned short *code;
- struct timer_list release_timer;
+ struct hrtimer release_timer;
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
struct delayed_work work;
+ struct hrtimer debounce_timer;
unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
unsigned int irq;
@@ -48,6 +50,7 @@ struct gpio_button_data {
bool disabled;
bool key_pressed;
bool suspended;
+ bool debounce_use_hrtimer;
};
struct gpio_keys_drvdata {
@@ -122,6 +125,18 @@ static const unsigned long *get_bm_events_by_type(struct input_dev *dev,
return (type == EV_KEY) ? dev->keybit : dev->swbit;
}
+static void gpio_keys_quiesce_key(void *data)
+{
+ struct gpio_button_data *bdata = data;
+
+ if (!bdata->gpiod)
+ hrtimer_cancel(&bdata->release_timer);
+ if (bdata->debounce_use_hrtimer)
+ hrtimer_cancel(&bdata->debounce_timer);
+ else
+ cancel_delayed_work_sync(&bdata->work);
+}
+
/**
* gpio_keys_disable_button() - disables given GPIO button
* @bdata: button data for button to be disabled
@@ -142,12 +157,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
* Disable IRQ and associated timer/work structure.
*/
disable_irq(bdata->irq);
-
- if (bdata->gpiod)
- cancel_delayed_work_sync(&bdata->work);
- else
- del_timer_sync(&bdata->release_timer);
-
+ gpio_keys_quiesce_key(bdata);
bdata->disabled = true;
}
}
@@ -360,7 +370,9 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
unsigned int type = button->type ?: EV_KEY;
int state;
- state = gpiod_get_value_cansleep(bdata->gpiod);
+ state = bdata->debounce_use_hrtimer ?
+ gpiod_get_value(bdata->gpiod) :
+ gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
dev_err(input->dev.parent,
"failed to get gpio state: %d\n", state);
@@ -373,7 +385,15 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
} else {
input_event(input, type, *bdata->code, state);
}
- input_sync(input);
+}
+
+static void gpio_keys_debounce_event(struct gpio_button_data *bdata)
+{
+ gpio_keys_gpio_report_event(bdata);
+ input_sync(bdata->input);
+
+ if (bdata->button->wakeup)
+ pm_relax(bdata->input->dev.parent);
}
static void gpio_keys_gpio_work_func(struct work_struct *work)
@@ -381,10 +401,17 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
struct gpio_button_data *bdata =
container_of(work, struct gpio_button_data, work.work);
- gpio_keys_gpio_report_event(bdata);
+ gpio_keys_debounce_event(bdata);
+}
- if (bdata->button->wakeup)
- pm_relax(bdata->input->dev.parent);
+static enum hrtimer_restart gpio_keys_debounce_timer(struct hrtimer *t)
+{
+ struct gpio_button_data *bdata =
+ container_of(t, struct gpio_button_data, debounce_timer);
+
+ gpio_keys_debounce_event(bdata);
+
+ return HRTIMER_NORESTART;
}
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
@@ -408,26 +435,33 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
}
}
- mod_delayed_work(system_wq,
- &bdata->work,
- msecs_to_jiffies(bdata->software_debounce));
+ if (bdata->debounce_use_hrtimer) {
+ hrtimer_start(&bdata->debounce_timer,
+ ms_to_ktime(bdata->software_debounce),
+ HRTIMER_MODE_REL);
+ } else {
+ mod_delayed_work(system_wq,
+ &bdata->work,
+ msecs_to_jiffies(bdata->software_debounce));
+ }
return IRQ_HANDLED;
}
-static void gpio_keys_irq_timer(struct timer_list *t)
+static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
{
- struct gpio_button_data *bdata = from_timer(bdata, t, release_timer);
+ struct gpio_button_data *bdata = container_of(t,
+ struct gpio_button_data,
+ release_timer);
struct input_dev *input = bdata->input;
- unsigned long flags;
- spin_lock_irqsave(&bdata->lock, flags);
if (bdata->key_pressed) {
input_event(input, EV_KEY, *bdata->code, 0);
input_sync(input);
bdata->key_pressed = false;
}
- spin_unlock_irqrestore(&bdata->lock, flags);
+
+ return HRTIMER_NORESTART;
}
static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
@@ -457,23 +491,14 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
}
if (bdata->release_delay)
- mod_timer(&bdata->release_timer,
- jiffies + msecs_to_jiffies(bdata->release_delay));
+ hrtimer_start(&bdata->release_timer,
+ ms_to_ktime(bdata->release_delay),
+ HRTIMER_MODE_REL_HARD);
out:
spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
}
-static void gpio_keys_quiesce_key(void *data)
-{
- struct gpio_button_data *bdata = data;
-
- if (bdata->gpiod)
- cancel_delayed_work_sync(&bdata->work);
- else
- del_timer_sync(&bdata->release_timer);
-}
-
static int gpio_keys_setup_key(struct platform_device *pdev,
struct input_dev *input,
struct gpio_keys_drvdata *ddata,
@@ -543,6 +568,14 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
if (error < 0)
bdata->software_debounce =
button->debounce_interval;
+
+ /*
+ * If reading the GPIO won't sleep, we can use a
+ * hrtimer instead of a standard timer for the software
+ * debounce, to reduce the latency as much as possible.
+ */
+ bdata->debounce_use_hrtimer =
+ !gpiod_cansleep(bdata->gpiod);
}
if (button->irq) {
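When the GPIO can be read without sleeping, an hrtimer fires the debounce callback with much tighter latency than a delayed work item bounced through the workqueue. A minimal sketch of the one-shot hrtimer debounce pattern (this sketch uses CLOCK_MONOTONIC; the driver above picks CLOCK_REALTIME):

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct debounce_sketch {
	struct hrtimer timer;
	unsigned int debounce_ms;
};

static enum hrtimer_restart debounce_fired(struct hrtimer *t)
{
	struct debounce_sketch *d =
		container_of(t, struct debounce_sketch, timer);

	/* read the (non-sleeping) GPIO and report the input event here */
	(void)d;
	return HRTIMER_NORESTART;	/* one-shot: re-armed from the ISR */
}

static void debounce_init(struct debounce_sketch *d)
{
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = debounce_fired;
}

static void debounce_kick(struct debounce_sketch *d)	/* from the ISR */
{
	hrtimer_start(&d->timer, ms_to_ktime(d->debounce_ms),
		      HRTIMER_MODE_REL);
}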
@@ -561,6 +594,10 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
+ hrtimer_init(&bdata->debounce_timer,
+ CLOCK_REALTIME, HRTIMER_MODE_REL);
+ bdata->debounce_timer.function = gpio_keys_debounce_timer;
+
isr = gpio_keys_gpio_isr;
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -595,7 +632,9 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
}
bdata->release_delay = button->debounce_interval;
- timer_setup(&bdata->release_timer, gpio_keys_irq_timer, 0);
+ hrtimer_init(&bdata->release_timer,
+ CLOCK_REALTIME, HRTIMER_MODE_REL_HARD);
+ bdata->release_timer.function = gpio_keys_irq_timer;
isr = gpio_keys_irq_isr;
irqflags = 0;
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 1f5c9ea5e9e5..ae9303848571 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -408,27 +408,18 @@ open_err:
return -EIO;
}
-#ifdef CONFIG_OF
static const struct of_device_id imx_keypad_of_match[] = {
{ .compatible = "fsl,imx21-kpp", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
-#endif
static int imx_keypad_probe(struct platform_device *pdev)
{
- const struct matrix_keymap_data *keymap_data =
- dev_get_platdata(&pdev->dev);
struct imx_keypad *keypad;
struct input_dev *input_dev;
int irq, error, i, row, col;
- if (!keymap_data && !pdev->dev.of_node) {
- dev_err(&pdev->dev, "no keymap defined\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -469,7 +460,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
input_dev->open = imx_keypad_open;
input_dev->close = imx_keypad_close;
- error = matrix_keypad_build_keymap(keymap_data, NULL,
+ error = matrix_keypad_build_keymap(NULL, NULL,
MAX_MATRIX_KEY_ROWS,
MAX_MATRIX_KEY_COLS,
keypad->keycodes, input_dev);
@@ -582,7 +573,7 @@ static struct platform_driver imx_keypad_driver = {
.driver = {
.name = "imx-keypad",
.pm = &imx_kbd_pm_ops,
- .of_match_table = of_match_ptr(imx_keypad_of_match),
+ .of_match_table = imx_keypad_of_match,
},
.probe = imx_keypad_probe,
};
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 9b0f9665dcb0..2a9755910065 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -274,7 +274,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
error = request_threaded_irq(chip->irqnum, NULL,
tca6416_keys_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"tca6416-keypad", chip);
if (error) {
dev_dbg(&client->dev,
@@ -282,7 +282,6 @@ static int tca6416_keypad_probe(struct i2c_client *client,
chip->irqnum, error);
goto fail1;
}
- disable_irq(chip->irqnum);
}
error = input_register_device(input);
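Passing IRQF_NO_AUTOEN at request time leaves the line disabled until the driver explicitly enables it, closing the window in the old request-then-disable_irq() sequence where the handler could fire before the device was ready. A sketch of the idiom with hypothetical driver names:

#include <linux/interrupt.h>

struct my_dev;	/* hypothetical driver state */

static irqreturn_t my_dev_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *dev, int irq)
{
	int error;

	error = request_threaded_irq(irq, NULL, my_dev_isr,
				     IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
				     IRQF_NO_AUTOEN,
				     "my-dev", dev);
	if (error)
		return error;

	/* ... register the input device, allocate buffers, etc. ... */

	enable_irq(irq);	/* first enable happens here, race-free */
	return 0;
}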
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 9671842a082a..570fe18c0ce9 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -694,14 +694,13 @@ static int tegra_kbc_probe(struct platform_device *pdev)
input_set_drvdata(kbc->idev, kbc);
err = devm_request_irq(&pdev->dev, kbc->irq, tegra_kbc_isr,
- IRQF_TRIGGER_HIGH, pdev->name, kbc);
+ IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
+ pdev->name, kbc);
if (err) {
dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
return err;
}
- disable_irq(kbc->irq);
-
err = input_register_device(kbc->idev);
if (err) {
dev_err(&pdev->dev, "failed to register input device\n");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7237dc440b98..498cde376981 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -763,6 +763,17 @@ config INPUT_IQS269A
To compile this driver as a module, choose M here: the
module will be called iqs269a.
+config INPUT_IQS626A
+ tristate "Azoteq IQS626A capacitive touch controller"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the Azoteq IQS626A capacitive
+ touch controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs626a.
+
config INPUT_CMA3000
tristate "VTI CMA3000 Tri-axis accelerometer"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 46db664a8bc4..f593beed7e05 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
+obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 08b9b5cdb943..81de8c4e37d0 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -2018,7 +2018,6 @@ static int ims_pcu_probe(struct usb_interface *intf,
}
usb_set_intfdata(pcu->ctrl_intf, pcu);
- usb_set_intfdata(pcu->data_intf, pcu);
error = ims_pcu_buffers_alloc(pcu);
if (error)
diff --git a/drivers/input/misc/iqs626a.c b/drivers/input/misc/iqs626a.c
new file mode 100644
index 000000000000..d57e996732cf
--- /dev/null
+++ b/drivers/input/misc/iqs626a.c
@@ -0,0 +1,1838 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS626A Capacitive Touch Controller
+ *
+ * Copyright (C) 2020 Jeff LaBundy <jeff@labundy.com>
+ *
+ * This driver registers up to 2 input devices: one representing capacitive or
+ * inductive keys as well as Hall-effect switches, and one for a trackpad that
+ * can express various gestures.
+ */
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define IQS626_VER_INFO 0x00
+#define IQS626_VER_INFO_PROD_NUM 0x51
+
+#define IQS626_SYS_FLAGS 0x02
+#define IQS626_SYS_FLAGS_SHOW_RESET BIT(15)
+#define IQS626_SYS_FLAGS_IN_ATI BIT(12)
+#define IQS626_SYS_FLAGS_PWR_MODE_MASK GENMASK(9, 8)
+#define IQS626_SYS_FLAGS_PWR_MODE_SHIFT 8
+
+#define IQS626_HALL_OUTPUT 0x23
+
+#define IQS626_SYS_SETTINGS 0x80
+#define IQS626_SYS_SETTINGS_CLK_DIV BIT(15)
+#define IQS626_SYS_SETTINGS_ULP_AUTO BIT(14)
+#define IQS626_SYS_SETTINGS_DIS_AUTO BIT(13)
+#define IQS626_SYS_SETTINGS_PWR_MODE_MASK GENMASK(12, 11)
+#define IQS626_SYS_SETTINGS_PWR_MODE_SHIFT 11
+#define IQS626_SYS_SETTINGS_PWR_MODE_MAX 3
+#define IQS626_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8)
+#define IQS626_SYS_SETTINGS_ULP_UPDATE_SHIFT 8
+#define IQS626_SYS_SETTINGS_ULP_UPDATE_MAX 7
+#define IQS626_SYS_SETTINGS_EVENT_MODE BIT(5)
+#define IQS626_SYS_SETTINGS_EVENT_MODE_LP BIT(4)
+#define IQS626_SYS_SETTINGS_REDO_ATI BIT(2)
+#define IQS626_SYS_SETTINGS_ACK_RESET BIT(0)
+
+#define IQS626_MISC_A_ATI_BAND_DISABLE BIT(7)
+#define IQS626_MISC_A_TPx_LTA_UPDATE_MASK GENMASK(6, 4)
+#define IQS626_MISC_A_TPx_LTA_UPDATE_SHIFT 4
+#define IQS626_MISC_A_TPx_LTA_UPDATE_MAX 7
+#define IQS626_MISC_A_ATI_LP_ONLY BIT(3)
+#define IQS626_MISC_A_GPIO3_SELECT_MASK GENMASK(2, 0)
+#define IQS626_MISC_A_GPIO3_SELECT_MAX 7
+
+#define IQS626_EVENT_MASK_SYS BIT(6)
+#define IQS626_EVENT_MASK_GESTURE BIT(3)
+#define IQS626_EVENT_MASK_DEEP BIT(2)
+#define IQS626_EVENT_MASK_TOUCH BIT(1)
+#define IQS626_EVENT_MASK_PROX BIT(0)
+
+#define IQS626_RATE_NP_MS_MAX 255
+#define IQS626_RATE_LP_MS_MAX 255
+#define IQS626_RATE_ULP_MS_MAX 4080
+#define IQS626_TIMEOUT_PWR_MS_MAX 130560
+#define IQS626_TIMEOUT_LTA_MS_MAX 130560
+
+#define IQS626_MISC_B_RESEED_UI_SEL_MASK GENMASK(7, 6)
+#define IQS626_MISC_B_RESEED_UI_SEL_SHIFT 6
+#define IQS626_MISC_B_RESEED_UI_SEL_MAX 3
+#define IQS626_MISC_B_THRESH_EXTEND BIT(5)
+#define IQS626_MISC_B_TRACKING_UI_ENABLE BIT(4)
+#define IQS626_MISC_B_TPx_SWIPE BIT(3)
+#define IQS626_MISC_B_RESEED_OFFSET BIT(2)
+#define IQS626_MISC_B_FILT_STR_TPx GENMASK(1, 0)
+
+#define IQS626_THRESH_SWIPE_MAX 255
+#define IQS626_TIMEOUT_TAP_MS_MAX 4080
+#define IQS626_TIMEOUT_SWIPE_MS_MAX 4080
+
+#define IQS626_CHx_ENG_0_MEAS_CAP_SIZE BIT(7)
+#define IQS626_CHx_ENG_0_RX_TERM_VSS BIT(5)
+#define IQS626_CHx_ENG_0_LINEARIZE BIT(4)
+#define IQS626_CHx_ENG_0_DUAL_DIR BIT(3)
+#define IQS626_CHx_ENG_0_FILT_DISABLE BIT(2)
+#define IQS626_CHx_ENG_0_ATI_MODE_MASK GENMASK(1, 0)
+#define IQS626_CHx_ENG_0_ATI_MODE_MAX 3
+
+#define IQS626_CHx_ENG_1_CCT_HIGH_1 BIT(7)
+#define IQS626_CHx_ENG_1_CCT_HIGH_0 BIT(6)
+#define IQS626_CHx_ENG_1_PROJ_BIAS_MASK GENMASK(5, 4)
+#define IQS626_CHx_ENG_1_PROJ_BIAS_SHIFT 4
+#define IQS626_CHx_ENG_1_PROJ_BIAS_MAX 3
+#define IQS626_CHx_ENG_1_CCT_ENABLE BIT(3)
+#define IQS626_CHx_ENG_1_SENSE_FREQ_MASK GENMASK(2, 1)
+#define IQS626_CHx_ENG_1_SENSE_FREQ_SHIFT 1
+#define IQS626_CHx_ENG_1_SENSE_FREQ_MAX 3
+#define IQS626_CHx_ENG_1_ATI_BAND_TIGHTEN BIT(0)
+
+#define IQS626_CHx_ENG_2_LOCAL_CAP_MASK GENMASK(7, 6)
+#define IQS626_CHx_ENG_2_LOCAL_CAP_SHIFT 6
+#define IQS626_CHx_ENG_2_LOCAL_CAP_MAX 3
+#define IQS626_CHx_ENG_2_LOCAL_CAP_ENABLE BIT(5)
+#define IQS626_CHx_ENG_2_SENSE_MODE_MASK GENMASK(3, 0)
+#define IQS626_CHx_ENG_2_SENSE_MODE_MAX 15
+
+#define IQS626_CHx_ENG_3_TX_FREQ_MASK GENMASK(5, 4)
+#define IQS626_CHx_ENG_3_TX_FREQ_SHIFT 4
+#define IQS626_CHx_ENG_3_TX_FREQ_MAX 3
+#define IQS626_CHx_ENG_3_INV_LOGIC BIT(0)
+
+#define IQS626_CHx_ENG_4_RX_TERM_VREG BIT(6)
+#define IQS626_CHx_ENG_4_CCT_LOW_1 BIT(5)
+#define IQS626_CHx_ENG_4_CCT_LOW_0 BIT(4)
+#define IQS626_CHx_ENG_4_COMP_DISABLE BIT(1)
+#define IQS626_CHx_ENG_4_STATIC_ENABLE BIT(0)
+
+#define IQS626_TPx_ATI_BASE_MIN 45
+#define IQS626_TPx_ATI_BASE_MAX 300
+#define IQS626_CHx_ATI_BASE_MASK GENMASK(7, 6)
+#define IQS626_CHx_ATI_BASE_75 0x00
+#define IQS626_CHx_ATI_BASE_100 0x40
+#define IQS626_CHx_ATI_BASE_150 0x80
+#define IQS626_CHx_ATI_BASE_200 0xC0
+#define IQS626_CHx_ATI_TARGET_MASK GENMASK(5, 0)
+#define IQS626_CHx_ATI_TARGET_MAX 2016
+
+#define IQS626_CHx_THRESH_MAX 255
+#define IQS626_CHx_HYST_DEEP_MASK GENMASK(7, 4)
+#define IQS626_CHx_HYST_DEEP_SHIFT 4
+#define IQS626_CHx_HYST_TOUCH_MASK GENMASK(3, 0)
+#define IQS626_CHx_HYST_MAX 15
+
+#define IQS626_FILT_STR_NP_TPx_MASK GENMASK(7, 6)
+#define IQS626_FILT_STR_NP_TPx_SHIFT 6
+#define IQS626_FILT_STR_LP_TPx_MASK GENMASK(5, 4)
+#define IQS626_FILT_STR_LP_TPx_SHIFT 4
+
+#define IQS626_FILT_STR_NP_CNT_MASK GENMASK(7, 6)
+#define IQS626_FILT_STR_NP_CNT_SHIFT 6
+#define IQS626_FILT_STR_LP_CNT_MASK GENMASK(5, 4)
+#define IQS626_FILT_STR_LP_CNT_SHIFT 4
+#define IQS626_FILT_STR_NP_LTA_MASK GENMASK(3, 2)
+#define IQS626_FILT_STR_NP_LTA_SHIFT 2
+#define IQS626_FILT_STR_LP_LTA_MASK GENMASK(1, 0)
+#define IQS626_FILT_STR_MAX 3
+
+#define IQS626_ULP_PROJ_ENABLE BIT(4)
+#define IQS626_GEN_WEIGHT_MAX 255
+
+#define IQS626_MAX_REG 0xFF
+
+#define IQS626_NUM_CH_TP_3 9
+#define IQS626_NUM_CH_TP_2 6
+#define IQS626_NUM_CH_GEN 3
+#define IQS626_NUM_CRx_TX 8
+
+#define IQS626_PWR_MODE_POLL_SLEEP_US 50000
+#define IQS626_PWR_MODE_POLL_TIMEOUT_US 500000
+
+#define iqs626_irq_wait() usleep_range(350, 400)
+
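The register map above encodes multi-bit fields with GENMASK()/BIT() plus explicit shift constants, and the parsing code below follows the same read-modify-write idiom on the cached register image (the real 16-bit registers live big-endian in struct iqs626_sys_reg; this sketch skips the endianness conversion):

#include <linux/bits.h>
#include <linux/types.h>

#define PWR_MODE_MASK	GENMASK(12, 11)	/* placeholder field, same shape */
#define PWR_MODE_SHIFT	11

static void set_pwr_mode(u16 *general, unsigned int mode)
{
	*general &= ~PWR_MODE_MASK;		/* clear the field ... */
	*general |= mode << PWR_MODE_SHIFT;	/* ... then OR in the new value */
}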
+enum iqs626_ch_id {
+ IQS626_CH_ULP_0,
+ IQS626_CH_TP_2,
+ IQS626_CH_TP_3,
+ IQS626_CH_GEN_0,
+ IQS626_CH_GEN_1,
+ IQS626_CH_GEN_2,
+ IQS626_CH_HALL,
+};
+
+enum iqs626_rx_inactive {
+ IQS626_RX_INACTIVE_VSS,
+ IQS626_RX_INACTIVE_FLOAT,
+ IQS626_RX_INACTIVE_VREG,
+};
+
+enum iqs626_st_offs {
+ IQS626_ST_OFFS_PROX,
+ IQS626_ST_OFFS_DIR,
+ IQS626_ST_OFFS_TOUCH,
+ IQS626_ST_OFFS_DEEP,
+};
+
+enum iqs626_th_offs {
+ IQS626_TH_OFFS_PROX,
+ IQS626_TH_OFFS_TOUCH,
+ IQS626_TH_OFFS_DEEP,
+};
+
+enum iqs626_event_id {
+ IQS626_EVENT_PROX_DN,
+ IQS626_EVENT_PROX_UP,
+ IQS626_EVENT_TOUCH_DN,
+ IQS626_EVENT_TOUCH_UP,
+ IQS626_EVENT_DEEP_DN,
+ IQS626_EVENT_DEEP_UP,
+};
+
+enum iqs626_gesture_id {
+ IQS626_GESTURE_FLICK_X_POS,
+ IQS626_GESTURE_FLICK_X_NEG,
+ IQS626_GESTURE_FLICK_Y_POS,
+ IQS626_GESTURE_FLICK_Y_NEG,
+ IQS626_GESTURE_TAP,
+ IQS626_GESTURE_HOLD,
+ IQS626_NUM_GESTURES,
+};
+
+struct iqs626_event_desc {
+ const char *name;
+ enum iqs626_st_offs st_offs;
+ enum iqs626_th_offs th_offs;
+ bool dir_up;
+ u8 mask;
+};
+
+static const struct iqs626_event_desc iqs626_events[] = {
+ [IQS626_EVENT_PROX_DN] = {
+ .name = "event-prox",
+ .st_offs = IQS626_ST_OFFS_PROX,
+ .th_offs = IQS626_TH_OFFS_PROX,
+ .mask = IQS626_EVENT_MASK_PROX,
+ },
+ [IQS626_EVENT_PROX_UP] = {
+ .name = "event-prox-alt",
+ .st_offs = IQS626_ST_OFFS_PROX,
+ .th_offs = IQS626_TH_OFFS_PROX,
+ .dir_up = true,
+ .mask = IQS626_EVENT_MASK_PROX,
+ },
+ [IQS626_EVENT_TOUCH_DN] = {
+ .name = "event-touch",
+ .st_offs = IQS626_ST_OFFS_TOUCH,
+ .th_offs = IQS626_TH_OFFS_TOUCH,
+ .mask = IQS626_EVENT_MASK_TOUCH,
+ },
+ [IQS626_EVENT_TOUCH_UP] = {
+ .name = "event-touch-alt",
+ .st_offs = IQS626_ST_OFFS_TOUCH,
+ .th_offs = IQS626_TH_OFFS_TOUCH,
+ .dir_up = true,
+ .mask = IQS626_EVENT_MASK_TOUCH,
+ },
+ [IQS626_EVENT_DEEP_DN] = {
+ .name = "event-deep",
+ .st_offs = IQS626_ST_OFFS_DEEP,
+ .th_offs = IQS626_TH_OFFS_DEEP,
+ .mask = IQS626_EVENT_MASK_DEEP,
+ },
+ [IQS626_EVENT_DEEP_UP] = {
+ .name = "event-deep-alt",
+ .st_offs = IQS626_ST_OFFS_DEEP,
+ .th_offs = IQS626_TH_OFFS_DEEP,
+ .dir_up = true,
+ .mask = IQS626_EVENT_MASK_DEEP,
+ },
+};
+
+struct iqs626_ver_info {
+ u8 prod_num;
+ u8 sw_num;
+ u8 hw_num;
+ u8 padding;
+} __packed;
+
+struct iqs626_flags {
+ __be16 system;
+ u8 gesture;
+ u8 padding_a;
+ u8 states[4];
+ u8 ref_active;
+ u8 padding_b;
+ u8 comp_min;
+ u8 comp_max;
+ u8 trackpad_x;
+ u8 trackpad_y;
+} __packed;
+
+struct iqs626_ch_reg_ulp {
+ u8 thresh[2];
+ u8 hyst;
+ u8 filter;
+ u8 engine[2];
+ u8 ati_target;
+ u8 padding;
+ __be16 ati_comp;
+ u8 rx_enable;
+ u8 tx_enable;
+} __packed;
+
+struct iqs626_ch_reg_tp {
+ u8 thresh;
+ u8 ati_base;
+ __be16 ati_comp;
+} __packed;
+
+struct iqs626_tp_grp_reg {
+ u8 hyst;
+ u8 ati_target;
+ u8 engine[2];
+ struct iqs626_ch_reg_tp ch_reg_tp[IQS626_NUM_CH_TP_3];
+} __packed;
+
+struct iqs626_ch_reg_gen {
+ u8 thresh[3];
+ u8 padding;
+ u8 hyst;
+ u8 ati_target;
+ __be16 ati_comp;
+ u8 engine[5];
+ u8 filter;
+ u8 rx_enable;
+ u8 tx_enable;
+ u8 assoc_select;
+ u8 assoc_weight;
+} __packed;
+
+struct iqs626_ch_reg_hall {
+ u8 engine;
+ u8 thresh;
+ u8 hyst;
+ u8 ati_target;
+ __be16 ati_comp;
+} __packed;
+
+struct iqs626_sys_reg {
+ __be16 general;
+ u8 misc_a;
+ u8 event_mask;
+ u8 active;
+ u8 reseed;
+ u8 rate_np;
+ u8 rate_lp;
+ u8 rate_ulp;
+ u8 timeout_pwr;
+ u8 timeout_rdy;
+ u8 timeout_lta;
+ u8 misc_b;
+ u8 thresh_swipe;
+ u8 timeout_tap;
+ u8 timeout_swipe;
+ u8 redo_ati;
+ u8 padding;
+ struct iqs626_ch_reg_ulp ch_reg_ulp;
+ struct iqs626_tp_grp_reg tp_grp_reg;
+ struct iqs626_ch_reg_gen ch_reg_gen[IQS626_NUM_CH_GEN];
+ struct iqs626_ch_reg_hall ch_reg_hall;
+} __packed;
+
+struct iqs626_channel_desc {
+ const char *name;
+ int num_ch;
+ u8 active;
+ bool events[ARRAY_SIZE(iqs626_events)];
+};
+
+static const struct iqs626_channel_desc iqs626_channels[] = {
+ [IQS626_CH_ULP_0] = {
+ .name = "ulp-0",
+ .num_ch = 1,
+ .active = BIT(0),
+ .events = {
+ [IQS626_EVENT_PROX_DN] = true,
+ [IQS626_EVENT_PROX_UP] = true,
+ [IQS626_EVENT_TOUCH_DN] = true,
+ [IQS626_EVENT_TOUCH_UP] = true,
+ },
+ },
+ [IQS626_CH_TP_2] = {
+ .name = "trackpad-3x2",
+ .num_ch = IQS626_NUM_CH_TP_2,
+ .active = BIT(1),
+ .events = {
+ [IQS626_EVENT_TOUCH_DN] = true,
+ },
+ },
+ [IQS626_CH_TP_3] = {
+ .name = "trackpad-3x3",
+ .num_ch = IQS626_NUM_CH_TP_3,
+ .active = BIT(2) | BIT(1),
+ .events = {
+ [IQS626_EVENT_TOUCH_DN] = true,
+ },
+ },
+ [IQS626_CH_GEN_0] = {
+ .name = "generic-0",
+ .num_ch = 1,
+ .active = BIT(4),
+ .events = {
+ [IQS626_EVENT_PROX_DN] = true,
+ [IQS626_EVENT_PROX_UP] = true,
+ [IQS626_EVENT_TOUCH_DN] = true,
+ [IQS626_EVENT_TOUCH_UP] = true,
+ [IQS626_EVENT_DEEP_DN] = true,
+ [IQS626_EVENT_DEEP_UP] = true,
+ },
+ },
+ [IQS626_CH_GEN_1] = {
+ .name = "generic-1",
+ .num_ch = 1,
+ .active = BIT(5),
+ .events = {
+ [IQS626_EVENT_PROX_DN] = true,
+ [IQS626_EVENT_PROX_UP] = true,
+ [IQS626_EVENT_TOUCH_DN] = true,
+ [IQS626_EVENT_TOUCH_UP] = true,
+ [IQS626_EVENT_DEEP_DN] = true,
+ [IQS626_EVENT_DEEP_UP] = true,
+ },
+ },
+ [IQS626_CH_GEN_2] = {
+ .name = "generic-2",
+ .num_ch = 1,
+ .active = BIT(6),
+ .events = {
+ [IQS626_EVENT_PROX_DN] = true,
+ [IQS626_EVENT_PROX_UP] = true,
+ [IQS626_EVENT_TOUCH_DN] = true,
+ [IQS626_EVENT_TOUCH_UP] = true,
+ [IQS626_EVENT_DEEP_DN] = true,
+ [IQS626_EVENT_DEEP_UP] = true,
+ },
+ },
+ [IQS626_CH_HALL] = {
+ .name = "hall",
+ .num_ch = 1,
+ .active = BIT(7),
+ .events = {
+ [IQS626_EVENT_TOUCH_DN] = true,
+ [IQS626_EVENT_TOUCH_UP] = true,
+ },
+ },
+};
+
+struct iqs626_private {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct iqs626_sys_reg sys_reg;
+ struct completion ati_done;
+ struct input_dev *keypad;
+ struct input_dev *trackpad;
+ struct touchscreen_properties prop;
+ unsigned int kp_type[ARRAY_SIZE(iqs626_channels)]
+ [ARRAY_SIZE(iqs626_events)];
+ unsigned int kp_code[ARRAY_SIZE(iqs626_channels)]
+ [ARRAY_SIZE(iqs626_events)];
+ unsigned int tp_code[IQS626_NUM_GESTURES];
+ unsigned int suspend_mode;
+};
+
+static int iqs626_parse_events(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ const struct fwnode_handle *ev_node;
+ const char *ev_name;
+ u8 *thresh, *hyst;
+ unsigned int thresh_tp[IQS626_NUM_CH_TP_3];
+ unsigned int val;
+ int num_ch = iqs626_channels[ch_id].num_ch;
+ int error, i, j;
+
+ switch (ch_id) {
+ case IQS626_CH_ULP_0:
+ thresh = sys_reg->ch_reg_ulp.thresh;
+ hyst = &sys_reg->ch_reg_ulp.hyst;
+ break;
+
+ case IQS626_CH_TP_2:
+ case IQS626_CH_TP_3:
+ thresh = &sys_reg->tp_grp_reg.ch_reg_tp[0].thresh;
+ hyst = &sys_reg->tp_grp_reg.hyst;
+ break;
+
+ case IQS626_CH_GEN_0:
+ case IQS626_CH_GEN_1:
+ case IQS626_CH_GEN_2:
+ i = ch_id - IQS626_CH_GEN_0;
+ thresh = sys_reg->ch_reg_gen[i].thresh;
+ hyst = &sys_reg->ch_reg_gen[i].hyst;
+ break;
+
+ case IQS626_CH_HALL:
+ thresh = &sys_reg->ch_reg_hall.thresh;
+ hyst = &sys_reg->ch_reg_hall.hyst;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs626_events); i++) {
+ if (!iqs626_channels[ch_id].events[i])
+ continue;
+
+ if (ch_id == IQS626_CH_TP_2 || ch_id == IQS626_CH_TP_3) {
+ /*
+ * Trackpad touch events are simply described under the
+ * trackpad child node.
+ */
+ ev_node = ch_node;
+ } else {
+ ev_name = iqs626_events[i].name;
+ ev_node = fwnode_get_named_child_node(ch_node, ev_name);
+ if (!ev_node)
+ continue;
+
+ if (!fwnode_property_read_u32(ev_node, "linux,code",
+ &val)) {
+ iqs626->kp_code[ch_id][i] = val;
+
+ if (fwnode_property_read_u32(ev_node,
+ "linux,input-type",
+ &val)) {
+ if (ch_id == IQS626_CH_HALL)
+ val = EV_SW;
+ else
+ val = EV_KEY;
+ }
+
+ if (val != EV_KEY && val != EV_SW) {
+ dev_err(&client->dev,
+ "Invalid input type: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ iqs626->kp_type[ch_id][i] = val;
+
+ sys_reg->event_mask &= ~iqs626_events[i].mask;
+ }
+ }
+
+ if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) {
+ if (val > IQS626_CHx_HYST_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel hysteresis: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ if (i == IQS626_EVENT_DEEP_DN ||
+ i == IQS626_EVENT_DEEP_UP) {
+ *hyst &= ~IQS626_CHx_HYST_DEEP_MASK;
+ *hyst |= (val << IQS626_CHx_HYST_DEEP_SHIFT);
+ } else if (i == IQS626_EVENT_TOUCH_DN ||
+ i == IQS626_EVENT_TOUCH_UP) {
+ *hyst &= ~IQS626_CHx_HYST_TOUCH_MASK;
+ *hyst |= val;
+ }
+ }
+
+ if (ch_id != IQS626_CH_TP_2 && ch_id != IQS626_CH_TP_3 &&
+ !fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) {
+ if (val > IQS626_CHx_THRESH_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel threshold: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ if (ch_id == IQS626_CH_HALL)
+ *thresh = val;
+ else
+ *(thresh + iqs626_events[i].th_offs) = val;
+
+ continue;
+ }
+
+ if (!fwnode_property_present(ev_node, "azoteq,thresh"))
+ continue;
+
+ error = fwnode_property_read_u32_array(ev_node, "azoteq,thresh",
+ thresh_tp, num_ch);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read %s channel thresholds: %d\n",
+ fwnode_get_name(ch_node), error);
+ return error;
+ }
+
+ for (j = 0; j < num_ch; j++) {
+ if (thresh_tp[j] > IQS626_CHx_THRESH_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel threshold: %u\n",
+ fwnode_get_name(ch_node), thresh_tp[j]);
+ return -EINVAL;
+ }
+
+ sys_reg->tp_grp_reg.ch_reg_tp[j].thresh = thresh_tp[j];
+ }
+ }
+
+ return 0;
+}
+
+static int iqs626_parse_ati_target(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ unsigned int ati_base[IQS626_NUM_CH_TP_3];
+ unsigned int val;
+ u8 *ati_target;
+ int num_ch = iqs626_channels[ch_id].num_ch;
+ int error, i;
+
+ switch (ch_id) {
+ case IQS626_CH_ULP_0:
+ ati_target = &sys_reg->ch_reg_ulp.ati_target;
+ break;
+
+ case IQS626_CH_TP_2:
+ case IQS626_CH_TP_3:
+ ati_target = &sys_reg->tp_grp_reg.ati_target;
+ break;
+
+ case IQS626_CH_GEN_0:
+ case IQS626_CH_GEN_1:
+ case IQS626_CH_GEN_2:
+ i = ch_id - IQS626_CH_GEN_0;
+ ati_target = &sys_reg->ch_reg_gen[i].ati_target;
+ break;
+
+ case IQS626_CH_HALL:
+ ati_target = &sys_reg->ch_reg_hall.ati_target;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) {
+ if (val > IQS626_CHx_ATI_TARGET_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel ATI target: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *ati_target &= ~IQS626_CHx_ATI_TARGET_MASK;
+ *ati_target |= (val / 32);
+ }
+
+ if (ch_id != IQS626_CH_TP_2 && ch_id != IQS626_CH_TP_3 &&
+ !fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) {
+ switch (val) {
+ case 75:
+ val = IQS626_CHx_ATI_BASE_75;
+ break;
+
+ case 100:
+ val = IQS626_CHx_ATI_BASE_100;
+ break;
+
+ case 150:
+ val = IQS626_CHx_ATI_BASE_150;
+ break;
+
+ case 200:
+ val = IQS626_CHx_ATI_BASE_200;
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "Invalid %s channel ATI base: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *ati_target &= ~IQS626_CHx_ATI_BASE_MASK;
+ *ati_target |= val;
+
+ return 0;
+ }
+
+ if (!fwnode_property_present(ch_node, "azoteq,ati-base"))
+ return 0;
+
+ error = fwnode_property_read_u32_array(ch_node, "azoteq,ati-base",
+ ati_base, num_ch);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read %s channel ATI bases: %d\n",
+ fwnode_get_name(ch_node), error);
+ return error;
+ }
+
+ for (i = 0; i < num_ch; i++) {
+ if (ati_base[i] < IQS626_TPx_ATI_BASE_MIN ||
+ ati_base[i] > IQS626_TPx_ATI_BASE_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel ATI base: %u\n",
+ fwnode_get_name(ch_node), ati_base[i]);
+ return -EINVAL;
+ }
+
+ ati_base[i] -= IQS626_TPx_ATI_BASE_MIN;
+ sys_reg->tp_grp_reg.ch_reg_tp[i].ati_base = ati_base[i];
+ }
+
+ return 0;
+}
+
+static int iqs626_parse_pins(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ const char *propname, u8 *enable)
+{
+ struct i2c_client *client = iqs626->client;
+ unsigned int val[IQS626_NUM_CRx_TX];
+ int error, count, i;
+
+ if (!fwnode_property_present(ch_node, propname))
+ return 0;
+
+ count = fwnode_property_count_u32(ch_node, propname);
+ if (count > IQS626_NUM_CRx_TX) {
+ dev_err(&client->dev,
+ "Too many %s channel CRX/TX pins present\n",
+ fwnode_get_name(ch_node));
+ return -EINVAL;
+ } else if (count < 0) {
+ dev_err(&client->dev,
+ "Failed to count %s channel CRX/TX pins: %d\n",
+ fwnode_get_name(ch_node), count);
+ return count;
+ }
+
+ error = fwnode_property_read_u32_array(ch_node, propname, val, count);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read %s channel CRX/TX pins: %d\n",
+ fwnode_get_name(ch_node), error);
+ return error;
+ }
+
+ *enable = 0;
+
+ for (i = 0; i < count; i++) {
+ if (val[i] >= IQS626_NUM_CRx_TX) {
+ dev_err(&client->dev,
+ "Invalid %s channel CRX/TX pin: %u\n",
+ fwnode_get_name(ch_node), val[i]);
+ return -EINVAL;
+ }
+
+ *enable |= BIT(val[i]);
+ }
+
+ return 0;
+}
+
+static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ u8 *hyst = &sys_reg->tp_grp_reg.hyst;
+ unsigned int val;
+ int error, count;
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,lta-update", &val)) {
+ if (val > IQS626_MISC_A_TPx_LTA_UPDATE_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel update rate: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ sys_reg->misc_a &= ~IQS626_MISC_A_TPx_LTA_UPDATE_MASK;
+ sys_reg->misc_a |= (val << IQS626_MISC_A_TPx_LTA_UPDATE_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-trackpad",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ sys_reg->misc_b &= ~IQS626_MISC_B_FILT_STR_TPx;
+ sys_reg->misc_b |= val;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-np-cnt",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *hyst &= ~IQS626_FILT_STR_NP_TPx_MASK;
+ *hyst |= (val << IQS626_FILT_STR_NP_TPx_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-lp-cnt",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *hyst &= ~IQS626_FILT_STR_LP_TPx_MASK;
+ *hyst |= (val << IQS626_FILT_STR_LP_TPx_SHIFT);
+ }
+
+ if (!fwnode_property_present(ch_node, "linux,keycodes"))
+ return 0;
+
+ count = fwnode_property_count_u32(ch_node, "linux,keycodes");
+ if (count > IQS626_NUM_GESTURES) {
+ dev_err(&client->dev, "Too many keycodes present\n");
+ return -EINVAL;
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count keycodes: %d\n", count);
+ return count;
+ }
+
+ error = fwnode_property_read_u32_array(ch_node, "linux,keycodes",
+ iqs626->tp_code, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read keycodes: %d\n", error);
+ return error;
+ }
+
+ sys_reg->misc_b &= ~IQS626_MISC_B_TPx_SWIPE;
+ if (fwnode_property_present(ch_node, "azoteq,gesture-swipe"))
+ sys_reg->misc_b |= IQS626_MISC_B_TPx_SWIPE;
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,timeout-tap-ms",
+ &val)) {
+ if (val > IQS626_TIMEOUT_TAP_MS_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel timeout: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
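+		/* 16 ms per LSB: e.g. a 400 ms tap timeout is stored as 25 */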
+ sys_reg->timeout_tap = val / 16;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,timeout-swipe-ms",
+ &val)) {
+ if (val > IQS626_TIMEOUT_SWIPE_MS_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel timeout: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_swipe = val / 16;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,thresh-swipe",
+ &val)) {
+ if (val > IQS626_THRESH_SWIPE_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel threshold: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ sys_reg->thresh_swipe = val;
+ }
+
+ sys_reg->event_mask &= ~IQS626_EVENT_MASK_GESTURE;
+
+ return 0;
+}
+
+static int iqs626_parse_channel(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ u8 *engine, *filter, *rx_enable, *tx_enable;
+ u8 *assoc_select, *assoc_weight;
+ unsigned int val;
+ int error, i;
+
+ switch (ch_id) {
+ case IQS626_CH_ULP_0:
+ engine = sys_reg->ch_reg_ulp.engine;
+ break;
+
+ case IQS626_CH_TP_2:
+ case IQS626_CH_TP_3:
+ engine = sys_reg->tp_grp_reg.engine;
+ break;
+
+ case IQS626_CH_GEN_0:
+ case IQS626_CH_GEN_1:
+ case IQS626_CH_GEN_2:
+ i = ch_id - IQS626_CH_GEN_0;
+ engine = sys_reg->ch_reg_gen[i].engine;
+ break;
+
+ case IQS626_CH_HALL:
+ engine = &sys_reg->ch_reg_hall.engine;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *engine |= IQS626_CHx_ENG_0_MEAS_CAP_SIZE;
+ if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease"))
+ *engine &= ~IQS626_CHx_ENG_0_MEAS_CAP_SIZE;
+
+ *engine |= IQS626_CHx_ENG_0_RX_TERM_VSS;
+ if (!fwnode_property_read_u32(ch_node, "azoteq,rx-inactive", &val)) {
+ switch (val) {
+ case IQS626_RX_INACTIVE_VSS:
+ break;
+
+ case IQS626_RX_INACTIVE_FLOAT:
+ *engine &= ~IQS626_CHx_ENG_0_RX_TERM_VSS;
+ if (ch_id == IQS626_CH_GEN_0 ||
+ ch_id == IQS626_CH_GEN_1 ||
+ ch_id == IQS626_CH_GEN_2)
+ *(engine + 4) &= ~IQS626_CHx_ENG_4_RX_TERM_VREG;
+ break;
+
+ case IQS626_RX_INACTIVE_VREG:
+ if (ch_id == IQS626_CH_GEN_0 ||
+ ch_id == IQS626_CH_GEN_1 ||
+ ch_id == IQS626_CH_GEN_2) {
+ *engine &= ~IQS626_CHx_ENG_0_RX_TERM_VSS;
+ *(engine + 4) |= IQS626_CHx_ENG_4_RX_TERM_VREG;
+ break;
+ }
+ fallthrough;
+
+ default:
+ dev_err(&client->dev,
+ "Invalid %s channel CRX pin termination: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+ }
+
+ *engine &= ~IQS626_CHx_ENG_0_LINEARIZE;
+ if (fwnode_property_present(ch_node, "azoteq,linearize"))
+ *engine |= IQS626_CHx_ENG_0_LINEARIZE;
+
+ *engine &= ~IQS626_CHx_ENG_0_DUAL_DIR;
+ if (fwnode_property_present(ch_node, "azoteq,dual-direction"))
+ *engine |= IQS626_CHx_ENG_0_DUAL_DIR;
+
+ *engine &= ~IQS626_CHx_ENG_0_FILT_DISABLE;
+ if (fwnode_property_present(ch_node, "azoteq,filt-disable"))
+ *engine |= IQS626_CHx_ENG_0_FILT_DISABLE;
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) {
+ if (val > IQS626_CHx_ENG_0_ATI_MODE_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel ATI mode: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *engine &= ~IQS626_CHx_ENG_0_ATI_MODE_MASK;
+ *engine |= val;
+ }
+
+ if (ch_id == IQS626_CH_HALL)
+ return 0;
+
+ *(engine + 1) &= ~IQS626_CHx_ENG_1_CCT_ENABLE;
+ if (!fwnode_property_read_u32(ch_node, "azoteq,cct-increase",
+ &val) && val) {
+ unsigned int orig_val = val--;
+
+ /*
+ * In the case of the generic channels, the charge cycle time
+ * field doubles in size and straddles two separate registers.
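+		 * For example, "azoteq,cct-increase" = 11 is stored as 10
+		 * (0b1010): the low two bits land in the ENG_4 register and
+		 * the high two bits in ENG_1.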
+ */
+ if (ch_id == IQS626_CH_GEN_0 ||
+ ch_id == IQS626_CH_GEN_1 ||
+ ch_id == IQS626_CH_GEN_2) {
+ *(engine + 4) &= ~IQS626_CHx_ENG_4_CCT_LOW_1;
+ if (val & BIT(1))
+ *(engine + 4) |= IQS626_CHx_ENG_4_CCT_LOW_1;
+
+ *(engine + 4) &= ~IQS626_CHx_ENG_4_CCT_LOW_0;
+ if (val & BIT(0))
+ *(engine + 4) |= IQS626_CHx_ENG_4_CCT_LOW_0;
+
+ val >>= 2;
+ }
+
+ if (val & ~GENMASK(1, 0)) {
+ dev_err(&client->dev,
+ "Invalid %s channel charge cycle time: %u\n",
+ fwnode_get_name(ch_node), orig_val);
+ return -EINVAL;
+ }
+
+ *(engine + 1) &= ~IQS626_CHx_ENG_1_CCT_HIGH_1;
+ if (val & BIT(1))
+ *(engine + 1) |= IQS626_CHx_ENG_1_CCT_HIGH_1;
+
+ *(engine + 1) &= ~IQS626_CHx_ENG_1_CCT_HIGH_0;
+ if (val & BIT(0))
+ *(engine + 1) |= IQS626_CHx_ENG_1_CCT_HIGH_0;
+
+ *(engine + 1) |= IQS626_CHx_ENG_1_CCT_ENABLE;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) {
+ if (val > IQS626_CHx_ENG_1_PROJ_BIAS_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel bias current: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *(engine + 1) &= ~IQS626_CHx_ENG_1_PROJ_BIAS_MASK;
+ *(engine + 1) |= (val << IQS626_CHx_ENG_1_PROJ_BIAS_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) {
+ if (val > IQS626_CHx_ENG_1_SENSE_FREQ_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel sensing frequency: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *(engine + 1) &= ~IQS626_CHx_ENG_1_SENSE_FREQ_MASK;
+ *(engine + 1) |= (val << IQS626_CHx_ENG_1_SENSE_FREQ_SHIFT);
+ }
+
+ *(engine + 1) &= ~IQS626_CHx_ENG_1_ATI_BAND_TIGHTEN;
+ if (fwnode_property_present(ch_node, "azoteq,ati-band-tighten"))
+ *(engine + 1) |= IQS626_CHx_ENG_1_ATI_BAND_TIGHTEN;
+
+ if (ch_id == IQS626_CH_TP_2 || ch_id == IQS626_CH_TP_3)
+ return iqs626_parse_trackpad(iqs626, ch_node);
+
+ if (ch_id == IQS626_CH_ULP_0) {
+ sys_reg->ch_reg_ulp.hyst &= ~IQS626_ULP_PROJ_ENABLE;
+ if (fwnode_property_present(ch_node, "azoteq,proj-enable"))
+ sys_reg->ch_reg_ulp.hyst |= IQS626_ULP_PROJ_ENABLE;
+
+ filter = &sys_reg->ch_reg_ulp.filter;
+
+ rx_enable = &sys_reg->ch_reg_ulp.rx_enable;
+ tx_enable = &sys_reg->ch_reg_ulp.tx_enable;
+ } else {
+ i = ch_id - IQS626_CH_GEN_0;
+ filter = &sys_reg->ch_reg_gen[i].filter;
+
+ rx_enable = &sys_reg->ch_reg_gen[i].rx_enable;
+ tx_enable = &sys_reg->ch_reg_gen[i].tx_enable;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-np-cnt",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *filter &= ~IQS626_FILT_STR_NP_CNT_MASK;
+ *filter |= (val << IQS626_FILT_STR_NP_CNT_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-lp-cnt",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *filter &= ~IQS626_FILT_STR_LP_CNT_MASK;
+ *filter |= (val << IQS626_FILT_STR_LP_CNT_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-np-lta",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *filter &= ~IQS626_FILT_STR_NP_LTA_MASK;
+ *filter |= (val << IQS626_FILT_STR_NP_LTA_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,filt-str-lp-lta",
+ &val)) {
+ if (val > IQS626_FILT_STR_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel filter strength: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *filter &= ~IQS626_FILT_STR_LP_LTA_MASK;
+ *filter |= val;
+ }
+
+ error = iqs626_parse_pins(iqs626, ch_node, "azoteq,rx-enable",
+ rx_enable);
+ if (error)
+ return error;
+
+ error = iqs626_parse_pins(iqs626, ch_node, "azoteq,tx-enable",
+ tx_enable);
+ if (error)
+ return error;
+
+ if (ch_id == IQS626_CH_ULP_0)
+ return 0;
+
+ *(engine + 2) &= ~IQS626_CHx_ENG_2_LOCAL_CAP_ENABLE;
+ if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size",
+ &val) && val) {
+ unsigned int orig_val = val--;
+
+ if (val > IQS626_CHx_ENG_2_LOCAL_CAP_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel local cap. size: %u\n",
+ fwnode_get_name(ch_node), orig_val);
+ return -EINVAL;
+ }
+
+ *(engine + 2) &= ~IQS626_CHx_ENG_2_LOCAL_CAP_MASK;
+ *(engine + 2) |= (val << IQS626_CHx_ENG_2_LOCAL_CAP_SHIFT);
+
+ *(engine + 2) |= IQS626_CHx_ENG_2_LOCAL_CAP_ENABLE;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) {
+ if (val > IQS626_CHx_ENG_2_SENSE_MODE_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel sensing mode: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *(engine + 2) &= ~IQS626_CHx_ENG_2_SENSE_MODE_MASK;
+ *(engine + 2) |= val;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,tx-freq", &val)) {
+ if (val > IQS626_CHx_ENG_3_TX_FREQ_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel excitation frequency: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *(engine + 3) &= ~IQS626_CHx_ENG_3_TX_FREQ_MASK;
+ *(engine + 3) |= (val << IQS626_CHx_ENG_3_TX_FREQ_SHIFT);
+ }
+
+ *(engine + 3) &= ~IQS626_CHx_ENG_3_INV_LOGIC;
+ if (fwnode_property_present(ch_node, "azoteq,invert-enable"))
+ *(engine + 3) |= IQS626_CHx_ENG_3_INV_LOGIC;
+
+ *(engine + 4) &= ~IQS626_CHx_ENG_4_COMP_DISABLE;
+ if (fwnode_property_present(ch_node, "azoteq,comp-disable"))
+ *(engine + 4) |= IQS626_CHx_ENG_4_COMP_DISABLE;
+
+ *(engine + 4) &= ~IQS626_CHx_ENG_4_STATIC_ENABLE;
+ if (fwnode_property_present(ch_node, "azoteq,static-enable"))
+ *(engine + 4) |= IQS626_CHx_ENG_4_STATIC_ENABLE;
+
+ i = ch_id - IQS626_CH_GEN_0;
+ assoc_select = &sys_reg->ch_reg_gen[i].assoc_select;
+ assoc_weight = &sys_reg->ch_reg_gen[i].assoc_weight;
+
+ *assoc_select = 0;
+ if (!fwnode_property_present(ch_node, "azoteq,assoc-select"))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
+ if (fwnode_property_match_string(ch_node, "azoteq,assoc-select",
+ iqs626_channels[i].name) < 0)
+ continue;
+
+ *assoc_select |= iqs626_channels[i].active;
+ }
+
+ if (fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val))
+ return 0;
+
+ if (val > IQS626_GEN_WEIGHT_MAX) {
+ dev_err(&client->dev,
+ "Invalid %s channel associated weight: %u\n",
+ fwnode_get_name(ch_node), val);
+ return -EINVAL;
+ }
+
+ *assoc_weight = val;
+
+ return 0;
+}
+
+static int iqs626_parse_prop(struct iqs626_private *iqs626)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ struct fwnode_handle *ch_node;
+ unsigned int val;
+ int error, i;
+ u16 general;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode",
+ &val)) {
+ if (val > IQS626_SYS_SETTINGS_PWR_MODE_MAX) {
+ dev_err(&client->dev, "Invalid suspend mode: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ iqs626->suspend_mode = val;
+ }
+
+ error = regmap_raw_read(iqs626->regmap, IQS626_SYS_SETTINGS, sys_reg,
+ sizeof(*sys_reg));
+ if (error)
+ return error;
+
+ general = be16_to_cpu(sys_reg->general);
+ general &= IQS626_SYS_SETTINGS_ULP_UPDATE_MASK;
+
+ if (device_property_present(&client->dev, "azoteq,clk-div"))
+ general |= IQS626_SYS_SETTINGS_CLK_DIV;
+
+ if (device_property_present(&client->dev, "azoteq,ulp-enable"))
+ general |= IQS626_SYS_SETTINGS_ULP_AUTO;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,ulp-update",
+ &val)) {
+ if (val > IQS626_SYS_SETTINGS_ULP_UPDATE_MAX) {
+ dev_err(&client->dev, "Invalid update rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ general &= ~IQS626_SYS_SETTINGS_ULP_UPDATE_MASK;
+ general |= (val << IQS626_SYS_SETTINGS_ULP_UPDATE_SHIFT);
+ }
+
+ sys_reg->misc_a &= ~IQS626_MISC_A_ATI_BAND_DISABLE;
+ if (device_property_present(&client->dev, "azoteq,ati-band-disable"))
+ sys_reg->misc_a |= IQS626_MISC_A_ATI_BAND_DISABLE;
+
+ sys_reg->misc_a &= ~IQS626_MISC_A_ATI_LP_ONLY;
+ if (device_property_present(&client->dev, "azoteq,ati-lp-only"))
+ sys_reg->misc_a |= IQS626_MISC_A_ATI_LP_ONLY;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select",
+ &val)) {
+ if (val > IQS626_MISC_A_GPIO3_SELECT_MAX) {
+ dev_err(&client->dev, "Invalid GPIO3 selection: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->misc_a &= ~IQS626_MISC_A_GPIO3_SELECT_MASK;
+ sys_reg->misc_a |= val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,reseed-select",
+ &val)) {
+ if (val > IQS626_MISC_B_RESEED_UI_SEL_MAX) {
+ dev_err(&client->dev, "Invalid reseed selection: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->misc_b &= ~IQS626_MISC_B_RESEED_UI_SEL_MASK;
+ sys_reg->misc_b |= (val << IQS626_MISC_B_RESEED_UI_SEL_SHIFT);
+ }
+
+ sys_reg->misc_b &= ~IQS626_MISC_B_THRESH_EXTEND;
+ if (device_property_present(&client->dev, "azoteq,thresh-extend"))
+ sys_reg->misc_b |= IQS626_MISC_B_THRESH_EXTEND;
+
+ sys_reg->misc_b &= ~IQS626_MISC_B_TRACKING_UI_ENABLE;
+ if (device_property_present(&client->dev, "azoteq,tracking-enable"))
+ sys_reg->misc_b |= IQS626_MISC_B_TRACKING_UI_ENABLE;
+
+ sys_reg->misc_b &= ~IQS626_MISC_B_RESEED_OFFSET;
+ if (device_property_present(&client->dev, "azoteq,reseed-offset"))
+ sys_reg->misc_b |= IQS626_MISC_B_RESEED_OFFSET;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms",
+ &val)) {
+ if (val > IQS626_RATE_NP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_np = val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms",
+ &val)) {
+ if (val > IQS626_RATE_LP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_lp = val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms",
+ &val)) {
+ if (val > IQS626_RATE_ULP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_ulp = val / 16;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms",
+ &val)) {
+ if (val > IQS626_TIMEOUT_PWR_MS_MAX) {
+ dev_err(&client->dev, "Invalid timeout: %u\n", val);
+ return -EINVAL;
+ }
+
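+		/* 512 ms per LSB: e.g. a 1024 ms timeout is stored as 2 */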
+ sys_reg->timeout_pwr = val / 512;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms",
+ &val)) {
+ if (val > IQS626_TIMEOUT_LTA_MS_MAX) {
+ dev_err(&client->dev, "Invalid timeout: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_lta = val / 512;
+ }
+
+ sys_reg->event_mask = ~((u8)IQS626_EVENT_MASK_SYS);
+ sys_reg->redo_ati = 0;
+
+ sys_reg->reseed = 0;
+ sys_reg->active = 0;
+
+ for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
+ ch_node = device_get_named_child_node(&client->dev,
+ iqs626_channels[i].name);
+ if (!ch_node)
+ continue;
+
+ error = iqs626_parse_channel(iqs626, ch_node, i);
+ if (error)
+ return error;
+
+ error = iqs626_parse_ati_target(iqs626, ch_node, i);
+ if (error)
+ return error;
+
+ error = iqs626_parse_events(iqs626, ch_node, i);
+ if (error)
+ return error;
+
+ if (!fwnode_property_present(ch_node, "azoteq,ati-exclude"))
+ sys_reg->redo_ati |= iqs626_channels[i].active;
+
+ if (!fwnode_property_present(ch_node, "azoteq,reseed-disable"))
+ sys_reg->reseed |= iqs626_channels[i].active;
+
+ sys_reg->active |= iqs626_channels[i].active;
+ }
+
+ general |= IQS626_SYS_SETTINGS_EVENT_MODE;
+
+ /*
+ * Enable streaming during normal-power mode if the trackpad is used to
+ * report raw coordinates instead of gestures. In that case, the device
+ * returns to event mode during low-power mode.
+ */
+ if (sys_reg->active & iqs626_channels[IQS626_CH_TP_2].active &&
+ sys_reg->event_mask & IQS626_EVENT_MASK_GESTURE)
+ general |= IQS626_SYS_SETTINGS_EVENT_MODE_LP;
+
+ general |= IQS626_SYS_SETTINGS_REDO_ATI;
+ general |= IQS626_SYS_SETTINGS_ACK_RESET;
+
+ sys_reg->general = cpu_to_be16(general);
+
+ error = regmap_raw_write(iqs626->regmap, IQS626_SYS_SETTINGS,
+ &iqs626->sys_reg, sizeof(iqs626->sys_reg));
+ if (error)
+ return error;
+
+ iqs626_irq_wait();
+
+ return 0;
+}
+
+static int iqs626_input_init(struct iqs626_private *iqs626)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ int error, i, j;
+
+ iqs626->keypad = devm_input_allocate_device(&client->dev);
+ if (!iqs626->keypad)
+ return -ENOMEM;
+
+ iqs626->keypad->keycodemax = ARRAY_SIZE(iqs626->kp_code);
+ iqs626->keypad->keycode = iqs626->kp_code;
+ iqs626->keypad->keycodesize = sizeof(**iqs626->kp_code);
+
+ iqs626->keypad->name = "iqs626a_keypad";
+ iqs626->keypad->id.bustype = BUS_I2C;
+
+ for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
+ if (!(sys_reg->active & iqs626_channels[i].active))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(iqs626_events); j++) {
+ if (!iqs626->kp_type[i][j])
+ continue;
+
+ input_set_capability(iqs626->keypad,
+ iqs626->kp_type[i][j],
+ iqs626->kp_code[i][j]);
+ }
+ }
+
+ if (!(sys_reg->active & iqs626_channels[IQS626_CH_TP_2].active))
+ return 0;
+
+ iqs626->trackpad = devm_input_allocate_device(&client->dev);
+ if (!iqs626->trackpad)
+ return -ENOMEM;
+
+ iqs626->trackpad->keycodemax = ARRAY_SIZE(iqs626->tp_code);
+ iqs626->trackpad->keycode = iqs626->tp_code;
+ iqs626->trackpad->keycodesize = sizeof(*iqs626->tp_code);
+
+ iqs626->trackpad->name = "iqs626a_trackpad";
+ iqs626->trackpad->id.bustype = BUS_I2C;
+
+ /*
+ * Present the trackpad as a traditional pointing device if no gestures
+ * have been mapped to a keycode.
+ */
+ if (sys_reg->event_mask & IQS626_EVENT_MASK_GESTURE) {
+ u8 tp_mask = iqs626_channels[IQS626_CH_TP_3].active;
+
+ input_set_capability(iqs626->trackpad, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(iqs626->trackpad, ABS_Y, 0, 255, 0, 0);
+
+ if ((sys_reg->active & tp_mask) == tp_mask)
+ input_set_abs_params(iqs626->trackpad,
+ ABS_X, 0, 255, 0, 0);
+ else
+ input_set_abs_params(iqs626->trackpad,
+ ABS_X, 0, 128, 0, 0);
+
+ touchscreen_parse_properties(iqs626->trackpad, false,
+ &iqs626->prop);
+ } else {
+ for (i = 0; i < IQS626_NUM_GESTURES; i++)
+ if (iqs626->tp_code[i] != KEY_RESERVED)
+ input_set_capability(iqs626->trackpad, EV_KEY,
+ iqs626->tp_code[i]);
+ }
+
+ error = input_register_device(iqs626->trackpad);
+ if (error)
+ dev_err(&client->dev, "Failed to register trackpad: %d\n",
+ error);
+
+ return error;
+}
+
+static int iqs626_report(struct iqs626_private *iqs626)
+{
+ struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
+ struct i2c_client *client = iqs626->client;
+ struct iqs626_flags flags;
+ __le16 hall_output;
+ int error, i, j;
+ u8 state;
+ u8 *dir_mask = &flags.states[IQS626_ST_OFFS_DIR];
+
+ error = regmap_raw_read(iqs626->regmap, IQS626_SYS_FLAGS, &flags,
+ sizeof(flags));
+ if (error) {
+ dev_err(&client->dev, "Failed to read device status: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * The device resets itself if its own watchdog bites, which can happen
+ * in the event of an I2C communication error. In this case, the device
+ * asserts a SHOW_RESET interrupt and all registers must be restored.
+ */
+ if (be16_to_cpu(flags.system) & IQS626_SYS_FLAGS_SHOW_RESET) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ error = regmap_raw_write(iqs626->regmap, IQS626_SYS_SETTINGS,
+ sys_reg, sizeof(*sys_reg));
+ if (error)
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", error);
+
+ return error;
+ }
+
+ if (be16_to_cpu(flags.system) & IQS626_SYS_FLAGS_IN_ATI)
+ return 0;
+
+ /*
+ * Unlike the ULP or generic channels, the Hall channel does not have a
+ * direction flag. Instead, the direction (i.e. magnet polarity) can be
+	 * derived from the sign of the 2's complement differential output.
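+	 * A raw reading below 0x8000 is non-negative, so the direction bit
+	 * is set; readings of 0x8000 and above are negative and leave it
+	 * clear.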
+ */
+ if (sys_reg->active & iqs626_channels[IQS626_CH_HALL].active) {
+ error = regmap_raw_read(iqs626->regmap, IQS626_HALL_OUTPUT,
+ &hall_output, sizeof(hall_output));
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read Hall output: %d\n", error);
+ return error;
+ }
+
+ *dir_mask &= ~iqs626_channels[IQS626_CH_HALL].active;
+ if (le16_to_cpu(hall_output) < 0x8000)
+ *dir_mask |= iqs626_channels[IQS626_CH_HALL].active;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs626_channels); i++) {
+ if (!(sys_reg->active & iqs626_channels[i].active))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(iqs626_events); j++) {
+ if (!iqs626->kp_type[i][j])
+ continue;
+
+ state = flags.states[iqs626_events[j].st_offs];
+ state &= iqs626_events[j].dir_up ? *dir_mask
+ : ~(*dir_mask);
+ state &= iqs626_channels[i].active;
+
+ input_event(iqs626->keypad, iqs626->kp_type[i][j],
+ iqs626->kp_code[i][j], !!state);
+ }
+ }
+
+ input_sync(iqs626->keypad);
+
+ /*
+ * The following completion signals that ATI has finished, any initial
+ * switch states have been reported and the keypad can be registered.
+ */
+ complete_all(&iqs626->ati_done);
+
+ if (!(sys_reg->active & iqs626_channels[IQS626_CH_TP_2].active))
+ return 0;
+
+ if (sys_reg->event_mask & IQS626_EVENT_MASK_GESTURE) {
+ state = flags.states[IQS626_ST_OFFS_TOUCH];
+ state &= iqs626_channels[IQS626_CH_TP_2].active;
+
+ input_report_key(iqs626->trackpad, BTN_TOUCH, state);
+
+ if (state)
+ touchscreen_report_pos(iqs626->trackpad, &iqs626->prop,
+ flags.trackpad_x,
+ flags.trackpad_y, false);
+ } else {
+ for (i = 0; i < IQS626_NUM_GESTURES; i++)
+ input_report_key(iqs626->trackpad, iqs626->tp_code[i],
+ flags.gesture & BIT(i));
+
+ if (flags.gesture & GENMASK(IQS626_GESTURE_TAP, 0)) {
+ input_sync(iqs626->trackpad);
+
+ /*
+ * Momentary gestures are followed by a complementary
+ * release cycle so as to emulate a full keystroke.
+ */
+ for (i = 0; i < IQS626_GESTURE_HOLD; i++)
+ input_report_key(iqs626->trackpad,
+ iqs626->tp_code[i], 0);
+ }
+ }
+
+ input_sync(iqs626->trackpad);
+
+ return 0;
+}
+
+static irqreturn_t iqs626_irq(int irq, void *context)
+{
+ struct iqs626_private *iqs626 = context;
+
+ if (iqs626_report(iqs626))
+ return IRQ_NONE;
+
+ /*
+ * The device does not deassert its interrupt (RDY) pin until shortly
+ * after receiving an I2C stop condition; the following delay ensures
+ * the interrupt handler does not return before this time.
+ */
+ iqs626_irq_wait();
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config iqs626_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = IQS626_MAX_REG,
+};
+
+static int iqs626_probe(struct i2c_client *client)
+{
+ struct iqs626_ver_info ver_info;
+ struct iqs626_private *iqs626;
+ int error;
+
+ iqs626 = devm_kzalloc(&client->dev, sizeof(*iqs626), GFP_KERNEL);
+ if (!iqs626)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs626);
+ iqs626->client = client;
+
+ iqs626->regmap = devm_regmap_init_i2c(client, &iqs626_regmap_config);
+ if (IS_ERR(iqs626->regmap)) {
+ error = PTR_ERR(iqs626->regmap);
+ dev_err(&client->dev, "Failed to initialize register map: %d\n",
+ error);
+ return error;
+ }
+
+ init_completion(&iqs626->ati_done);
+
+ error = regmap_raw_read(iqs626->regmap, IQS626_VER_INFO, &ver_info,
+ sizeof(ver_info));
+ if (error)
+ return error;
+
+ if (ver_info.prod_num != IQS626_VER_INFO_PROD_NUM) {
+ dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
+ ver_info.prod_num);
+ return -EINVAL;
+ }
+
+ error = iqs626_parse_prop(iqs626);
+ if (error)
+ return error;
+
+ error = iqs626_input_init(iqs626);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs626_irq, IRQF_ONESHOT,
+ client->name, iqs626);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ if (!wait_for_completion_timeout(&iqs626->ati_done,
+ msecs_to_jiffies(2000))) {
+ dev_err(&client->dev, "Failed to complete ATI\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * The keypad may include one or more switches and is not registered
+ * until ATI is complete and the initial switch states are read.
+ */
+ error = input_register_device(iqs626->keypad);
+ if (error)
+ dev_err(&client->dev, "Failed to register keypad: %d\n", error);
+
+ return error;
+}
+
+static int __maybe_unused iqs626_suspend(struct device *dev)
+{
+ struct iqs626_private *iqs626 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs626->client;
+ unsigned int val;
+ int error;
+
+ if (!iqs626->suspend_mode)
+ return 0;
+
+ disable_irq(client->irq);
+
+ /*
+ * Automatic power mode switching must be disabled before the device is
+ * forced into any particular power mode. In this case, the device will
+ * transition into normal-power mode.
+ */
+ error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
+ IQS626_SYS_SETTINGS_DIS_AUTO, ~0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * The following check ensures the device has completed its transition
+ * into normal-power mode before a manual mode switch is performed.
+ */
+ error = regmap_read_poll_timeout(iqs626->regmap, IQS626_SYS_FLAGS, val,
+ !(val & IQS626_SYS_FLAGS_PWR_MODE_MASK),
+ IQS626_PWR_MODE_POLL_SLEEP_US,
+ IQS626_PWR_MODE_POLL_TIMEOUT_US);
+ if (error)
+ goto err_irq;
+
+ error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
+ IQS626_SYS_SETTINGS_PWR_MODE_MASK,
+ iqs626->suspend_mode <<
+ IQS626_SYS_SETTINGS_PWR_MODE_SHIFT);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This last check ensures the device has completed its transition into
+ * the desired power mode to prevent any spurious interrupts from being
+ * triggered after iqs626_suspend has already returned.
+ */
+ error = regmap_read_poll_timeout(iqs626->regmap, IQS626_SYS_FLAGS, val,
+ (val & IQS626_SYS_FLAGS_PWR_MODE_MASK)
+ == (iqs626->suspend_mode <<
+ IQS626_SYS_FLAGS_PWR_MODE_SHIFT),
+ IQS626_PWR_MODE_POLL_SLEEP_US,
+ IQS626_PWR_MODE_POLL_TIMEOUT_US);
+
+err_irq:
+ iqs626_irq_wait();
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static int __maybe_unused iqs626_resume(struct device *dev)
+{
+ struct iqs626_private *iqs626 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs626->client;
+ unsigned int val;
+ int error;
+
+ if (!iqs626->suspend_mode)
+ return 0;
+
+ disable_irq(client->irq);
+
+ error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
+ IQS626_SYS_SETTINGS_PWR_MODE_MASK, 0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This check ensures the device has returned to normal-power mode
+ * before automatic power mode switching is re-enabled.
+ */
+ error = regmap_read_poll_timeout(iqs626->regmap, IQS626_SYS_FLAGS, val,
+ !(val & IQS626_SYS_FLAGS_PWR_MODE_MASK),
+ IQS626_PWR_MODE_POLL_SLEEP_US,
+ IQS626_PWR_MODE_POLL_TIMEOUT_US);
+ if (error)
+ goto err_irq;
+
+ error = regmap_update_bits(iqs626->regmap, IQS626_SYS_SETTINGS,
+ IQS626_SYS_SETTINGS_DIS_AUTO, 0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This step reports any events that may have been "swallowed" as a
+ * result of polling PWR_MODE (which automatically acknowledges any
+ * pending interrupts).
+ */
+ error = iqs626_report(iqs626);
+
+err_irq:
+ iqs626_irq_wait();
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs626_pm, iqs626_suspend, iqs626_resume);
+
+static const struct of_device_id iqs626_of_match[] = {
+ { .compatible = "azoteq,iqs626a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs626_of_match);
+
+static struct i2c_driver iqs626_i2c_driver = {
+ .driver = {
+ .name = "iqs626a",
+ .of_match_table = iqs626_of_match,
+ .pm = &iqs626_pm,
+ },
+ .probe_new = iqs626_probe,
+};
+module_i2c_driver(iqs626_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS626A Capacitive Touch Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 20ff087b8a44..cd5e99ec1d3c 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -61,15 +61,10 @@ static int max8997_haptic_set_duty_cycle(struct max8997_haptic *chip)
unsigned int duty = chip->pwm_period * chip->level / 100;
ret = pwm_config(chip->pwm, duty, chip->pwm_period);
} else {
- int i;
u8 duty_index = 0;
- for (i = 0; i <= 64; i++) {
- if (chip->level <= i * 100 / 64) {
- duty_index = i;
- break;
- }
- }
+ duty_index = DIV_ROUND_UP(chip->level * 64, 100);
+
switch (chip->internal_mode_pattern) {
case 0:
max8997_write_reg(chip->client,
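/*
 * A quick stand-alone check (illustrative user-space C, not part of the
 * patch) that the DIV_ROUND_UP form above is equivalent to the loop it
 * replaces: both compute the smallest index i in 0..64 satisfying
 * level <= i * 100 / 64. Running it prints nothing.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	for (int level = 0; level <= 100; level++) {
		int old = 0;

		for (int i = 0; i <= 64; i++) {
			if (level <= i * 100 / 64) {
				old = i;
				break;
			}
		}

		if (old != DIV_ROUND_UP(level * 64, 100))
			printf("mismatch at level %d\n", level);
	}

	return 0;
}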
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index e12da5b024b0..dc4a240f4489 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -55,6 +55,11 @@
#define ETP_FW_PAGE_SIZE_512 512
#define ETP_FW_SIGNATURE_SIZE 6
+#define ETP_PRODUCT_ID_DELBIN 0x00C2
+#define ETP_PRODUCT_ID_VOXEL 0x00BF
+#define ETP_PRODUCT_ID_MAGPIE 0x0120
+#define ETP_PRODUCT_ID_BOBBA 0x0121
+
struct i2c_client;
struct completion;
@@ -73,7 +78,7 @@ struct elan_transport_ops {
int (*calibrate_result)(struct i2c_client *client, u8 *val);
int (*get_baseline_data)(struct i2c_client *client,
- bool max_baseliune, u8 *value);
+ bool max_baseline, u8 *value);
int (*get_version)(struct i2c_client *client, u8 pattern, bool iap,
u8 *version);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index bef73822315d..dad22c1ea6a0 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -46,6 +46,9 @@
#define ETP_FINGER_WIDTH 15
#define ETP_RETRY_COUNT 3
+/* quirks to control the device */
+#define ETP_QUIRK_QUICK_WAKEUP BIT(0)
+
/* The main device structure */
struct elan_tp_data {
struct i2c_client *client;
@@ -90,8 +93,38 @@ struct elan_tp_data {
bool baseline_ready;
u8 clickpad;
bool middle_button;
+
+ u32 quirks; /* Various quirks */
};
+static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
+{
+ static const struct {
+ u16 ic_type;
+ u16 product_id;
+ u32 quirks;
+ } elan_i2c_quirks[] = {
+ { 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
+ };
+ u32 quirks = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(elan_i2c_quirks); i++) {
+ if (elan_i2c_quirks[i].ic_type == ic_type &&
+ elan_i2c_quirks[i].product_id == product_id) {
+ quirks = elan_i2c_quirks[i].quirks;
+ }
+ }
+
+ if (ic_type >= 0x0D && product_id >= 0x123)
+ quirks |= ETP_QUIRK_QUICK_WAKEUP;
+
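+	/*
+	 * Example: ic_type 0x0D with ETP_PRODUCT_ID_DELBIN (0x00C2) picks
+	 * up ETP_QUIRK_QUICK_WAKEUP from the table above, while newer
+	 * parts (ic_type >= 0x0D, product_id >= 0x123) get it from the
+	 * catch-all test instead.
+	 */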
+ return quirks;
+}
+
static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
u32 *signature_address, u16 *page_size)
{
@@ -258,16 +291,18 @@ static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
return false;
}
-static int __elan_initialize(struct elan_tp_data *data)
+static int __elan_initialize(struct elan_tp_data *data, bool skip_reset)
{
struct i2c_client *client = data->client;
bool woken_up = false;
int error;
- error = data->ops->initialize(client);
- if (error) {
- dev_err(&client->dev, "device initialize failed: %d\n", error);
- return error;
+ if (!skip_reset) {
+ error = data->ops->initialize(client);
+ if (error) {
+ dev_err(&client->dev, "device initialize failed: %d\n", error);
+ return error;
+ }
}
error = elan_query_product(data);
@@ -311,16 +346,17 @@ static int __elan_initialize(struct elan_tp_data *data)
return 0;
}
-static int elan_initialize(struct elan_tp_data *data)
+static int elan_initialize(struct elan_tp_data *data, bool skip_reset)
{
int repeat = ETP_RETRY_COUNT;
int error;
do {
- error = __elan_initialize(data);
+ error = __elan_initialize(data, skip_reset);
if (!error)
return 0;
+ skip_reset = false;
msleep(30);
} while (--repeat > 0);
@@ -357,6 +393,8 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
+ data->quirks = elan_i2c_lookup_quirks(data->ic_type, data->product_id);
+
error = elan_get_fwinfo(data->ic_type, data->iap_version,
&data->fw_validpage_count,
&data->fw_signature_address,
@@ -546,7 +584,7 @@ static int elan_update_firmware(struct elan_tp_data *data,
data->ops->iap_reset(client);
} else {
/* Reinitialize TP after fw is updated */
- elan_initialize(data);
+ elan_initialize(data, false);
elan_query_device_info(data);
}
@@ -1247,7 +1285,7 @@ static int elan_probe(struct i2c_client *client,
}
/* Initialize the touchpad. */
- error = elan_initialize(data);
+ error = elan_initialize(data, false);
if (error)
return error;
@@ -1384,7 +1422,7 @@ static int __maybe_unused elan_resume(struct device *dev)
goto err;
}
- error = elan_initialize(data);
+ error = elan_initialize(data, data->quirks & ETP_QUIRK_QUICK_WAKEUP);
if (error)
dev_err(dev, "initialize when resuming failed: %d\n", error);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 97381e2e03ba..2d0bc029619f 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1885,8 +1885,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
};
unsigned int idx = 0;
- smbus_board.properties = i2c_props;
-
i2c_props[idx++] = PROPERTY_ENTRY_U32("touchscreen-size-x",
info->x_max + 1);
i2c_props[idx++] = PROPERTY_ENTRY_U32("touchscreen-size-y",
@@ -1918,6 +1916,10 @@ static int elantech_create_smbus(struct psmouse *psmouse,
if (elantech_is_buttonpad(info))
i2c_props[idx++] = PROPERTY_ENTRY_BOOL("elan,clickpad");
+ smbus_board.fwnode = fwnode_create_software_node(i2c_props, NULL);
+ if (IS_ERR(smbus_board.fwnode))
+ return PTR_ERR(smbus_board.fwnode);
+
return psmouse_smbus_init(psmouse, &smbus_board, NULL, 0, false,
leave_breadcrumbs);
}
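/*
 * A minimal sketch of the software-node pattern adopted above, assuming
 * only linux/property.h: build a property array (terminated by a zeroed
 * entry) and publish it as a software node. The function name and
 * parameters are illustrative, not part of the driver; like the hunk
 * above, the caller must check the result with IS_ERR().
 */
#include <linux/property.h>

static struct fwnode_handle *example_make_ts_node(u32 size_x, u32 size_y)
{
	struct property_entry props[3] = { };

	props[0] = PROPERTY_ENTRY_U32("touchscreen-size-x", size_x);
	props[1] = PROPERTY_ENTRY_U32("touchscreen-size-y", size_y);
	/* props[2] stays zeroed as the array terminator */

	/* The node copies the properties, so a stack array is fine here. */
	return fwnode_create_software_node(props, NULL);
}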
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 594ac4e6f8ea..974d7bfae0a0 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -103,7 +103,6 @@ static int apbps2_open(struct serio *io)
{
struct apbps2_priv *priv = io->port_data;
int limit;
- unsigned long tmp;
/* clear error flags */
iowrite32be(0, &priv->regs->status);
@@ -111,7 +110,7 @@ static int apbps2_open(struct serio *io)
/* Clear old data if available (unlikely) */
limit = 1024;
while ((ioread32be(&priv->regs->status) & APBPS2_STATUS_DR) && --limit)
- tmp = ioread32be(&priv->regs->data);
+ ioread32be(&priv->regs->data);
 	/* Enable receiver and its interrupt */
iowrite32be(APBPS2_CTRL_RE | APBPS2_CTRL_RI, &priv->regs->ctrl);
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen.c
index 97342e14b4f1..dd18cb917c4d 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic DT helper functions for touchscreen devices
+ * Generic helper functions for touchscreens and other two-dimensional
+ * pointing devices
*
* Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
*/
@@ -37,7 +38,7 @@ static void touchscreen_set_params(struct input_dev *dev,
if (!test_bit(axis, dev->absbit)) {
dev_warn(&dev->dev,
- "DT specifies parameters but the axis %lu is not set up\n",
+ "Parameters are specified but the axis %lu is not set up\n",
axis);
return;
}
@@ -49,7 +50,7 @@ static void touchscreen_set_params(struct input_dev *dev,
}
/**
- * touchscreen_parse_properties - parse common touchscreen DT properties
+ * touchscreen_parse_properties - parse common touchscreen properties
* @input: input device that should be parsed
* @multitouch: specifies whether parsed properties should be applied to
* single-touch or multi-touch axes
@@ -57,9 +58,9 @@ static void touchscreen_set_params(struct input_dev *dev,
* axis swap and invert info for use with touchscreen_report_x_y();
* or %NULL
*
- * This function parses common DT properties for touchscreens and setups the
+ * This function parses common properties for touchscreens and sets up the
* input device accordingly. The function keeps previously set up default
- * values if no value is specified via DT.
+ * values if no value is specified.
*/
void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
struct touchscreen_properties *prop)
@@ -203,4 +204,4 @@ void touchscreen_report_pos(struct input_dev *input,
EXPORT_SYMBOL(touchscreen_report_pos);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Device-tree helpers functions for touchscreen devices");
+MODULE_DESCRIPTION("Helper functions for touchscreens and other devices");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 529614d364fe..ad454cd2855a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -12,10 +12,6 @@ menuconfig INPUT_TOUCHSCREEN
if INPUT_TOUCHSCREEN
-config TOUCHSCREEN_PROPERTIES
- def_tristate INPUT
- depends on INPUT
-
config TOUCHSCREEN_88PM860X
tristate "Marvell 88PM860x touchscreen"
depends on MFD_88PM860X
@@ -415,6 +411,17 @@ config TOUCHSCREEN_HIDEEP
 	  To compile this driver as a module, choose M here: the
module will be called hideep_ts.
+config TOUCHSCREEN_HYCON_HY46XX
+ tristate "Hycon hy46xx touchscreen support"
+ depends on I2C
+ help
+	  Say Y here if you have a touchscreen using the Hycon hy46xx.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hycon-hy46xx.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
@@ -430,6 +437,18 @@ config TOUCHSCREEN_ILI210X
To compile this driver as a module, choose M here: the
module will be called ili210x.
+config TOUCHSCREEN_ILITEK
+ tristate "Ilitek I2C 213X/23XX/25XX/Lego Series Touch ICs"
+ depends on I2C
+ help
+	  Say Y here if you have a touchscreen with an ILITEK touch IC;
+	  the driver supports the 213X/23XX/25XX and other Lego series.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ilitek_ts_i2c.
+
config TOUCHSCREEN_IPROC
tristate "IPROC touch panel driver support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -594,6 +613,18 @@ config TOUCHSCREEN_MELFAS_MIP4
To compile this driver as a module, choose M here:
the module will be called melfas_mip4.
+config TOUCHSCREEN_MSG2638
+ tristate "MStar msg2638 touchscreen support"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say Y here if you have an I2C touchscreen using MStar msg2638.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msg2638.
+
config TOUCHSCREEN_MTOUCH
tristate "MicroTouch serial touchscreens"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 6233541e9173..7d34100f7f22 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -7,7 +7,6 @@
wm97xx-ts-y := wm97xx-core.o
-obj-$(CONFIG_TOUCHSCREEN_PROPERTIES) += of_touchscreen.o
obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o
obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
@@ -35,6 +34,7 @@ obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
obj-$(CONFIG_TOUCHSCREEN_EDT_FT5X06) += edt-ft5x06.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
+obj-$(CONFIG_TOUCHSCREEN_HYCON_HY46XX) += hycon-hy46xx.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_EKTF2127) += ektf2127.o
@@ -47,6 +47,7 @@ obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
+obj-$(CONFIG_TOUCHSCREEN_ILITEK) += ilitek_ts_i2c.o
obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MELFAS_MIP4) += melfas_mip4.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MMS114) += mms114.o
+obj-$(CONFIG_TOUCHSCREEN_MSG2638) += msg2638.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o
obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index c0d5c2413356..dc6a85362a40 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -125,7 +125,7 @@ static int ar1021_i2c_probe(struct i2c_client *client,
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, ar1021_i2c_irq,
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"ar1021_i2c", ar1021);
if (error) {
dev_err(&client->dev,
@@ -133,9 +133,6 @@ static int ar1021_i2c_probe(struct i2c_client *client,
return error;
}
- /* Disable the IRQ, we'll enable it in ar1021_i2c_open() */
- disable_irq(client->irq);
-
error = input_register_device(ar1021->input);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 383a848eb601..05de92c0293b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -31,6 +31,7 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
+#include <dt-bindings/input/atmel-maxtouch.h>
/* Firmware files */
#define MXT_FW_NAME "maxtouch.fw"
@@ -199,6 +200,7 @@ enum t100_type {
#define MXT_CRC_TIMEOUT 1000 /* msec */
#define MXT_FW_RESET_TIME 3000 /* msec */
#define MXT_FW_CHG_TIMEOUT 300 /* msec */
+#define MXT_WAKEUP_TIME 25 /* msec */
/* Command to unlock bootloader */
#define MXT_UNLOCK_CMD_MSB 0xaa
@@ -312,6 +314,7 @@ struct mxt_data {
struct mxt_dbg dbg;
struct regulator_bulk_data regulators[2];
struct gpio_desc *reset_gpio;
+ struct gpio_desc *wake_gpio;
bool use_retrigen_workaround;
/* Cached parameters from object table */
@@ -342,6 +345,8 @@ struct mxt_data {
unsigned int t19_num_keys;
enum mxt_suspend_mode suspend_mode;
+
+ u32 wakeup_method;
};
struct mxt_vb2_buffer {
@@ -621,10 +626,42 @@ static int mxt_send_bootloader_cmd(struct mxt_data *data, bool unlock)
return mxt_bootloader_write(data, buf, sizeof(buf));
}
+static bool mxt_wakeup_toggle(struct i2c_client *client,
+ bool wake_up, bool in_i2c)
+{
+ struct mxt_data *data = i2c_get_clientdata(client);
+
+ switch (data->wakeup_method) {
+ case ATMEL_MXT_WAKEUP_I2C_SCL:
+ if (!in_i2c)
+ return false;
+ break;
+
+ case ATMEL_MXT_WAKEUP_GPIO:
+ if (in_i2c)
+ return false;
+
+ gpiod_set_value(data->wake_gpio, wake_up);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (wake_up) {
+ dev_dbg(&client->dev, "waking up controller\n");
+
+ msleep(MXT_WAKEUP_TIME);
+ }
+
+ return true;
+}
+
static int __mxt_read_reg(struct i2c_client *client,
u16 reg, u16 len, void *val)
{
struct i2c_msg xfer[2];
+ bool retried = false;
u8 buf[2];
int ret;
@@ -643,9 +680,13 @@ static int __mxt_read_reg(struct i2c_client *client,
xfer[1].len = len;
xfer[1].buf = val;
+retry:
ret = i2c_transfer(client->adapter, xfer, 2);
if (ret == 2) {
ret = 0;
+ } else if (!retried && mxt_wakeup_toggle(client, true, true)) {
+ retried = true;
+ goto retry;
} else {
if (ret >= 0)
ret = -EIO;
@@ -659,6 +700,7 @@ static int __mxt_read_reg(struct i2c_client *client,
static int __mxt_write_reg(struct i2c_client *client, u16 reg, u16 len,
const void *val)
{
+ bool retried = false;
u8 *buf;
size_t count;
int ret;
@@ -672,9 +714,13 @@ static int __mxt_write_reg(struct i2c_client *client, u16 reg, u16 len,
buf[1] = (reg >> 8) & 0xff;
memcpy(&buf[2], val, len);
+retry:
ret = i2c_master_send(client, buf, count);
if (ret == count) {
ret = 0;
+ } else if (!retried && mxt_wakeup_toggle(client, true, true)) {
+ retried = true;
+ goto retry;
} else {
if (ret >= 0)
ret = -EIO;
@@ -2975,6 +3021,8 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
+ mxt_wakeup_toggle(data->client, true, false);
+
switch (data->suspend_mode) {
case MXT_SUSPEND_T9_CTRL:
mxt_soft_reset(data);
@@ -3009,6 +3057,8 @@ static void mxt_stop(struct mxt_data *data)
mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
break;
}
+
+ mxt_wakeup_toggle(data->client, false, false);
}
static int mxt_input_open(struct input_dev *dev)
@@ -3155,16 +3205,24 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
return error;
}
+ /* Request the WAKE line as asserted so we go out of sleep */
+ data->wake_gpio = devm_gpiod_get_optional(&client->dev,
+ "wake", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->wake_gpio)) {
+ error = PTR_ERR(data->wake_gpio);
+ dev_err(&client->dev, "Failed to get wake gpio: %d\n", error);
+ return error;
+ }
+
error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mxt_interrupt, IRQF_ONESHOT,
+ NULL, mxt_interrupt,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
client->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
return error;
}
- disable_irq(client->irq);
-
error = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
data->regulators);
if (error) {
@@ -3185,6 +3243,25 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
msleep(MXT_RESET_INVALID_CHG);
}
+	/*
+	 * Controllers like the mXT1386 have a dedicated WAKE line that can
+	 * be connected to a GPIO, to the I2C SCL pin, or permanently
+	 * asserted low.
+	 *
+	 * The WAKE line wakes the controller from deep sleep: it must be
+	 * asserted low for 25 milliseconds before the controller accepts
+	 * I2C transfers. The controller goes back to sleep automatically
+	 * after 2 seconds of inactivity if the WAKE line is deasserted and
+	 * deep sleep is activated.
+	 *
+	 * If the WAKE line is connected to the I2C SCL pin, the first I2C
+	 * transfer gets an instant NAK and must be retried after 25 ms.
+	 *
+	 * If the WAKE line is connected to a GPIO line, the line must be
+	 * asserted 25 ms before the host attempts to communicate with the
+	 * controller.
+	 */
+ device_property_read_u32(&client->dev, "atmel,wakeup-method",
+ &data->wakeup_method);
+
error = mxt_initialize(data);
if (error)
goto err_disable_regulators;
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
index 341925edb8e6..392950aa7856 100644
--- a/drivers/input/touchscreen/bu21029_ts.c
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -401,10 +401,10 @@ static int bu21029_probe(struct i2c_client *client,
input_set_drvdata(in_dev, bu21029);
- irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, bu21029_touch_soft_irq,
- IRQF_ONESHOT, DRIVER_NAME, bu21029);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ DRIVER_NAME, bu21029);
if (error) {
dev_err(&client->dev,
"unable to request touch irq: %d\n", error);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 73c854f35f33..106dd4962785 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -229,16 +229,21 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
static void cyttsp_hard_reset(struct cyttsp *ts)
{
if (ts->reset_gpio) {
+ /*
+ * According to the CY8CTMA340 datasheet page 21, the external
+ * reset pulse width should be >= 1 ms. The datasheet does not
+ * specify how long we have to wait after reset but a vendor
+ * tree specifies 5 ms here.
+ */
gpiod_set_value_cansleep(ts->reset_gpio, 1);
- msleep(CY_DELAY_DFLT);
+ usleep_range(1000, 2000);
gpiod_set_value_cansleep(ts->reset_gpio, 0);
- msleep(CY_DELAY_DFLT);
+ usleep_range(5000, 6000);
}
}
static int cyttsp_soft_reset(struct cyttsp *ts)
{
- unsigned long timeout;
int retval;
/* wait for interrupt to set ready completion */
@@ -248,12 +253,16 @@ static int cyttsp_soft_reset(struct cyttsp *ts)
enable_irq(ts->irq);
retval = ttsp_send_command(ts, CY_SOFT_RESET_MODE);
- if (retval)
+ if (retval) {
+ dev_err(ts->dev, "failed to send soft reset\n");
goto out;
+ }
- timeout = wait_for_completion_timeout(&ts->bl_ready,
- msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX));
- retval = timeout ? 0 : -EIO;
+ if (!wait_for_completion_timeout(&ts->bl_ready,
+ msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX))) {
+ dev_err(ts->dev, "timeout waiting for soft reset\n");
+ retval = -EIO;
+ }
out:
ts->state = CY_IDLE_STATE;
@@ -405,8 +414,10 @@ static int cyttsp_power_on(struct cyttsp *ts)
if (GET_BOOTLOADERMODE(ts->bl_data.bl_status) &&
IS_VALID_APP(ts->bl_data.bl_status)) {
error = cyttsp_exit_bl_mode(ts);
- if (error)
+ if (error) {
+ dev_err(ts->dev, "failed to exit bootloader mode\n");
return error;
+ }
}
if (GET_HSTMODE(ts->bl_data.bl_file) != CY_OPERATE_MODE ||
@@ -629,10 +640,8 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
return ERR_PTR(error);
init_completion(&ts->bl_ready);
- snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
input_dev->name = "Cypress TTSP TouchScreen";
- input_dev->phys = ts->phys;
input_dev->id.bustype = bus_ops->bustype;
input_dev->dev.parent = ts->dev;
@@ -643,16 +652,20 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ /* One byte for width 0..255 so this is the limit */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
touchscreen_parse_properties(input_dev, true, NULL);
- error = input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+ error = input_mt_init_slots(input_dev, CY_MAX_ID, INPUT_MT_DIRECT);
if (error) {
dev_err(dev, "Unable to init MT slots.\n");
return ERR_PTR(error);
}
error = devm_request_threaded_irq(dev, ts->irq, NULL, cyttsp_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
"cyttsp", ts);
if (error) {
dev_err(ts->dev, "failed to request IRQ %d, err: %d\n",
@@ -660,8 +673,6 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
return ERR_PTR(error);
}
- disable_irq(ts->irq);
-
cyttsp_hard_reset(ts);
error = cyttsp_power_on(ts);
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 8c651336ac12..9bc4fe7e6ac5 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -114,7 +114,6 @@ struct cyttsp {
struct device *dev;
int irq;
struct input_dev *input;
- char phys[32];
const struct cyttsp_bus_ops *bus_ops;
struct cyttsp_bootloader_data bl_data;
struct cyttsp_sysinfo_data sysinfo_data;
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 5f7706febcb0..17540bdb1eaf 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -38,6 +38,7 @@
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
+#include <linux/uuid.h>
#include <asm/unaligned.h>
/* Device, Driver information */
@@ -1334,6 +1335,40 @@ static void elants_i2c_power_off(void *_data)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id i2c_hid_ids[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+
+static const guid_t i2c_hid_guid =
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+
+static bool elants_acpi_is_hid_device(struct device *dev)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object *obj;
+
+ if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
+ return false;
+
+ obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
+ if (obj) {
+ ACPI_FREE(obj);
+ return true;
+ }
+
+ return false;
+}
+#else
+static bool elants_acpi_is_hid_device(struct device *dev)
+{
+ return false;
+}
+#endif
+
static int elants_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1342,9 +1377,14 @@ static int elants_i2c_probe(struct i2c_client *client,
unsigned long irqflags;
int error;
+	/* Don't bind to i2c-hid compatible devices; these are handled by the i2c-hid driver. */
+ if (elants_acpi_is_hid_device(&client->dev)) {
+ dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
+ return -ENODEV;
+ }
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev,
- "%s: i2c check functionality error\n", DEVICE_NAME);
+ dev_err(&client->dev, "I2C check functionality error\n");
return -ENXIO;
}
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index a6597f026980..cbe0dd412912 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -25,11 +25,13 @@
#define EXC3000_NUM_SLOTS 10
#define EXC3000_SLOTS_PER_FRAME 5
#define EXC3000_LEN_FRAME 66
+#define EXC3000_LEN_VENDOR_REQUEST 68
#define EXC3000_LEN_POINT 10
#define EXC3000_LEN_MODEL_NAME 16
#define EXC3000_LEN_FW_VERSION 16
+#define EXC3000_VENDOR_EVENT 0x03
#define EXC3000_MT1_EVENT 0x06
#define EXC3000_MT2_EVENT 0x18
@@ -76,9 +78,6 @@ struct exc3000_data {
u8 buf[2 * EXC3000_LEN_FRAME];
struct completion wait_event;
struct mutex query_lock;
- int query_result;
- char model[EXC3000_LEN_MODEL_NAME];
- char fw_version[EXC3000_LEN_FW_VERSION];
};
static void exc3000_report_slots(struct input_dev *input,
@@ -105,15 +104,16 @@ static void exc3000_timer(struct timer_list *t)
input_sync(data->input);
}
+static inline void exc3000_schedule_timer(struct exc3000_data *data)
+{
+ mod_timer(&data->timer, jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
+}
+
static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
{
struct i2c_client *client = data->client;
- u8 expected_event = EXC3000_MT1_EVENT;
int ret;
- if (data->info->max_xy == SZ_16K - 1)
- expected_event = EXC3000_MT2_EVENT;
-
ret = i2c_master_send(client, "'", 2);
if (ret < 0)
return ret;
@@ -131,175 +131,196 @@ static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME)
return -EINVAL;
- if (buf[2] != expected_event)
- return -EINVAL;
-
return 0;
}
-static int exc3000_read_data(struct exc3000_data *data,
- u8 *buf, int *n_slots)
+static int exc3000_handle_mt_event(struct exc3000_data *data)
{
- int error;
-
- error = exc3000_read_frame(data, buf);
- if (error)
- return error;
+ struct input_dev *input = data->input;
+ int ret, total_slots;
+ u8 *buf = data->buf;
- *n_slots = buf[3];
- if (!*n_slots || *n_slots > EXC3000_NUM_SLOTS)
- return -EINVAL;
+ total_slots = buf[3];
+ if (!total_slots || total_slots > EXC3000_NUM_SLOTS) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
- if (*n_slots > EXC3000_SLOTS_PER_FRAME) {
+ if (total_slots > EXC3000_SLOTS_PER_FRAME) {
/* Read 2nd frame to get the rest of the contacts. */
- error = exc3000_read_frame(data, buf + EXC3000_LEN_FRAME);
- if (error)
- return error;
+ ret = exc3000_read_frame(data, buf + EXC3000_LEN_FRAME);
+ if (ret)
+ goto out_fail;
/* 2nd chunk must have number of contacts set to 0. */
- if (buf[EXC3000_LEN_FRAME + 3] != 0)
- return -EINVAL;
+ if (buf[EXC3000_LEN_FRAME + 3] != 0) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
}
- return 0;
-}
-
-static int exc3000_query_interrupt(struct exc3000_data *data)
-{
- u8 *buf = data->buf;
- int error;
+ /*
+ * We read full state successfully, no contacts will be "stuck".
+ */
+ del_timer_sync(&data->timer);
- error = i2c_master_recv(data->client, buf, EXC3000_LEN_FRAME);
- if (error < 0)
- return error;
+ while (total_slots > 0) {
+ int slots = min(total_slots, EXC3000_SLOTS_PER_FRAME);
- if (buf[0] != 'B')
- return -EPROTO;
+ exc3000_report_slots(input, &data->prop, buf + 4, slots);
+ total_slots -= slots;
+ buf += EXC3000_LEN_FRAME;
+ }
- if (buf[4] == 'E')
- strlcpy(data->model, buf + 5, sizeof(data->model));
- else if (buf[4] == 'D')
- strlcpy(data->fw_version, buf + 5, sizeof(data->fw_version));
- else
- return -EPROTO;
+ input_mt_sync_frame(input);
+ input_sync(input);
return 0;
+
+out_fail:
+ /* Schedule a timer to release "stuck" contacts */
+ exc3000_schedule_timer(data);
+
+ return ret;
}
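
Frame layout handled above, derived from the code: each 66-byte frame carries at most EXC3000_SLOTS_PER_FRAME (5) contacts, buf[3] of the first frame holds the total contact count (1..10), and any count above 5 arrives in a second frame whose own count byte must be 0. A hypothetical 7-contact event therefore reports as:

	/* frame 0: count = 7, slot data for contacts 0..4 at buf + 4      */
	/* frame 1: count = 0, slot data for contacts 5..6 at buf + 66 + 4 */
	exc3000_report_slots(input, &data->prop, buf + 4, 5);
	exc3000_report_slots(input, &data->prop, buf + EXC3000_LEN_FRAME + 4, 2);
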
static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
{
struct exc3000_data *data = dev_id;
- struct input_dev *input = data->input;
u8 *buf = data->buf;
- int slots, total_slots;
- int error;
-
- if (mutex_is_locked(&data->query_lock)) {
- data->query_result = exc3000_query_interrupt(data);
- complete(&data->wait_event);
- goto out;
- }
+ int ret;
- error = exc3000_read_data(data, buf, &total_slots);
- if (error) {
+ ret = exc3000_read_frame(data, buf);
+ if (ret) {
/* Schedule a timer to release "stuck" contacts */
- mod_timer(&data->timer,
- jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
+ exc3000_schedule_timer(data);
goto out;
}
- /*
- * We read full state successfully, no contacts will be "stuck".
- */
- del_timer_sync(&data->timer);
+ switch (buf[2]) {
+ case EXC3000_VENDOR_EVENT:
+ complete(&data->wait_event);
+ break;
- while (total_slots > 0) {
- slots = min(total_slots, EXC3000_SLOTS_PER_FRAME);
- exc3000_report_slots(input, &data->prop, buf + 4, slots);
- total_slots -= slots;
- buf += EXC3000_LEN_FRAME;
- }
+ case EXC3000_MT1_EVENT:
+ case EXC3000_MT2_EVENT:
+ exc3000_handle_mt_event(data);
+ break;
- input_mt_sync_frame(input);
- input_sync(input);
+ default:
+ break;
+ }
out:
return IRQ_HANDLED;
}
-static ssize_t fw_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
+ u8 request_len, u8 *response, int timeout)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct exc3000_data *data = i2c_get_clientdata(client);
- static const u8 request[68] = {
- 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'D', 0x00
- };
- int error;
+ u8 buf[EXC3000_LEN_VENDOR_REQUEST] = { 0x67, 0x00, 0x42, 0x00, 0x03 };
+ int ret;
mutex_lock(&data->query_lock);
- data->query_result = -ETIMEDOUT;
reinit_completion(&data->wait_event);
- error = i2c_master_send(client, request, sizeof(request));
- if (error < 0) {
- mutex_unlock(&data->query_lock);
- return error;
+ buf[5] = request_len;
+ memcpy(&buf[6], request, request_len);
+
+ ret = i2c_master_send(data->client, buf, EXC3000_LEN_VENDOR_REQUEST);
+ if (ret < 0)
+ goto out_unlock;
+
+ if (response) {
+ ret = wait_for_completion_timeout(&data->wait_event,
+ timeout * HZ);
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto out_unlock;
+ }
+
+ if (data->buf[3] >= EXC3000_LEN_FRAME) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ memcpy(response, &data->buf[4], data->buf[3]);
+ ret = data->buf[3];
}
- wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+out_unlock:
mutex_unlock(&data->query_lock);
- if (data->query_result < 0)
- return data->query_result;
-
- return sprintf(buf, "%s\n", data->fw_version);
+ return ret;
}
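
To make the framing concrete, here is the wire layout of a model query as derived from the function above (byte values only where the code sets them):

	/*
	 * Request, padded to EXC3000_LEN_VENDOR_REQUEST (68) bytes:
	 *   buf[0..4]  = 0x67 0x00 0x42 0x00 0x03  (header, vendor event)
	 *   buf[5]     = request_len               (1 for a model query)
	 *   buf[6]     = 'E'                       (payload)
	 *   buf[7..67] = 0x00 padding
	 *
	 * The reply comes back through the IRQ path: buf[2] ==
	 * EXC3000_VENDOR_EVENT completes wait_event, buf[3] is the payload
	 * length and the payload starts at buf[4].
	 */
	u8 response[EXC3000_LEN_FRAME];
	int len = exc3000_vendor_data_request(data, (u8[]){ 'E' }, 1, response, 1);
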
-static DEVICE_ATTR_RO(fw_version);
-static ssize_t exc3000_get_model(struct exc3000_data *data)
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- static const u8 request[68] = {
- 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'E', 0x00
- };
- struct i2c_client *client = data->client;
- int error;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct exc3000_data *data = i2c_get_clientdata(client);
+ u8 response[EXC3000_LEN_FRAME];
+ int ret;
- mutex_lock(&data->query_lock);
- data->query_result = -ETIMEDOUT;
- reinit_completion(&data->wait_event);
+ /* query bootloader info */
+ ret = exc3000_vendor_data_request(data,
+ (u8[]){0x39, 0x02}, 2, response, 1);
+ if (ret < 0)
+ return ret;
- error = i2c_master_send(client, request, sizeof(request));
- if (error < 0) {
- mutex_unlock(&data->query_lock);
- return error;
- }
+ /*
+ * If the bootloader version is non-zero then the device is in
+ * bootloader mode and won't answer a query for the application FW
+ * version, so we just use the bootloader version info.
+ */
+ if (response[2] || response[3])
+ return sprintf(buf, "%d.%d\n", response[2], response[3]);
- wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
- mutex_unlock(&data->query_lock);
+ ret = exc3000_vendor_data_request(data, (u8[]){'D'}, 1, response, 1);
+ if (ret < 0)
+ return ret;
- return data->query_result;
+ return sprintf(buf, "%s\n", &response[1]);
}
+static DEVICE_ATTR_RO(fw_version);
static ssize_t model_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct exc3000_data *data = i2c_get_clientdata(client);
- int error;
+ u8 response[EXC3000_LEN_FRAME];
+ int ret;
- error = exc3000_get_model(data);
- if (error < 0)
- return error;
+ ret = exc3000_vendor_data_request(data, (u8[]){'E'}, 1, response, 1);
+ if (ret < 0)
+ return ret;
- return sprintf(buf, "%s\n", data->model);
+ return sprintf(buf, "%s\n", &response[1]);
}
static DEVICE_ATTR_RO(model);
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct exc3000_data *data = i2c_get_clientdata(client);
+ u8 response[EXC3000_LEN_FRAME];
+ int ret;
+
+ ret = exc3000_vendor_data_request(data, (u8[]){'F'}, 1, response, 1);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%s\n", &response[1]);
+}
+static DEVICE_ATTR_RO(type);
+
static struct attribute *sysfs_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_model.attr,
+ &dev_attr_type.attr,
NULL
};
@@ -379,9 +400,15 @@ static int exc3000_probe(struct i2c_client *client)
* or two touch events anyways).
*/
for (retry = 0; retry < 3; retry++) {
- error = exc3000_get_model(data);
- if (!error)
+ u8 response[EXC3000_LEN_FRAME];
+
+ error = exc3000_vendor_data_request(data, (u8[]){'E'}, 1,
+ response, 1);
+ if (error > 0) {
+ dev_dbg(&client->dev, "TS Model: %s", &response[1]);
+ error = 0;
break;
+ }
dev_warn(&client->dev, "Retry %d get EETI EXC3000 model: %d\n",
retry + 1, error);
}
@@ -389,8 +416,6 @@ static int exc3000_probe(struct i2c_client *client)
if (error)
return error;
- dev_dbg(&client->dev, "TS Model: %s", data->model);
-
i2c_set_clientdata(client, data);
error = devm_device_add_group(&client->dev, &exc3000_attribute_group);
diff --git a/drivers/input/touchscreen/hycon-hy46xx.c b/drivers/input/touchscreen/hycon-hy46xx.c
new file mode 100644
index 000000000000..891d0430083e
--- /dev/null
+++ b/drivers/input/touchscreen/hycon-hy46xx.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+
+#include <asm/unaligned.h>
+
+#define HY46XX_CHKSUM_CODE 0x1
+#define HY46XX_FINGER_NUM 0x2
+#define HY46XX_CHKSUM_LEN 0x7
+#define HY46XX_THRESHOLD 0x80
+#define HY46XX_GLOVE_EN 0x84
+#define HY46XX_REPORT_SPEED 0x88
+#define HY46XX_PWR_NOISE_EN 0x89
+#define HY46XX_FILTER_DATA 0x8A
+#define HY46XX_GAIN 0x92
+#define HY46XX_EDGE_OFFSET 0x93
+#define HY46XX_RX_NR_USED 0x94
+#define HY46XX_TX_NR_USED 0x95
+#define HY46XX_PWR_MODE 0xA5
+#define HY46XX_FW_VERSION 0xA6
+#define HY46XX_LIB_VERSION 0xA7
+#define HY46XX_TP_INFO 0xA8
+#define HY46XX_TP_CHIP_ID 0xA9
+#define HY46XX_BOOT_VER 0xB0
+
+#define HY46XX_TPLEN 0x6
+#define HY46XX_REPORT_PKT_LEN 0x44
+
+#define HY46XX_MAX_SUPPORTED_POINTS 11
+
+#define TOUCH_EVENT_DOWN 0x00
+#define TOUCH_EVENT_UP 0x01
+#define TOUCH_EVENT_CONTACT 0x02
+#define TOUCH_EVENT_RESERVED 0x03
+
+struct hycon_hy46xx_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+ struct regulator *vcc;
+
+ struct gpio_desc *reset_gpio;
+
+ struct mutex mutex;
+ struct regmap *regmap;
+
+ int threshold;
+ bool glove_enable;
+ int report_speed;
+ bool noise_filter_enable;
+ int filter_data;
+ int gain;
+ int edge_offset;
+ int rx_number_used;
+ int tx_number_used;
+ int power_mode;
+ int fw_version;
+ int lib_version;
+ int tp_information;
+ int tp_chip_id;
+ int bootloader_version;
+};
+
+static const struct regmap_config hycon_hy46xx_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static bool hycon_hy46xx_check_checksum(struct hycon_hy46xx_data *tsdata, u8 *buf)
+{
+ u8 chksum = 0;
+ int i;
+
+ for (i = 2; i < buf[HY46XX_CHKSUM_LEN]; i++)
+ chksum += buf[i];
+
+ if (chksum == buf[HY46XX_CHKSUM_CODE])
+ return true;
+
+ dev_err_ratelimited(&tsdata->client->dev,
+ "checksum error: 0x%02x expected, got 0x%02x\n",
+ chksum, buf[HY46XX_CHKSUM_CODE]);
+
+ return false;
+}
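
Worked example of the check above (frame bytes hypothetical): with buf[HY46XX_CHKSUM_LEN] == 7, bytes 2..6 are summed modulo 256 and compared against buf[HY46XX_CHKSUM_CODE]:

	/* { hdr,  chksum, b2,   b3,   b4,   b5,   b6,   len } */
	/* { 0xAA, 0x0F,   0x01, 0x02, 0x03, 0x04, 0x05, 0x07 } */
	/* 0x01 + 0x02 + 0x03 + 0x04 + 0x05 = 0x0F == buf[1] -> valid */
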
+
+static irqreturn_t hycon_hy46xx_isr(int irq, void *dev_id)
+{
+ struct hycon_hy46xx_data *tsdata = dev_id;
+ struct device *dev = &tsdata->client->dev;
+ u8 rdbuf[HY46XX_REPORT_PKT_LEN];
+ int i, x, y, id;
+ int error;
+
+ memset(rdbuf, 0, sizeof(rdbuf));
+
+ error = regmap_bulk_read(tsdata->regmap, 0, rdbuf, sizeof(rdbuf));
+ if (error) {
+ dev_err_ratelimited(dev, "Unable to fetch data, error: %d\n",
+ error);
+ goto out;
+ }
+
+ if (!hycon_hy46xx_check_checksum(tsdata, rdbuf))
+ goto out;
+
+ for (i = 0; i < HY46XX_MAX_SUPPORTED_POINTS; i++) {
+ u8 *buf = &rdbuf[3 + (HY46XX_TPLEN * i)];
+ int type = buf[0] >> 6;
+
+ if (type == TOUCH_EVENT_RESERVED)
+ continue;
+
+ x = get_unaligned_be16(buf) & 0x0fff;
+ y = get_unaligned_be16(buf + 2) & 0x0fff;
+
+ id = buf[2] >> 4;
+
+ input_mt_slot(tsdata->input, id);
+ if (input_mt_report_slot_state(tsdata->input, MT_TOOL_FINGER,
+ type != TOUCH_EVENT_UP))
+ touchscreen_report_pos(tsdata->input, &tsdata->prop,
+ x, y, true);
+ }
+
+ input_mt_report_pointer_emulation(tsdata->input, false);
+ input_sync(tsdata->input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+struct hycon_hy46xx_attribute {
+ struct device_attribute dattr;
+ size_t field_offset;
+ u8 address;
+ u8 limit_low;
+ u8 limit_high;
+};
+
+#define HYCON_ATTR_U8(_field, _mode, _address, _limit_low, _limit_high) \
+ struct hycon_hy46xx_attribute hycon_hy46xx_attr_##_field = { \
+ .dattr = __ATTR(_field, _mode, \
+ hycon_hy46xx_setting_show, \
+ hycon_hy46xx_setting_store), \
+ .field_offset = offsetof(struct hycon_hy46xx_data, _field), \
+ .address = _address, \
+ .limit_low = _limit_low, \
+ .limit_high = _limit_high, \
+ }
+
+#define HYCON_ATTR_BOOL(_field, _mode, _address) \
+ struct hycon_hy46xx_attribute hycon_hy46xx_attr_##_field = { \
+ .dattr = __ATTR(_field, _mode, \
+ hycon_hy46xx_setting_show, \
+ hycon_hy46xx_setting_store), \
+ .field_offset = offsetof(struct hycon_hy46xx_data, _field), \
+ .address = _address, \
+ .limit_low = false, \
+ .limit_high = true, \
+ }
+
+static ssize_t hycon_hy46xx_setting_show(struct device *dev,
+ struct device_attribute *dattr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hycon_hy46xx_data *tsdata = i2c_get_clientdata(client);
+ struct hycon_hy46xx_attribute *attr =
+ container_of(dattr, struct hycon_hy46xx_attribute, dattr);
+ u8 *field = (u8 *)tsdata + attr->field_offset;
+ size_t count = 0;
+ int error = 0;
+ int val;
+
+ mutex_lock(&tsdata->mutex);
+
+ error = regmap_read(tsdata->regmap, attr->address, &val);
+ if (error < 0) {
+ dev_err(&tsdata->client->dev,
+ "Failed to fetch attribute %s, error %d\n",
+ dattr->attr.name, error);
+ goto out;
+ }
+
+ if (val != *field) {
+ dev_warn(&tsdata->client->dev,
+ "%s: read (%d) and stored value (%d) differ\n",
+ dattr->attr.name, val, *field);
+ *field = val;
+ }
+
+ count = scnprintf(buf, PAGE_SIZE, "%d\n", val);
+
+out:
+ mutex_unlock(&tsdata->mutex);
+ return error ?: count;
+}
+
+static ssize_t hycon_hy46xx_setting_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hycon_hy46xx_data *tsdata = i2c_get_clientdata(client);
+ struct hycon_hy46xx_attribute *attr =
+ container_of(dattr, struct hycon_hy46xx_attribute, dattr);
+ u8 *field = (u8 *)tsdata + attr->field_offset;
+ unsigned int val;
+ int error;
+
+ mutex_lock(&tsdata->mutex);
+
+ error = kstrtouint(buf, 0, &val);
+ if (error)
+ goto out;
+
+ if (val < attr->limit_low || val > attr->limit_high) {
+ error = -ERANGE;
+ goto out;
+ }
+
+ error = regmap_write(tsdata->regmap, attr->address, val);
+ if (error < 0) {
+ dev_err(&tsdata->client->dev,
+ "Failed to update attribute %s, error: %d\n",
+ dattr->attr.name, error);
+ goto out;
+ }
+ *field = val;
+
+out:
+ mutex_unlock(&tsdata->mutex);
+ return error ?: count;
+}
+
+static HYCON_ATTR_U8(threshold, 0644, HY46XX_THRESHOLD, 0, 255);
+static HYCON_ATTR_BOOL(glove_enable, 0644, HY46XX_GLOVE_EN);
+static HYCON_ATTR_U8(report_speed, 0644, HY46XX_REPORT_SPEED, 0, 255);
+static HYCON_ATTR_BOOL(noise_filter_enable, 0644, HY46XX_PWR_NOISE_EN);
+static HYCON_ATTR_U8(filter_data, 0644, HY46XX_FILTER_DATA, 0, 5);
+static HYCON_ATTR_U8(gain, 0644, HY46XX_GAIN, 0, 5);
+static HYCON_ATTR_U8(edge_offset, 0644, HY46XX_EDGE_OFFSET, 0, 5);
+static HYCON_ATTR_U8(fw_version, 0444, HY46XX_FW_VERSION, 0, 255);
+static HYCON_ATTR_U8(lib_version, 0444, HY46XX_LIB_VERSION, 0, 255);
+static HYCON_ATTR_U8(tp_information, 0444, HY46XX_TP_INFO, 0, 255);
+static HYCON_ATTR_U8(tp_chip_id, 0444, HY46XX_TP_CHIP_ID, 0, 255);
+static HYCON_ATTR_U8(bootloader_version, 0444, HY46XX_BOOT_VER, 0, 255);
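
Usage sketch for the attributes above (sysfs paths illustrative): they appear in the I2C client's device directory, writes are range-checked against limit_low/limit_high before hitting regmap, and reads sync the cached field:

	/* # echo 128 > /sys/bus/i2c/devices/1-0038/threshold  -> 0..255, accepted */
	/* # echo 9   > /sys/bus/i2c/devices/1-0038/gain       -> 0..5, -ERANGE    */
	/* # cat /sys/bus/i2c/devices/1-0038/fw_version        -> read-only (0444) */
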
+
+static struct attribute *hycon_hy46xx_attrs[] = {
+ &hycon_hy46xx_attr_threshold.dattr.attr,
+ &hycon_hy46xx_attr_glove_enable.dattr.attr,
+ &hycon_hy46xx_attr_report_speed.dattr.attr,
+ &hycon_hy46xx_attr_noise_filter_enable.dattr.attr,
+ &hycon_hy46xx_attr_filter_data.dattr.attr,
+ &hycon_hy46xx_attr_gain.dattr.attr,
+ &hycon_hy46xx_attr_edge_offset.dattr.attr,
+ &hycon_hy46xx_attr_fw_version.dattr.attr,
+ &hycon_hy46xx_attr_lib_version.dattr.attr,
+ &hycon_hy46xx_attr_tp_information.dattr.attr,
+ &hycon_hy46xx_attr_tp_chip_id.dattr.attr,
+ &hycon_hy46xx_attr_bootloader_version.dattr.attr,
+ NULL
+};
+
+static const struct attribute_group hycon_hy46xx_attr_group = {
+ .attrs = hycon_hy46xx_attrs,
+};
+
+static void hycon_hy46xx_get_defaults(struct device *dev, struct hycon_hy46xx_data *tsdata)
+{
+ bool val_bool;
+ int error;
+ u32 val;
+
+ error = device_property_read_u32(dev, "hycon,threshold", &val);
+ if (!error) {
+ error = regmap_write(tsdata->regmap, HY46XX_THRESHOLD, val);
+ if (error < 0)
+ goto out;
+
+ tsdata->threshold = val;
+ }
+
+ val_bool = device_property_read_bool(dev, "hycon,glove-enable");
+ error = regmap_write(tsdata->regmap, HY46XX_GLOVE_EN, val_bool);
+ if (error < 0)
+ goto out;
+ tsdata->glove_enable = val_bool;
+
+ error = device_property_read_u32(dev, "hycon,report-speed-hz", &val);
+ if (!error) {
+ error = regmap_write(tsdata->regmap, HY46XX_REPORT_SPEED, val);
+ if (error < 0)
+ goto out;
+
+ tsdata->report_speed = val;
+ }
+
+ val_bool = device_property_read_bool(dev, "hycon,noise-filter-enable");
+ error = regmap_write(tsdata->regmap, HY46XX_PWR_NOISE_EN, val_bool);
+ if (error < 0)
+ goto out;
+ tsdata->noise_filter_enable = val_bool;
+
+ error = device_property_read_u32(dev, "hycon,filter-data", &val);
+ if (!error) {
+ error = regmap_write(tsdata->regmap, HY46XX_FILTER_DATA, val);
+ if (error < 0)
+ goto out;
+
+ tsdata->filter_data = val;
+ }
+
+ error = device_property_read_u32(dev, "hycon,gain", &val);
+ if (!error) {
+ error = regmap_write(tsdata->regmap, HY46XX_GAIN, val);
+ if (error < 0)
+ goto out;
+
+ tsdata->gain = val;
+ }
+
+ error = device_property_read_u32(dev, "hycon,edge-offset", &val);
+ if (!error) {
+ error = regmap_write(tsdata->regmap, HY46XX_EDGE_OFFSET, val);
+ if (error < 0)
+ goto out;
+
+ tsdata->edge_offset = val;
+ }
+
+ return;
+out:
+ dev_err(&tsdata->client->dev, "Failed to set default settings");
+}
+
+static void hycon_hy46xx_get_parameters(struct hycon_hy46xx_data *tsdata)
+{
+ int error;
+ u32 val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_THRESHOLD, &val);
+ if (error < 0)
+ goto out;
+ tsdata->threshold = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_GLOVE_EN, &val);
+ if (error < 0)
+ goto out;
+ tsdata->glove_enable = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_REPORT_SPEED, &val);
+ if (error < 0)
+ goto out;
+ tsdata->report_speed = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_PWR_NOISE_EN, &val);
+ if (error < 0)
+ goto out;
+ tsdata->noise_filter_enable = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_FILTER_DATA, &val);
+ if (error < 0)
+ goto out;
+ tsdata->filter_data = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_GAIN, &val);
+ if (error < 0)
+ goto out;
+ tsdata->gain = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_EDGE_OFFSET, &val);
+ if (error < 0)
+ goto out;
+ tsdata->edge_offset = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_RX_NR_USED, &val);
+ if (error < 0)
+ goto out;
+ tsdata->rx_number_used = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_TX_NR_USED, &val);
+ if (error < 0)
+ goto out;
+ tsdata->tx_number_used = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_PWR_MODE, &val);
+ if (error < 0)
+ goto out;
+ tsdata->power_mode = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_FW_VERSION, &val);
+ if (error < 0)
+ goto out;
+ tsdata->fw_version = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_LIB_VERSION, &val);
+ if (error < 0)
+ goto out;
+ tsdata->lib_version = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_TP_INFO, &val);
+ if (error < 0)
+ goto out;
+ tsdata->tp_information = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_TP_CHIP_ID, &val);
+ if (error < 0)
+ goto out;
+ tsdata->tp_chip_id = val;
+
+ error = regmap_read(tsdata->regmap, HY46XX_BOOT_VER, &val);
+ if (error < 0)
+ goto out;
+ tsdata->bootloader_version = val;
+
+ return;
+out:
+ dev_err(&tsdata->client->dev, "Failed to read default settings");
+}
+
+static void hycon_hy46xx_disable_regulator(void *arg)
+{
+ struct hycon_hy46xx_data *data = arg;
+
+ regulator_disable(data->vcc);
+}
+
+static int hycon_hy46xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hycon_hy46xx_data *tsdata;
+ struct input_dev *input;
+ int error;
+
+ dev_dbg(&client->dev, "probing for HYCON HY46XX I2C\n");
+
+ tsdata = devm_kzalloc(&client->dev, sizeof(*tsdata), GFP_KERNEL);
+ if (!tsdata)
+ return -ENOMEM;
+
+ tsdata->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(tsdata->vcc)) {
+ error = PTR_ERR(tsdata->vcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to request regulator: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(tsdata->vcc);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to enable vcc: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&client->dev,
+ hycon_hy46xx_disable_regulator,
+ tsdata);
+ if (error)
+ return error;
+
+ tsdata->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(tsdata->reset_gpio)) {
+ error = PTR_ERR(tsdata->reset_gpio);
+ dev_err(&client->dev,
+ "Failed to request GPIO reset pin, error %d\n", error);
+ return error;
+ }
+
+ if (tsdata->reset_gpio) {
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(tsdata->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(tsdata->reset_gpio, 0);
+ msleep(1000);
+ }
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input) {
+ dev_err(&client->dev, "failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&tsdata->mutex);
+ tsdata->client = client;
+ tsdata->input = input;
+
+ tsdata->regmap = devm_regmap_init_i2c(client,
+ &hycon_hy46xx_i2c_regmap_config);
+ if (IS_ERR(tsdata->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(tsdata->regmap);
+ }
+
+ hycon_hy46xx_get_defaults(&client->dev, tsdata);
+ hycon_hy46xx_get_parameters(tsdata);
+
+ input->name = "Hycon Capacitive Touch";
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &client->dev;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, -1, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, -1, 0, 0);
+
+ touchscreen_parse_properties(input, true, &tsdata->prop);
+
+ error = input_mt_init_slots(input, HY46XX_MAX_SUPPORTED_POINTS,
+ INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev, "Unable to init MT slots.\n");
+ return error;
+ }
+
+ i2c_set_clientdata(client, tsdata);
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hycon_hy46xx_isr, IRQF_ONESHOT,
+ client->name, tsdata);
+ if (error) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &hycon_hy46xx_attr_group);
+ if (error)
+ return error;
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ dev_dbg(&client->dev,
+ "HYCON HY46XX initialized: IRQ %d, Reset pin %d.\n",
+ client->irq,
+ tsdata->reset_gpio ? desc_to_gpio(tsdata->reset_gpio) : -1);
+
+ return 0;
+}
+
+static const struct i2c_device_id hycon_hy46xx_id[] = {
+ { .name = "hy4613" },
+ { .name = "hy4614" },
+ { .name = "hy4621" },
+ { .name = "hy4623" },
+ { .name = "hy4633" },
+ { .name = "hy4635" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, hycon_hy46xx_id);
+
+static const struct of_device_id hycon_hy46xx_of_match[] = {
+ { .compatible = "hycon,hy4613" },
+ { .compatible = "hycon,hy4614" },
+ { .compatible = "hycon,hy4621" },
+ { .compatible = "hycon,hy4623" },
+ { .compatible = "hycon,hy4633" },
+ { .compatible = "hycon,hy4635" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hycon_hy46xx_of_match);
+
+static struct i2c_driver hycon_hy46xx_driver = {
+ .driver = {
+ .name = "hycon_hy46xx",
+ .of_match_table = hycon_hy46xx_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = hycon_hy46xx_id,
+ .probe = hycon_hy46xx_probe,
+};
+
+module_i2c_driver(hycon_hy46xx_driver);
+
+MODULE_AUTHOR("Giulio Benetti <giulio.benetti@benettiengineering.com>");
+MODULE_DESCRIPTION("HYCON HY46XX I2C Touchscreen Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index d8fccf048bf4..30576a5f2f04 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int *x, unsigned int *y,
unsigned int *z)
{
- if (touchdata[0] & BIT(finger))
+ if (!(touchdata[0] & BIT(finger)))
return false;
*x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c
new file mode 100644
index 000000000000..c5d259c76adc
--- /dev/null
+++ b/drivers/input/touchscreen/ilitek_ts_i2c.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ILITEK Touch IC driver for 23XX, 25XX and Lego series
+ *
+ * Copyright (C) 2011 ILI Technology Corporation.
+ * Copyright (C) 2020 Luca Hsu <luca_hsu@ilitek.com>
+ * Copyright (C) 2021 Joe Hung <joe_hung@ilitek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/input/touchscreen.h>
+#include <asm/unaligned.h>
+
+
+#define ILITEK_TS_NAME "ilitek_ts"
+#define BL_V1_8 0x108
+#define BL_V1_7 0x107
+#define BL_V1_6 0x106
+
+#define ILITEK_TP_CMD_GET_TP_RES 0x20
+#define ILITEK_TP_CMD_GET_SCRN_RES 0x21
+#define ILITEK_TP_CMD_SET_IC_SLEEP 0x30
+#define ILITEK_TP_CMD_SET_IC_WAKE 0x31
+#define ILITEK_TP_CMD_GET_FW_VER 0x40
+#define ILITEK_TP_CMD_GET_PRL_VER 0x42
+#define ILITEK_TP_CMD_GET_MCU_VER 0x61
+#define ILITEK_TP_CMD_GET_IC_MODE 0xC0
+
+#define REPORT_COUNT_ADDRESS 61
+#define ILITEK_SUPPORT_MAX_POINT 40
+
+struct ilitek_protocol_info {
+ u16 ver;
+ u8 ver_major;
+};
+
+struct ilitek_ts_data {
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+ struct input_dev *input_dev;
+ struct touchscreen_properties prop;
+
+ const struct ilitek_protocol_map *ptl_cb_func;
+ struct ilitek_protocol_info ptl;
+
+ char product_id[30];
+ u16 mcu_ver;
+ u8 ic_mode;
+ u8 firmware_ver[8];
+
+ s32 reset_time;
+ s32 screen_max_x;
+ s32 screen_max_y;
+ s32 screen_min_x;
+ s32 screen_min_y;
+ s32 max_tp;
+};
+
+struct ilitek_protocol_map {
+ u16 cmd;
+ const char *name;
+ int (*func)(struct ilitek_ts_data *ts, u16 cmd, u8 *inbuf, u8 *outbuf);
+};
+
+enum ilitek_cmds {
+ /* common cmds */
+ GET_PTL_VER = 0,
+ GET_FW_VER,
+ GET_SCRN_RES,
+ GET_TP_RES,
+ GET_IC_MODE,
+ GET_MCU_VER,
+ SET_IC_SLEEP,
+ SET_IC_WAKE,
+
+ /* ALWAYS keep at the end */
+ MAX_CMD_CNT
+};
+
+/* ILITEK I2C R/W APIs */
+static int ilitek_i2c_write_and_read(struct ilitek_ts_data *ts,
+ u8 *cmd, int write_len, int delay,
+ u8 *data, int read_len)
+{
+ int error;
+ struct i2c_client *client = ts->client;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = write_len,
+ .buf = cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = read_len,
+ .buf = data,
+ },
+ };
+
+ if (delay == 0 && write_len > 0 && read_len > 0) {
+ error = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (error < 0)
+ return error;
+ } else {
+ if (write_len > 0) {
+ error = i2c_transfer(client->adapter, msgs, 1);
+ if (error < 0)
+ return error;
+ }
+ if (delay > 0)
+ mdelay(delay);
+
+ if (read_len > 0) {
+ error = i2c_transfer(client->adapter, msgs + 1, 1);
+ if (error < 0)
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+/* ILITEK ISR APIs */
+static void ilitek_touch_down(struct ilitek_ts_data *ts, unsigned int id,
+ unsigned int x, unsigned int y)
+{
+ struct input_dev *input = ts->input_dev;
+
+ input_mt_slot(input, id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+
+ touchscreen_report_pos(input, &ts->prop, x, y, true);
+}
+
+static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
+{
+ int error = 0;
+ u8 buf[512];
+ int packet_len = 5;
+ int packet_max_point = 10;
+ int report_max_point;
+ int i, count;
+ struct input_dev *input = ts->input_dev;
+ struct device *dev = &ts->client->dev;
+ unsigned int x, y, status, id;
+
+ error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64);
+ if (error) {
+ dev_err(dev, "get touch info failed, err:%d\n", error);
+ goto err_sync_frame;
+ }
+
+ report_max_point = buf[REPORT_COUNT_ADDRESS];
+ if (report_max_point > ts->max_tp) {
+ dev_err(dev, "FW report max point:%d > panel info. max:%d\n",
+ report_max_point, ts->max_tp);
+ error = -EINVAL;
+ goto err_sync_frame;
+ }
+
+ count = DIV_ROUND_UP(report_max_point, packet_max_point);
+ for (i = 1; i < count; i++) {
+ error = ilitek_i2c_write_and_read(ts, NULL, 0, 0,
+ buf + i * 64, 64);
+ if (error) {
+ dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n",
+ count, error);
+ goto err_sync_frame;
+ }
+ }
+
+ for (i = 0; i < report_max_point; i++) {
+ status = buf[i * packet_len + 1] & 0x40;
+ if (!status)
+ continue;
+
+ id = buf[i * packet_len + 1] & 0x3F;
+
+ x = get_unaligned_le16(buf + i * packet_len + 2);
+ y = get_unaligned_le16(buf + i * packet_len + 4);
+
+ if (x > ts->screen_max_x || x < ts->screen_min_x ||
+ y > ts->screen_max_y || y < ts->screen_min_y) {
+ dev_warn(dev, "invalid position, X[%d,%u,%d], Y[%d,%u,%d]\n",
+ ts->screen_min_x, x, ts->screen_max_x,
+ ts->screen_min_y, y, ts->screen_max_y);
+ continue;
+ }
+
+ ilitek_touch_down(ts, id, x, y);
+ }
+
+err_sync_frame:
+ input_mt_sync_frame(input);
+ input_sync(input);
+ return error;
+}
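
Layout of one 5-byte contact record parsed above, derived from the code (offsets relative to record i):

	/*
	 * buf[i*5 + 1] bit 6      contact-present flag (0x40)
	 * buf[i*5 + 1] bits 0..5  slot id
	 * buf[i*5 + 2] le16       x
	 * buf[i*5 + 4] le16       y
	 *
	 * Hypothetical record 0x41 0x34 0x01 0xF0 0x00 decodes to
	 * id 1, x = 0x0134 = 308, y = 0x00F0 = 240.
	 */
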
+
+/* APIs of cmds for ILITEK Touch IC */
+static int api_protocol_set_cmd(struct ilitek_ts_data *ts,
+ u16 idx, u8 *inbuf, u8 *outbuf)
+{
+ u16 cmd;
+ int error;
+
+ if (idx >= MAX_CMD_CNT)
+ return -EINVAL;
+
+ cmd = ts->ptl_cb_func[idx].cmd;
+ error = ts->ptl_cb_func[idx].func(ts, cmd, inbuf, outbuf);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int api_protocol_get_ptl_ver(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ int error;
+ u8 buf[64];
+
+ buf[0] = cmd;
+ error = ilitek_i2c_write_and_read(ts, buf, 1, 5, outbuf, 3);
+ if (error)
+ return error;
+
+ ts->ptl.ver = get_unaligned_be16(outbuf);
+ ts->ptl.ver_major = outbuf[0];
+
+ return 0;
+}
+
+static int api_protocol_get_mcu_ver(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ int error;
+ u8 buf[64];
+
+ buf[0] = cmd;
+ error = ilitek_i2c_write_and_read(ts, buf, 1, 5, outbuf, 32);
+ if (error)
+ return error;
+
+ ts->mcu_ver = get_unaligned_le16(outbuf);
+ memset(ts->product_id, 0, sizeof(ts->product_id));
+ memcpy(ts->product_id, outbuf + 6, 26);
+
+ return 0;
+}
+
+static int api_protocol_get_fw_ver(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ int error;
+ u8 buf[64];
+
+ buf[0] = cmd;
+ error = ilitek_i2c_write_and_read(ts, buf, 1, 5, outbuf, 8);
+ if (error)
+ return error;
+
+ memcpy(ts->firmware_ver, outbuf, 8);
+
+ return 0;
+}
+
+static int api_protocol_get_scrn_res(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ int error;
+ u8 buf[64];
+
+ buf[0] = cmd;
+ error = ilitek_i2c_write_and_read(ts, buf, 1, 5, outbuf, 8);
+ if (error)
+ return error;
+
+ ts->screen_min_x = get_unaligned_le16(outbuf);
+ ts->screen_min_y = get_unaligned_le16(outbuf + 2);
+ ts->screen_max_x = get_unaligned_le16(outbuf + 4);
+ ts->screen_max_y = get_unaligned_le16(outbuf + 6);
+
+ return 0;
+}
+
+static int api_protocol_get_tp_res(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ int error;
+ u8 buf[64];
+
+ buf[0] = cmd;
+ error = ilitek_i2c_write_and_read(ts, buf, 1, 5, outbuf, 15);
+ if (error)
+ return error;
+
+ ts->max_tp = outbuf[8];
+ if (ts->max_tp > ILITEK_SUPPORT_MAX_POINT) {
+ dev_err(&ts->client->dev, "Invalid MAX_TP:%d from FW\n",
+ ts->max_tp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int api_protocol_get_ic_mode(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ int error;
+ u8 buf[64];
+
+ buf[0] = cmd;
+ error = ilitek_i2c_write_and_read(ts, buf, 1, 5, outbuf, 2);
+ if (error)
+ return error;
+
+ ts->ic_mode = outbuf[0];
+ return 0;
+}
+
+static int api_protocol_set_ic_sleep(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ u8 buf[64];
+
+ buf[0] = cmd;
+ return ilitek_i2c_write_and_read(ts, buf, 1, 0, NULL, 0);
+}
+
+static int api_protocol_set_ic_wake(struct ilitek_ts_data *ts,
+ u16 cmd, u8 *inbuf, u8 *outbuf)
+{
+ u8 buf[64];
+
+ buf[0] = cmd;
+ return ilitek_i2c_write_and_read(ts, buf, 1, 0, NULL, 0);
+}
+
+static const struct ilitek_protocol_map ptl_func_map[] = {
+ /* common cmds */
+ [GET_PTL_VER] = {
+ ILITEK_TP_CMD_GET_PRL_VER, "GET_PTL_VER",
+ api_protocol_get_ptl_ver
+ },
+ [GET_FW_VER] = {
+ ILITEK_TP_CMD_GET_FW_VER, "GET_FW_VER",
+ api_protocol_get_fw_ver
+ },
+ [GET_SCRN_RES] = {
+ ILITEK_TP_CMD_GET_SCRN_RES, "GET_SCRN_RES",
+ api_protocol_get_scrn_res
+ },
+ [GET_TP_RES] = {
+ ILITEK_TP_CMD_GET_TP_RES, "GET_TP_RES",
+ api_protocol_get_tp_res
+ },
+ [GET_IC_MODE] = {
+ ILITEK_TP_CMD_GET_IC_MODE, "GET_IC_MODE",
+ api_protocol_get_ic_mode
+ },
+ [GET_MCU_VER] = {
+ ILITEK_TP_CMD_GET_MCU_VER, "GET_MOD_VER",
+ api_protocol_get_mcu_ver
+ },
+ [SET_IC_SLEEP] = {
+ ILITEK_TP_CMD_SET_IC_SLEEP, "SET_IC_SLEEP",
+ api_protocol_set_ic_sleep
+ },
+ [SET_IC_WAKE] = {
+ ILITEK_TP_CMD_SET_IC_WAKE, "SET_IC_WAKE",
+ api_protocol_set_ic_wake
+ },
+};
+
+/* Probe APIs */
+static void ilitek_reset(struct ilitek_ts_data *ts, int delay)
+{
+ if (ts->reset_gpio) {
+ gpiod_set_value(ts->reset_gpio, 1);
+ mdelay(10);
+ gpiod_set_value(ts->reset_gpio, 0);
+ mdelay(delay);
+ }
+}
+
+static int ilitek_protocol_init(struct ilitek_ts_data *ts)
+{
+ int error;
+ u8 outbuf[64];
+
+ ts->ptl_cb_func = ptl_func_map;
+ ts->reset_time = 600;
+
+ error = api_protocol_set_cmd(ts, GET_PTL_VER, NULL, outbuf);
+ if (error)
+ return error;
+
+ /* Protocol v3 is not supported currently */
+ if (ts->ptl.ver_major == 0x3 ||
+ ts->ptl.ver == BL_V1_6 ||
+ ts->ptl.ver == BL_V1_7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ilitek_read_tp_info(struct ilitek_ts_data *ts, bool boot)
+{
+ u8 outbuf[256];
+ int error;
+
+ error = api_protocol_set_cmd(ts, GET_PTL_VER, NULL, outbuf);
+ if (error)
+ return error;
+
+ error = api_protocol_set_cmd(ts, GET_MCU_VER, NULL, outbuf);
+ if (error)
+ return error;
+
+ error = api_protocol_set_cmd(ts, GET_FW_VER, NULL, outbuf);
+ if (error)
+ return error;
+
+ if (boot) {
+ error = api_protocol_set_cmd(ts, GET_SCRN_RES, NULL,
+ outbuf);
+ if (error)
+ return error;
+ }
+
+ error = api_protocol_set_cmd(ts, GET_TP_RES, NULL, outbuf);
+ if (error)
+ return error;
+
+ error = api_protocol_set_cmd(ts, GET_IC_MODE, NULL, outbuf);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int ilitek_input_dev_init(struct device *dev, struct ilitek_ts_data *ts)
+{
+ int error;
+ struct input_dev *input;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ ts->input_dev = input;
+ input->name = ILITEK_TS_NAME;
+ input->id.bustype = BUS_I2C;
+
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ ts->screen_min_x, ts->screen_max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ ts->screen_min_y, ts->screen_max_y, 0, 0);
+
+ touchscreen_parse_properties(input, true, &ts->prop);
+
+ error = input_mt_init_slots(input, ts->max_tp,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "initialize MT slots failed, err:%d\n", error);
+ return error;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "register input device failed, err:%d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ilitek_i2c_isr(int irq, void *dev_id)
+{
+ struct ilitek_ts_data *ts = dev_id;
+ int error;
+
+ error = ilitek_process_and_report_v6(ts);
+ if (error < 0) {
+ dev_err(&ts->client->dev, "[%s] err:%d\n", __func__, error);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ilitek_ts_data *ts = i2c_get_clientdata(client);
+
+ return scnprintf(buf, PAGE_SIZE,
+ "fw version: [%02X%02X.%02X%02X.%02X%02X.%02X%02X]\n",
+ ts->firmware_ver[0], ts->firmware_ver[1],
+ ts->firmware_ver[2], ts->firmware_ver[3],
+ ts->firmware_ver[4], ts->firmware_ver[5],
+ ts->firmware_ver[6], ts->firmware_ver[7]);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ilitek_ts_data *ts = i2c_get_clientdata(client);
+
+ return scnprintf(buf, PAGE_SIZE, "product id: [%04X], module: [%s]\n",
+ ts->mcu_ver, ts->product_id);
+}
+static DEVICE_ATTR_RO(product_id);
+
+static struct attribute *ilitek_sysfs_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_product_id.attr,
+ NULL
+};
+
+static struct attribute_group ilitek_attrs_group = {
+ .attrs = ilitek_sysfs_attrs,
+};
+
+static int ilitek_ts_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ilitek_ts_data *ts;
+ struct device *dev = &client->dev;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ dev_err(dev, "request gpiod failed: %d", error);
+ return error;
+ }
+
+ ilitek_reset(ts, 1000);
+
+ error = ilitek_protocol_init(ts);
+ if (error) {
+ dev_err(dev, "protocol init failed: %d", error);
+ return error;
+ }
+
+ error = ilitek_read_tp_info(ts, true);
+ if (error) {
+ dev_err(dev, "read tp info failed: %d", error);
+ return error;
+ }
+
+ error = ilitek_input_dev_init(dev, ts);
+ if (error) {
+ dev_err(dev, "input dev init failed: %d", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, ts->client->irq,
+ NULL, ilitek_i2c_isr, IRQF_ONESHOT,
+ "ilitek_touch_irq", ts);
+ if (error) {
+ dev_err(dev, "request threaded irq failed: %d\n", error);
+ return error;
+ }
+
+ error = devm_device_add_group(dev, &ilitek_attrs_group);
+ if (error) {
+ dev_err(dev, "sysfs create group failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ilitek_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ilitek_ts_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ disable_irq(client->irq);
+
+ if (!device_may_wakeup(dev)) {
+ error = api_protocol_set_cmd(ts, SET_IC_SLEEP, NULL, NULL);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ilitek_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ilitek_ts_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ if (!device_may_wakeup(dev)) {
+ error = api_protocol_set_cmd(ts, SET_IC_WAKE, NULL, NULL);
+ if (error)
+ return error;
+
+ ilitek_reset(ts, ts->reset_time);
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ilitek_pm_ops, ilitek_suspend, ilitek_resume);
+
+static const struct i2c_device_id ilitek_ts_i2c_id[] = {
+ { ILITEK_TS_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ilitek_ts_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ilitekts_acpi_id[] = {
+ { "ILTK0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ilitekts_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id ilitek_ts_i2c_match[] = {
+ {.compatible = "ilitek,ili2130",},
+ {.compatible = "ilitek,ili2131",},
+ {.compatible = "ilitek,ili2132",},
+ {.compatible = "ilitek,ili2316",},
+ {.compatible = "ilitek,ili2322",},
+ {.compatible = "ilitek,ili2323",},
+ {.compatible = "ilitek,ili2326",},
+ {.compatible = "ilitek,ili2520",},
+ {.compatible = "ilitek,ili2521",},
+ { },
+};
+MODULE_DEVICE_TABLE(of, ilitek_ts_i2c_match);
+#endif
+
+static struct i2c_driver ilitek_ts_i2c_driver = {
+ .driver = {
+ .name = ILITEK_TS_NAME,
+ .pm = &ilitek_pm_ops,
+ .of_match_table = of_match_ptr(ilitek_ts_i2c_match),
+ .acpi_match_table = ACPI_PTR(ilitekts_acpi_id),
+ },
+ .probe = ilitek_ts_i2c_probe,
+ .id_table = ilitek_ts_i2c_id,
+};
+module_i2c_driver(ilitek_ts_i2c_driver);
+
+MODULE_AUTHOR("ILITEK");
+MODULE_DESCRIPTION("ILITEK I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 54f30038dca4..b3fa71213d60 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -8,7 +8,7 @@
* made available by the vendor. Firmware files may be pushed to the device's
* nonvolatile memory by writing the filename to the 'fw_file' sysfs control.
*
- * Link to PC-based configuration tool and data sheet: http://www.azoteq.com/
+ * Link to PC-based configuration tool and datasheet: https://www.azoteq.com/
*/
#include <linux/bits.h>
@@ -32,14 +32,10 @@
#define IQS5XX_NUM_RETRIES 10
#define IQS5XX_NUM_CONTACTS 5
#define IQS5XX_WR_BYTES_MAX 2
-#define IQS5XX_XY_RES_MAX 0xFFFE
#define IQS5XX_PROD_NUM_IQS550 40
#define IQS5XX_PROD_NUM_IQS572 58
#define IQS5XX_PROD_NUM_IQS525 52
-#define IQS5XX_PROJ_NUM_A000 0
-#define IQS5XX_PROJ_NUM_B000 15
-#define IQS5XX_MAJOR_VER_MIN 2
#define IQS5XX_SHOW_RESET BIT(7)
#define IQS5XX_ACK_RESET BIT(7)
@@ -64,6 +60,7 @@
#define IQS5XX_SYS_CFG1 0x058F
#define IQS5XX_X_RES 0x066E
#define IQS5XX_Y_RES 0x0670
+#define IQS5XX_EXP_FILE 0x0677
#define IQS5XX_CHKSM 0x83C0
#define IQS5XX_APP 0x8400
#define IQS5XX_CSTM 0xBE00
@@ -87,22 +84,11 @@
#define IQS5XX_BL_CMD_CRC 0x03
#define IQS5XX_BL_BLK_LEN_MAX 64
#define IQS5XX_BL_ID 0x0200
-#define IQS5XX_BL_STATUS_RESET 0x00
-#define IQS5XX_BL_STATUS_AVAIL 0xA5
#define IQS5XX_BL_STATUS_NONE 0xEE
#define IQS5XX_BL_CRC_PASS 0x00
#define IQS5XX_BL_CRC_FAIL 0x01
#define IQS5XX_BL_ATTEMPTS 3
-struct iqs5xx_private {
- struct i2c_client *client;
- struct input_dev *input;
- struct gpio_desc *reset_gpio;
- struct touchscreen_properties prop;
- struct mutex lock;
- u8 bl_status;
-};
-
struct iqs5xx_dev_id_info {
__be16 prod_num;
__be16 proj_num;
@@ -134,6 +120,16 @@ struct iqs5xx_status {
struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
} __packed;
+struct iqs5xx_private {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *reset_gpio;
+ struct touchscreen_properties prop;
+ struct mutex lock;
+ struct iqs5xx_dev_id_info dev_id_info;
+ u8 exp_file[2];
+};
+
static int iqs5xx_read_burst(struct i2c_client *client,
u16 reg, void *val, u16 len)
{
@@ -446,7 +442,7 @@ static int iqs5xx_set_state(struct i2c_client *client, u8 state)
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
int error1, error2;
- if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ if (!iqs5xx->dev_id_info.bl_status)
return 0;
mutex_lock(&iqs5xx->lock);
@@ -504,10 +500,6 @@ static int iqs5xx_axis_init(struct i2c_client *client)
input->open = iqs5xx_open;
input->close = iqs5xx_close;
- input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
- input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
- input_set_capability(input, EV_ABS, ABS_MT_PRESSURE);
-
input_set_drvdata(input, iqs5xx);
iqs5xx->input = input;
}
@@ -520,26 +512,29 @@ static int iqs5xx_axis_init(struct i2c_client *client)
if (error)
return error;
- input_abs_set_max(iqs5xx->input, ABS_MT_POSITION_X, max_x);
- input_abs_set_max(iqs5xx->input, ABS_MT_POSITION_Y, max_y);
+ input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(iqs5xx->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+ input_set_abs_params(iqs5xx->input, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
touchscreen_parse_properties(iqs5xx->input, true, prop);
- if (prop->max_x > IQS5XX_XY_RES_MAX) {
- dev_err(&client->dev, "Invalid maximum x-coordinate: %u > %u\n",
- prop->max_x, IQS5XX_XY_RES_MAX);
+ /*
+ * The device reserves 0xFFFF for coordinates that correspond to slots
+ * which are not in a state of touch.
+ */
+ if (prop->max_x >= U16_MAX || prop->max_y >= U16_MAX) {
+ dev_err(&client->dev, "Invalid touchscreen size: %u*%u\n",
+ prop->max_x, prop->max_y);
return -EINVAL;
- } else if (prop->max_x != max_x) {
+ }
+
+ if (prop->max_x != max_x) {
error = iqs5xx_write_word(client, IQS5XX_X_RES, prop->max_x);
if (error)
return error;
}
- if (prop->max_y > IQS5XX_XY_RES_MAX) {
- dev_err(&client->dev, "Invalid maximum y-coordinate: %u > %u\n",
- prop->max_y, IQS5XX_XY_RES_MAX);
- return -EINVAL;
- } else if (prop->max_y != max_y) {
+ if (prop->max_y != max_y) {
error = iqs5xx_write_word(client, IQS5XX_Y_RES, prop->max_y);
if (error)
return error;
@@ -574,7 +569,7 @@ static int iqs5xx_dev_init(struct i2c_client *client)
* the missing zero is prepended).
*/
buf[0] = 0;
- dev_id_info = (struct iqs5xx_dev_id_info *)&buf[(buf[1] > 0) ? 0 : 1];
+ dev_id_info = (struct iqs5xx_dev_id_info *)&buf[buf[1] ? 0 : 1];
switch (be16_to_cpu(dev_id_info->prod_num)) {
case IQS5XX_PROD_NUM_IQS550:
@@ -587,35 +582,20 @@ static int iqs5xx_dev_init(struct i2c_client *client)
return -EINVAL;
}
- switch (be16_to_cpu(dev_id_info->proj_num)) {
- case IQS5XX_PROJ_NUM_A000:
- dev_err(&client->dev, "Unsupported project number: %u\n",
- be16_to_cpu(dev_id_info->proj_num));
- return iqs5xx_bl_open(client);
- case IQS5XX_PROJ_NUM_B000:
- break;
- default:
- dev_err(&client->dev, "Unrecognized project number: %u\n",
- be16_to_cpu(dev_id_info->proj_num));
- return -EINVAL;
- }
-
- if (dev_id_info->major_ver < IQS5XX_MAJOR_VER_MIN) {
- dev_err(&client->dev, "Unsupported major version: %u\n",
- dev_id_info->major_ver);
+ /*
+ * With the product number recognized yet shifted by one byte, open the
+ * bootloader and wait for user space to convert the A000 device into a
+ * B000 device via new firmware.
+ */
+ if (buf[1]) {
+ dev_err(&client->dev, "Opening bootloader for A000 device\n");
return iqs5xx_bl_open(client);
}
- switch (dev_id_info->bl_status) {
- case IQS5XX_BL_STATUS_AVAIL:
- case IQS5XX_BL_STATUS_NONE:
- break;
- default:
- dev_err(&client->dev,
- "Unrecognized bootloader status: 0x%02X\n",
- dev_id_info->bl_status);
- return -EINVAL;
- }
+ error = iqs5xx_read_burst(client, IQS5XX_EXP_FILE,
+ iqs5xx->exp_file, sizeof(iqs5xx->exp_file));
+ if (error)
+ return error;
error = iqs5xx_axis_init(client);
if (error)
@@ -640,7 +620,7 @@ static int iqs5xx_dev_init(struct i2c_client *client)
if (error)
return error;
- iqs5xx->bl_status = dev_id_info->bl_status;
+ iqs5xx->dev_id_info = *dev_id_info;
/*
* The following delay allows ATI to complete before the open and close
@@ -666,7 +646,7 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
* RDY output during bootloader mode. If the device operates outside of
* bootloader mode, the input device is guaranteed to be allocated.
*/
- if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
+ if (!iqs5xx->dev_id_info.bl_status)
return IRQ_NONE;
error = iqs5xx_read_burst(client, IQS5XX_SYS_INFO0,
@@ -852,12 +832,9 @@ static int iqs5xx_fw_file_parse(struct i2c_client *client,
static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
{
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
- int error, error_bl = 0;
+ int error, error_init = 0;
u8 *pmap;
- if (iqs5xx->bl_status == IQS5XX_BL_STATUS_NONE)
- return -EPERM;
-
pmap = kzalloc(IQS5XX_PMAP_LEN, GFP_KERNEL);
if (!pmap)
return -ENOMEM;
@@ -875,7 +852,7 @@ static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
*/
disable_irq(client->irq);
- iqs5xx->bl_status = IQS5XX_BL_STATUS_RESET;
+ iqs5xx->dev_id_info.bl_status = 0;
error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
if (error) {
@@ -895,21 +872,14 @@ static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
error = iqs5xx_bl_verify(client, IQS5XX_CSTM,
pmap + IQS5XX_CHKSM_LEN + IQS5XX_APP_LEN,
IQS5XX_CSTM_LEN);
- if (error)
- goto err_reset;
-
- error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_EXEC, 0);
err_reset:
- if (error) {
- iqs5xx_reset(client);
- usleep_range(10000, 10100);
- }
+ iqs5xx_reset(client);
+ usleep_range(15000, 15100);
- error_bl = error;
- error = iqs5xx_dev_init(client);
- if (!error && iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
- error = -EINVAL;
+ error_init = iqs5xx_dev_init(client);
+ if (!iqs5xx->dev_id_info.bl_status)
+ error_init = error_init ? : -EINVAL;
enable_irq(client->irq);
@@ -918,10 +888,7 @@ err_reset:
err_kfree:
kfree(pmap);
- if (error_bl)
- return error_bl;
-
- return error;
+ return error ? : error_init;
}
static ssize_t fw_file_store(struct device *dev,
@@ -968,14 +935,47 @@ static ssize_t fw_file_store(struct device *dev,
return count;
}
+static ssize_t fw_info_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+
+ if (!iqs5xx->dev_id_info.bl_status)
+ return -ENODATA;
+
+ return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
+ be16_to_cpu(iqs5xx->dev_id_info.prod_num),
+ be16_to_cpu(iqs5xx->dev_id_info.proj_num),
+ iqs5xx->dev_id_info.major_ver,
+ iqs5xx->dev_id_info.minor_ver,
+ iqs5xx->exp_file[0], iqs5xx->exp_file[1]);
+}
+
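Example read of the new attribute (values hypothetical):

	/*
	 * # cat /sys/bus/i2c/devices/<dev>/fw_info
	 * 40.15.2.6:1.0
	 *
	 * i.e. product 40 (IQS550), project 15 (B000), firmware v2.6, and
	 * exported-file version 1.0 as read back from IQS5XX_EXP_FILE.
	 */
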
static DEVICE_ATTR_WO(fw_file);
+static DEVICE_ATTR_RO(fw_info);
static struct attribute *iqs5xx_attrs[] = {
&dev_attr_fw_file.attr,
+ &dev_attr_fw_info.attr,
NULL,
};
+static umode_t iqs5xx_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_fw_file.attr &&
+ (iqs5xx->dev_id_info.bl_status == IQS5XX_BL_STATUS_NONE ||
+ !iqs5xx->reset_gpio))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group iqs5xx_attr_group = {
+ .is_visible = iqs5xx_attr_is_visible,
.attrs = iqs5xx_attrs,
};
@@ -1032,8 +1032,8 @@ static int iqs5xx_probe(struct i2c_client *client,
i2c_set_clientdata(client, iqs5xx);
iqs5xx->client = client;
- iqs5xx->reset_gpio = devm_gpiod_get(&client->dev,
- "reset", GPIOD_OUT_LOW);
+ iqs5xx->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_LOW);
if (IS_ERR(iqs5xx->reset_gpio)) {
error = PTR_ERR(iqs5xx->reset_gpio);
dev_err(&client->dev, "Failed to request GPIO: %d\n", error);
@@ -1042,9 +1042,6 @@ static int iqs5xx_probe(struct i2c_client *client,
mutex_init(&iqs5xx->lock);
- iqs5xx_reset(client);
- usleep_range(10000, 10100);
-
error = iqs5xx_dev_init(client);
if (error)
return error;
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index b51450b3d943..15b5cb763526 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -34,18 +34,18 @@
#define LPC32XX_TSC_AUX_MIN 0x38
#define LPC32XX_TSC_AUX_MAX 0x3C
-#define LPC32XX_TSC_STAT_FIFO_OVRRN (1 << 8)
-#define LPC32XX_TSC_STAT_FIFO_EMPTY (1 << 7)
+#define LPC32XX_TSC_STAT_FIFO_OVRRN BIT(8)
+#define LPC32XX_TSC_STAT_FIFO_EMPTY BIT(7)
#define LPC32XX_TSC_SEL_DEFVAL 0x0284
#define LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4 (0x1 << 11)
#define LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(s) ((10 - (s)) << 7)
#define LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(s) ((10 - (s)) << 4)
-#define LPC32XX_TSC_ADCCON_POWER_UP (1 << 2)
-#define LPC32XX_TSC_ADCCON_AUTO_EN (1 << 0)
+#define LPC32XX_TSC_ADCCON_POWER_UP BIT(2)
+#define LPC32XX_TSC_ADCCON_AUTO_EN BIT(0)
-#define LPC32XX_TSC_FIFO_TS_P_LEVEL (1 << 31)
+#define LPC32XX_TSC_FIFO_TS_P_LEVEL BIT(31)
#define LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(x) (((x) & 0x03FF0000) >> 16)
#define LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(y) ((y) & 0x000003FF)
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 225796a3f546..2745bf1aee38 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1502,7 +1502,8 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, mip4_interrupt,
- IRQF_ONESHOT, MIP4_DEVICE_NAME, ts);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ MIP4_DEVICE_NAME, ts);
if (error) {
dev_err(&client->dev,
"Failed to request interrupt %d: %d\n",
@@ -1510,8 +1511,6 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
return error;
}
- disable_irq(client->irq);
-
error = input_register_device(input);
if (error) {
dev_err(&client->dev,
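
The conversion above follows the IRQF_NO_AUTOEN pattern (sketch with placeholder names, not part of this hunk): the line stays masked from the moment it is requested, closing the window in which the handler could fire before the old explicit disable_irq() took effect, and the driver enables it once it is ready:

	error = devm_request_threaded_irq(dev, irq, NULL, handler,
					  IRQF_ONESHOT | IRQF_NO_AUTOEN,
					  name, data);
	if (error)
		return error;
	/* ... later, typically in the input device's open() callback ... */
	enable_irq(irq);
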
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 16557f51b09d..0efd1a1bb192 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-// Melfas MMS114/MMS152 touchscreen device driver
+// Melfas MMS114/MMS136/MMS152 touchscreen device driver
//
// Copyright (c) 2012 Samsung Electronics Co., Ltd.
// Author: Joonyoung Shim <jy0922.shim@samsung.com>
@@ -44,7 +44,8 @@
#define MMS114_MAX_AREA 0xff
#define MMS114_MAX_TOUCH 10
-#define MMS114_PACKET_NUM 8
+#define MMS114_EVENT_SIZE 8
+#define MMS136_EVENT_SIZE 6
/* Touch type */
#define MMS114_TYPE_NONE 0
@@ -53,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
+ TYPE_MMS136 = 136,
TYPE_MMS152 = 152,
TYPE_MMS345L = 345,
};
@@ -209,7 +211,11 @@ static irqreturn_t mms114_interrupt(int irq, void *dev_id)
if (packet_size <= 0)
goto out;
- touch_size = packet_size / MMS114_PACKET_NUM;
+ /* MMS136 has slightly different event size */
+ if (data->type == TYPE_MMS136)
+ touch_size = packet_size / MMS136_EVENT_SIZE;
+ else
+ touch_size = packet_size / MMS114_EVENT_SIZE;
error = __mms114_read_reg(data, MMS114_INFORMATION, packet_size,
(u8 *)touch);
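
Since the two controller families pack events differently, the same packet_size now yields different contact counts (derived from the defines above):

	/* packet_size 24 -> 24 / MMS136_EVENT_SIZE (6) = 4 contacts on MMS136 */
	/* packet_size 24 -> 24 / MMS114_EVENT_SIZE (8) = 3 contacts elsewhere */
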
@@ -275,6 +281,7 @@ static int mms114_get_version(struct mms114_data *data)
break;
case TYPE_MMS114:
+ case TYPE_MMS136:
error = __mms114_read_reg(data, MMS114_TSP_REV, 6, buf);
if (error)
return error;
@@ -297,8 +304,8 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* Only MMS114 has configuration and power on registers */
- if (data->type != TYPE_MMS114)
+ /* Only MMS114 and MMS136 have configuration and power on registers */
+ if (data->type != TYPE_MMS114 && data->type != TYPE_MMS136)
return 0;
error = mms114_set_active(data, true);
@@ -480,7 +487,7 @@ static int mms114_probe(struct i2c_client *client,
0, data->props.max_y, 0, 0);
}
- if (data->type == TYPE_MMS114) {
+ if (data->type == TYPE_MMS114 || data->type == TYPE_MMS136) {
/*
* The firmware handles movement and pressure fuzz, so
* don't duplicate that in software.
@@ -530,13 +537,13 @@ static int mms114_probe(struct i2c_client *client,
}
error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mms114_interrupt, IRQF_ONESHOT,
+ NULL, mms114_interrupt,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
dev_name(&client->dev), data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
return error;
}
- disable_irq(client->irq);
error = input_register_device(data->input_dev);
if (error) {
@@ -605,6 +612,9 @@ static const struct of_device_id mms114_dt_match[] = {
.compatible = "melfas,mms114",
.data = (void *)TYPE_MMS114,
}, {
+ .compatible = "melfas,mms136",
+ .data = (void *)TYPE_MMS136,
+ }, {
.compatible = "melfas,mms152",
.data = (void *)TYPE_MMS152,
}, {
diff --git a/drivers/input/touchscreen/msg2638.c b/drivers/input/touchscreen/msg2638.c
new file mode 100644
index 000000000000..75536bc88969
--- /dev/null
+++ b/drivers/input/touchscreen/msg2638.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for MStar msg2638 touchscreens
+ *
+ * Copyright (c) 2021 Vincent Knecht <vincent.knecht@mailoo.org>
+ *
+ * Checksum and IRQ handler based on mstar_drv_common.c and
+ * mstar_drv_mutual_fw_control.c
+ * Copyright (c) 2006-2012 MStar Semiconductor, Inc.
+ *
+ * Driver structure based on zinitix.c by Michael Srba <Michael.Srba@seznam.cz>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define MODE_DATA_RAW 0x5A
+
+#define MAX_SUPPORTED_FINGER_NUM 5
+
+#define CHIP_ON_DELAY_MS 15
+#define FIRMWARE_ON_DELAY_MS 50
+#define RESET_DELAY_MIN_US 10000
+#define RESET_DELAY_MAX_US 11000
+
+struct packet {
+ u8 xy_hi; /* higher bits of x and y coordinates */
+ u8 x_low;
+ u8 y_low;
+ u8 pressure;
+};
+
+struct touch_event {
+ u8 mode;
+ struct packet pkt[MAX_SUPPORTED_FINGER_NUM];
+ u8 proximity;
+ u8 checksum;
+};
+
+struct msg2638_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct touchscreen_properties prop;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpiod;
+};
+
+static u8 msg2638_checksum(u8 *data, u32 length)
+{
+ s32 sum = 0;
+ u32 i;
+
+ for (i = 0; i < length; i++)
+ sum += data[i];
+
+ return (u8)((-sum) & 0xFF);
+}
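
The checksum above is the two's complement of the byte sum, so a valid frame satisfies (sum of data[0..len-2] + checksum) % 256 == 0. Hypothetical example:

	/* data = { 0x5A, 0x01, 0x02 } sums to 0x5D                */
	/* msg2638_checksum(data, 3) returns (-0x5D) & 0xFF = 0xA3 */
	/* 0x5D + 0xA3 = 0x100 -> 0 mod 256, frame accepted        */
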
+
+static irqreturn_t msg2638_ts_irq_handler(int irq, void *msg2638_handler)
+{
+ struct msg2638_ts_data *msg2638 = msg2638_handler;
+ struct i2c_client *client = msg2638->client;
+ struct input_dev *input = msg2638->input_dev;
+ struct touch_event touch_event;
+ u32 len = sizeof(touch_event);
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(touch_event),
+ .buf = (u8 *)&touch_event,
+ },
+ };
+ struct packet *p;
+ u16 x, y;
+ int ret;
+ int i;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(&client->dev,
+ "Failed I2C transfer in irq handler: %d\n",
+ ret < 0 ? ret : -EIO);
+ goto out;
+ }
+
+ if (touch_event.mode != MODE_DATA_RAW)
+ goto out;
+
+ if (msg2638_checksum((u8 *)&touch_event, len - 1) !=
+ touch_event.checksum) {
+ dev_err(&client->dev, "Failed checksum!\n");
+ goto out;
+ }
+
+ for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++) {
+ p = &touch_event.pkt[i];
+
+ /* Ignore non-pressed finger data */
+ if (p->xy_hi == 0xFF && p->x_low == 0xFF && p->y_low == 0xFF)
+ continue;
+
+ x = (((p->xy_hi & 0xF0) << 4) | p->x_low);
+ y = (((p->xy_hi & 0x0F) << 8) | p->y_low);
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(input, &msg2638->prop, x, y, true);
+ }
+
+ input_mt_sync_frame(msg2638->input_dev);
+ input_sync(msg2638->input_dev);
+
+out:
+ return IRQ_HANDLED;
+}
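+
+/*
+ * Aside: each finger packet packs two 12-bit coordinates into three bytes;
+ * the high nibble of xy_hi supplies x[11:8] and the low nibble y[11:8].
+ * A worked decode with arbitrary, illustrative values:
+ *
+ *	xy_hi = 0x2A, x_low = 0x3C, y_low = 0x51
+ *	x = ((0x2A & 0xF0) << 4) | 0x3C = 0x200 | 0x3C = 0x23C (572)
+ *	y = ((0x2A & 0x0F) << 8) | 0x51 = 0xA00 | 0x51 = 0xA51 (2641)
+ */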
+
+static void msg2638_reset(struct msg2638_ts_data *msg2638)
+{
+ gpiod_set_value_cansleep(msg2638->reset_gpiod, 1);
+ usleep_range(RESET_DELAY_MIN_US, RESET_DELAY_MAX_US);
+ gpiod_set_value_cansleep(msg2638->reset_gpiod, 0);
+ msleep(FIRMWARE_ON_DELAY_MS);
+}
+
+static int msg2638_start(struct msg2638_ts_data *msg2638)
+{
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(msg2638->supplies),
+ msg2638->supplies);
+ if (error) {
+ dev_err(&msg2638->client->dev,
+ "Failed to enable regulators: %d\n", error);
+ return error;
+ }
+
+ msleep(CHIP_ON_DELAY_MS);
+
+ msg2638_reset(msg2638);
+
+ enable_irq(msg2638->client->irq);
+
+ return 0;
+}
+
+static int msg2638_stop(struct msg2638_ts_data *msg2638)
+{
+ int error;
+
+ disable_irq(msg2638->client->irq);
+
+ error = regulator_bulk_disable(ARRAY_SIZE(msg2638->supplies),
+ msg2638->supplies);
+ if (error) {
+ dev_err(&msg2638->client->dev,
+ "Failed to disable regulators: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int msg2638_input_open(struct input_dev *dev)
+{
+ struct msg2638_ts_data *msg2638 = input_get_drvdata(dev);
+
+ return msg2638_start(msg2638);
+}
+
+static void msg2638_input_close(struct input_dev *dev)
+{
+ struct msg2638_ts_data *msg2638 = input_get_drvdata(dev);
+
+ msg2638_stop(msg2638);
+}
+
+static int msg2638_init_input_dev(struct msg2638_ts_data *msg2638)
+{
+ struct device *dev = &msg2638->client->dev;
+ struct input_dev *input_dev;
+ int error;
+
+ input_dev = devm_input_allocate_device(dev);
+ if (!input_dev) {
+ dev_err(dev, "Failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ input_set_drvdata(input_dev, msg2638);
+ msg2638->input_dev = input_dev;
+
+ input_dev->name = "MStar TouchScreen";
+ input_dev->phys = "input/ts";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->open = msg2638_input_open;
+ input_dev->close = msg2638_input_close;
+
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+
+ touchscreen_parse_properties(input_dev, true, &msg2638->prop);
+ if (!msg2638->prop.max_x || !msg2638->prop.max_y) {
+ dev_err(dev, "touchscreen-size-x and/or touchscreen-size-y not set in properties\n");
+ return -EINVAL;
+ }
+
+ error = input_mt_init_slots(input_dev, MAX_SUPPORTED_FINGER_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "Failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int msg2638_ts_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct msg2638_ts_data *msg2638;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "Failed to assert adapter's support for plain I2C.\n");
+ return -ENXIO;
+ }
+
+ msg2638 = devm_kzalloc(dev, sizeof(*msg2638), GFP_KERNEL);
+ if (!msg2638)
+ return -ENOMEM;
+
+ msg2638->client = client;
+ i2c_set_clientdata(client, msg2638);
+
+ msg2638->supplies[0].supply = "vdd";
+ msg2638->supplies[1].supply = "vddio";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(msg2638->supplies),
+ msg2638->supplies);
+ if (error) {
+ dev_err(dev, "Failed to get regulators: %d\n", error);
+ return error;
+ }
+
+ msg2638->reset_gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(msg2638->reset_gpiod)) {
+ error = PTR_ERR(msg2638->reset_gpiod);
+ dev_err(dev, "Failed to request reset GPIO: %d\n", error);
+ return error;
+ }
+
+ error = msg2638_init_input_dev(msg2638);
+ if (error) {
+ dev_err(dev, "Failed to initialize input device: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, msg2638_ts_irq_handler,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ client->name, msg2638);
+ if (error) {
+ dev_err(dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused msg2638_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct msg2638_ts_data *msg2638 = i2c_get_clientdata(client);
+
+ mutex_lock(&msg2638->input_dev->mutex);
+
+ if (input_device_enabled(msg2638->input_dev))
+ msg2638_stop(msg2638);
+
+ mutex_unlock(&msg2638->input_dev->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused msg2638_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct msg2638_ts_data *msg2638 = i2c_get_clientdata(client);
+ int ret = 0;
+
+ mutex_lock(&msg2638->input_dev->mutex);
+
+ if (input_device_enabled(msg2638->input_dev))
+ ret = msg2638_start(msg2638);
+
+ mutex_unlock(&msg2638->input_dev->mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(msg2638_pm_ops, msg2638_suspend, msg2638_resume);
+
+static const struct of_device_id msg2638_of_match[] = {
+ { .compatible = "mstar,msg2638" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msg2638_of_match);
+
+static struct i2c_driver msg2638_ts_driver = {
+ .probe_new = msg2638_ts_probe,
+ .driver = {
+ .name = "MStar-TS",
+ .pm = &msg2638_pm_ops,
+ .of_match_table = msg2638_of_match,
+ },
+};
+module_i2c_driver(msg2638_ts_driver);
+
+MODULE_AUTHOR("Vincent Knecht <vincent.knecht@mailoo.org>");
+MODULE_DESCRIPTION("MStar MSG2638 touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 8fa2f3b7cfd8..1ee760bac0cf 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/regulator/consumer.h>
@@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
sizeof(chip_id), (u8 *)&chip_id);
- if (error < 0) {
- dev_err(&client->dev, "Chip ID read error %d\n", error);
+ if (error < 0)
return error;
- }
data->chip_id = le32_to_cpu(chip_id);
dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
@@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
int error;
u32 status;
+ /*
+ * Some buggy BIOS-es bring up the chip in a stuck state where it
+ * blocks the I2C bus. The following steps are necessary to
+ * unstuck the chip / bus:
+ * 1. Turn off the Silead chip.
+ * 2. Try to do an I2C transfer with the chip, this will fail in
+ * response to which the I2C-bus-driver will call:
+ * i2c_recover_bus() which will unstuck the I2C-bus. Note the
+ * unstuck-ing of the I2C bus only works if we first drop the
+ * chip off the bus by turning it off.
+ * 3. Turn the chip back on.
+ *
+ * On the x86/ACPI systems where this problem is seen, steps 1. and
+ * 3. require making ACPI calls and dealing with ACPI Power
+ * Resources. The workaround below runtime-suspends the chip to
+ * turn it off, leaving it up to the ACPI subsystem to deal with
+ * this.
+ */
+
+ if (device_property_read_bool(&client->dev,
+ "silead,stuck-controller-bug")) {
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_allow(&client->dev);
+
+ pm_runtime_suspend(&client->dev);
+
+ dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
+ silead_ts_get_id(client);
+
+ /* The forbid will also resume the device */
+ pm_runtime_forbid(&client->dev);
+ pm_runtime_disable(&client->dev);
+ }
+
silead_ts_set_power(client, SILEAD_POWER_OFF);
silead_ts_set_power(client, SILEAD_POWER_ON);
error = silead_ts_get_id(client);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Chip ID read error %d\n", error);
return error;
+ }
error = silead_ts_init(client);
if (error)
@@ -486,7 +522,7 @@ static int silead_ts_probe(struct i2c_client *client,
silead_ts_read_props(client);
- /* We must have the IRQ provided by DT or ACPI subsytem */
+ /* We must have the IRQ provided by DT or ACPI subsystem */
if (client->irq <= 0)
return -ENODEV;
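The silead workaround relies entirely on standard runtime-PM calls; roughly, the sequence does the following (a sketch of the call semantics, not additional driver code):

	pm_runtime_set_active(dev);  /* mark active without touching the hardware */
	pm_runtime_enable(dev);      /* hand the device to the runtime-PM core */
	pm_runtime_allow(dev);       /* drop the usage count so suspend can happen */
	pm_runtime_suspend(dev);     /* ACPI powers the chip off, freeing the bus */
	/* the deliberately failing silead_ts_get_id() read lands here, and the
	   resulting timeout makes the bus driver call i2c_recover_bus() */
	pm_runtime_forbid(dev);      /* resumes the chip and pins it active again */
	pm_runtime_disable(dev);     /* return power control to the driver */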
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 9a64e1dbc04a..bc11203c9cf7 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -691,10 +691,9 @@ static int stmfts_probe(struct i2c_client *client,
* interrupts. To be on the safe side it's better to not enable
* the interrupts during their request.
*/
- irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, stmfts_irq_handler,
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"stmfts_irq", sdata);
if (err)
return err;
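This is the same conversion applied to mms114, wm831x-ts and zinitix elsewhere in this series: rather than requesting the IRQ and then immediately disabling it (or pre-marking it with irq_set_status_flags(irq, IRQ_NOAUTOEN)), the request itself passes IRQF_NO_AUTOEN so the line is never live in between. In sketch form, with a hypothetical handler and cookie:

	/* before: the IRQ is briefly enabled between these two calls */
	err = devm_request_threaded_irq(dev, irq, NULL, my_irq_handler,
					IRQF_ONESHOT, "my-ts", priv);
	if (!err)
		disable_irq(irq);

	/* after: the IRQ stays off until the driver calls enable_irq() */
	err = devm_request_threaded_irq(dev, irq, NULL, my_irq_handler,
					IRQF_ONESHOT | IRQF_NO_AUTOEN,
					"my-ts", priv);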
diff --git a/drivers/input/touchscreen/tsc2007.h b/drivers/input/touchscreen/tsc2007.h
index 91c60bf6dcaf..69b08dd6c8df 100644
--- a/drivers/input/touchscreen/tsc2007.h
+++ b/drivers/input/touchscreen/tsc2007.h
@@ -19,6 +19,8 @@
#ifndef _TSC2007_H
#define _TSC2007_H
+struct gpio_desc;
+
#define TSC2007_MEASURE_TEMP0 (0x0 << 4)
#define TSC2007_MEASURE_AUX (0x2 << 4)
#define TSC2007_MEASURE_TEMP1 (0x4 << 4)
@@ -69,7 +71,7 @@ struct tsc2007 {
int fuzzy;
int fuzzz;
- unsigned int gpio;
+ struct gpio_desc *gpiod;
int irq;
wait_queue_head_t wait;
diff --git a/drivers/input/touchscreen/tsc2007_core.c b/drivers/input/touchscreen/tsc2007_core.c
index 3b80abfc1eca..3e871d182c40 100644
--- a/drivers/input/touchscreen/tsc2007_core.c
+++ b/drivers/input/touchscreen/tsc2007_core.c
@@ -19,11 +19,12 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/platform_data/tsc2007.h>
#include "tsc2007.h"
@@ -220,71 +221,58 @@ static void tsc2007_close(struct input_dev *input_dev)
tsc2007_stop(ts);
}
-#ifdef CONFIG_OF
static int tsc2007_get_pendown_state_gpio(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct tsc2007 *ts = i2c_get_clientdata(client);
- return !gpio_get_value(ts->gpio);
+ return gpiod_get_value(ts->gpiod);
}
-static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
+static int tsc2007_probe_properties(struct device *dev, struct tsc2007 *ts)
{
- struct device_node *np = client->dev.of_node;
u32 val32;
u64 val64;
- if (!np) {
- dev_err(&client->dev, "missing device tree data\n");
- return -EINVAL;
- }
-
- if (!of_property_read_u32(np, "ti,max-rt", &val32))
+ if (!device_property_read_u32(dev, "ti,max-rt", &val32))
ts->max_rt = val32;
else
ts->max_rt = MAX_12BIT;
- if (!of_property_read_u32(np, "ti,fuzzx", &val32))
+ if (!device_property_read_u32(dev, "ti,fuzzx", &val32))
ts->fuzzx = val32;
- if (!of_property_read_u32(np, "ti,fuzzy", &val32))
+ if (!device_property_read_u32(dev, "ti,fuzzy", &val32))
ts->fuzzy = val32;
- if (!of_property_read_u32(np, "ti,fuzzz", &val32))
+ if (!device_property_read_u32(dev, "ti,fuzzz", &val32))
ts->fuzzz = val32;
- if (!of_property_read_u64(np, "ti,poll-period", &val64))
+ if (!device_property_read_u64(dev, "ti,poll-period", &val64))
ts->poll_period = msecs_to_jiffies(val64);
else
ts->poll_period = msecs_to_jiffies(1);
- if (!of_property_read_u32(np, "ti,x-plate-ohms", &val32)) {
+ if (!device_property_read_u32(dev, "ti,x-plate-ohms", &val32)) {
ts->x_plate_ohms = val32;
} else {
- dev_err(&client->dev, "missing ti,x-plate-ohms devicetree property.");
+ dev_err(dev, "Missing ti,x-plate-ohms device property\n");
return -EINVAL;
}
- ts->gpio = of_get_gpio(np, 0);
- if (gpio_is_valid(ts->gpio))
+ ts->gpiod = devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
+ if (IS_ERR(ts->gpiod))
+ return PTR_ERR(ts->gpiod);
+
+ if (ts->gpiod)
ts->get_pendown_state = tsc2007_get_pendown_state_gpio;
else
- dev_warn(&client->dev,
- "GPIO not specified in DT (of_get_gpio returned %d)\n",
- ts->gpio);
+ dev_warn(dev, "Pen down GPIO is not specified in properties\n");
return 0;
}
-#else
-static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
-{
- dev_err(&client->dev, "platform data is required!\n");
- return -EINVAL;
-}
-#endif
-static int tsc2007_probe_pdev(struct i2c_client *client, struct tsc2007 *ts,
+static int tsc2007_probe_pdev(struct device *dev, struct tsc2007 *ts,
const struct tsc2007_platform_data *pdata,
const struct i2c_device_id *id)
{
@@ -299,7 +287,7 @@ static int tsc2007_probe_pdev(struct i2c_client *client, struct tsc2007 *ts,
ts->fuzzz = pdata->fuzzz;
if (pdata->x_plate_ohms == 0) {
- dev_err(&client->dev, "x_plate_ohms is not set up in platform data");
+ dev_err(dev, "x_plate_ohms is not set up in platform data\n");
return -EINVAL;
}
@@ -332,9 +320,9 @@ static int tsc2007_probe(struct i2c_client *client,
return -ENOMEM;
if (pdata)
- err = tsc2007_probe_pdev(client, ts, pdata, id);
+ err = tsc2007_probe_pdev(&client->dev, ts, pdata, id);
else
- err = tsc2007_probe_dt(client, ts);
+ err = tsc2007_probe_properties(&client->dev, ts);
if (err)
return err;
@@ -431,18 +419,16 @@ static const struct i2c_device_id tsc2007_idtable[] = {
MODULE_DEVICE_TABLE(i2c, tsc2007_idtable);
-#ifdef CONFIG_OF
static const struct of_device_id tsc2007_of_match[] = {
{ .compatible = "ti,tsc2007" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tsc2007_of_match);
-#endif
static struct i2c_driver tsc2007_driver = {
.driver = {
.name = "tsc2007",
- .of_match_table = of_match_ptr(tsc2007_of_match),
+ .of_match_table = tsc2007_of_match,
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 1afc6bde2891..22826c387da5 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -145,15 +145,16 @@ static void wacom_i2c_close(struct input_dev *dev)
}
static int wacom_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct wacom_i2c *wac_i2c;
struct input_dev *input;
struct wacom_features features = { 0 };
int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "i2c_check_functionality error\n");
+ dev_err(dev, "i2c_check_functionality error\n");
return -EIO;
}
@@ -161,21 +162,22 @@ static int wacom_i2c_probe(struct i2c_client *client,
if (error)
return error;
- wac_i2c = kzalloc(sizeof(*wac_i2c), GFP_KERNEL);
- input = input_allocate_device();
- if (!wac_i2c || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ wac_i2c = devm_kzalloc(dev, sizeof(*wac_i2c), GFP_KERNEL);
+ if (!wac_i2c)
+ return -ENOMEM;
wac_i2c->client = client;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
wac_i2c->input = input;
input->name = "Wacom I2C Digitizer";
input->id.bustype = BUS_I2C;
input->id.vendor = 0x56a;
input->id.version = features.fw_version;
- input->dev.parent = &client->dev;
input->open = wacom_i2c_open;
input->close = wacom_i2c_close;
@@ -194,13 +196,11 @@ static int wacom_i2c_probe(struct i2c_client *client,
input_set_drvdata(input, wac_i2c);
- error = request_threaded_irq(client->irq, NULL, wacom_i2c_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "wacom_i2c", wac_i2c);
+ error = devm_request_threaded_irq(dev, client->irq, NULL, wacom_i2c_irq,
+ IRQF_ONESHOT, "wacom_i2c", wac_i2c);
if (error) {
- dev_err(&client->dev,
- "Failed to enable IRQ, error: %d\n", error);
- goto err_free_mem;
+ dev_err(dev, "Failed to request IRQ: %d\n", error);
+ return error;
}
/* Disable the IRQ, we'll enable it in wac_i2c_open() */
@@ -208,31 +208,10 @@ static int wacom_i2c_probe(struct i2c_client *client,
error = input_register_device(wac_i2c->input);
if (error) {
- dev_err(&client->dev,
- "Failed to register input device, error: %d\n", error);
- goto err_free_irq;
+ dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
}
- i2c_set_clientdata(client, wac_i2c);
- return 0;
-
-err_free_irq:
- free_irq(client->irq, wac_i2c);
-err_free_mem:
- input_free_device(input);
- kfree(wac_i2c);
-
- return error;
-}
-
-static int wacom_i2c_remove(struct i2c_client *client)
-{
- struct wacom_i2c *wac_i2c = i2c_get_clientdata(client);
-
- free_irq(client->irq, wac_i2c);
- input_unregister_device(wac_i2c->input);
- kfree(wac_i2c);
-
return 0;
}
@@ -269,7 +248,6 @@ static struct i2c_driver wacom_i2c_driver = {
},
.probe = wacom_i2c_probe,
- .remove = wacom_i2c_remove,
.id_table = wacom_i2c_id,
};
module_i2c_driver(wacom_i2c_driver);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index bb1699e0d3c7..319f57fb9af5 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -317,14 +317,13 @@ static int wm831x_ts_probe(struct platform_device *pdev)
error = request_threaded_irq(wm831x_ts->data_irq,
NULL, wm831x_ts_data_irq,
- irqf | IRQF_ONESHOT,
+ irqf | IRQF_ONESHOT | IRQF_NO_AUTOEN,
"Touchscreen data", wm831x_ts);
if (error) {
dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",
wm831x_ts->data_irq, error);
goto err_alloc;
}
- disable_irq(wm831x_ts->data_irq);
if (pdata && pdata->pd_irqf)
irqf = pdata->pd_irqf;
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 3b636beb583c..b8d901099378 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -513,10 +513,10 @@ static int zinitix_ts_probe(struct i2c_client *client)
return -EINVAL;
}
- irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, zinitix_ts_irq_handler,
- IRQF_ONESHOT, client->name, bt541);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ client->name, bt541);
if (error) {
dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
return error;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 192ef8f61310..1f111b399bca 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -349,7 +349,7 @@ config S390_AP_IOMMU
is not implemented as it is not necessary for VFIO.
config MTK_IOMMU
- bool "MTK IOMMU Support"
+ tristate "MediaTek IOMMU Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
@@ -364,7 +364,7 @@ config MTK_IOMMU
If unsure, say N here.
config MTK_IOMMU_V1
- bool "MTK IOMMU Version 1 (M4U gen1) Support"
+ tristate "MediaTek IOMMU Version 1 (M4U gen1) Support"
depends on ARM
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
@@ -408,4 +408,16 @@ config VIRTIO_IOMMU
Say Y here if you intend to run this kernel as a guest.
+config SPRD_IOMMU
+ tristate "Unisoc IOMMU Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ select IOMMU_API
+ help
+	  Support for the IOMMU on Unisoc SoCs. This IOMMU can be used by
+	  Unisoc multimedia devices such as the display, the image codec
+	  (JPEG) and a few signal processors, including the VSP (video),
+	  GSP (graphic), ISP (image) and CPP (camera pixel processor).
+
+ Say Y here if you want to use the multimedia devices listed above.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 61bd30cd8369..c0fb0ba88143 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,4 +27,5 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o
+obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o
+obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 026ce7f8d993..55dd38d814d9 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,7 +11,6 @@
#include "amd_iommu_types.h"
-extern int amd_iommu_get_num_iommus(void);
extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
@@ -65,7 +64,6 @@ extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
-extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
#ifdef CONFIG_IRQ_REMAP
extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 6937e3674a16..94c1a7a9876d 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -693,7 +693,6 @@ struct iommu_dev_data {
} ats; /* ATS state */
bool pri_tlp; /* PASID TLB required for
PPR completions */
- u32 errata; /* Bitmap for errata to apply */
bool use_vapic; /* Enable device to use vapic mode */
bool defer_attach;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 321f5906e6ed..d006724f4dc2 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@@ -208,7 +207,6 @@ u16 *amd_iommu_alias_table;
* for a specific device. It is also indexed by the PCI device id.
*/
struct amd_iommu **amd_iommu_rlookup_table;
-EXPORT_SYMBOL(amd_iommu_rlookup_table);
/*
* This table is used to find the irq remapping table for a given device id
@@ -257,8 +255,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value, bool is_write);
static bool amd_iommu_pre_enabled = true;
@@ -268,7 +264,6 @@ bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
-EXPORT_SYMBOL(translation_pre_enabled);
static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
@@ -1717,53 +1712,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
-static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
- int retry;
+ u64 val;
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0, save_reg, save_src;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
- /* save the value to restore, if writable */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
- iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
- goto pc_false;
-
- /*
- * Disable power gating by programing the performance counter
- * source to 20 (i.e. counts the reads and writes from/to IOMMU
- * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
- * which never get incremented during this init phase.
- * (Note: The event is also deprecated.)
- */
- val = 20;
- if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
- goto pc_false;
-
- /* Check if the performance counters can be written to */
- val = 0xabcd;
- for (retry = 5; retry; retry--) {
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
- iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
- val2)
- break;
-
- /* Wait about 20 msec for power gating to disable and retry. */
- msleep(20);
- }
-
- /* restore */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
- iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
- goto pc_false;
-
- if (val != val2)
- goto pc_false;
-
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
@@ -1771,11 +1729,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
iommu->max_counters = (u8) ((val >> 7) & 0xf);
return;
-
-pc_false:
- pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
@@ -1837,7 +1790,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* IVHD and MMIO conflict.
*/
if (features != iommu->features)
- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
+ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
features, iommu->features);
}
@@ -1935,8 +1888,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
- iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
- iommu_device_register(&iommu->iommu);
+ iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
}
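
The registration change repeated across the IOMMU drivers in this series folds the former iommu_device_set_ops()/iommu_device_set_fwnode() calls into iommu_device_register() itself, which now takes the ops and the hardware device (NULL here, since the AMD driver has no fwnode to derive). Schematically, with a hypothetical ops table:

	/* old three-step registration */
	iommu_device_set_ops(&iommu->iommu, &my_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, dev->fwnode);
	ret = iommu_device_register(&iommu->iommu);

	/* new single call; the core derives the fwnode from the device */
	ret = iommu_device_register(&iommu->iommu, &my_iommu_ops, dev);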
@@ -3277,7 +3229,6 @@ struct amd_iommu *get_amd_iommu(unsigned int idx)
return iommu;
return NULL;
}
-EXPORT_SYMBOL(get_amd_iommu);
/****************************************************************************
*
@@ -3359,7 +3310,6 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
-EXPORT_SYMBOL(amd_iommu_pc_get_reg);
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
@@ -3368,4 +3318,3 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
-EXPORT_SYMBOL(amd_iommu_pc_set_reg);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a69a8b573e40..80e8e1916dd1 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -290,15 +290,6 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
return true;
}
-static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
-{
- struct iommu_dev_data *dev_data;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
-
- return dev_data->errata & (1 << erratum) ? true : false;
-}
-
/*
* This function checks if the driver got a valid device from the caller to
* avoid dereferencing invalid pointers.
@@ -861,33 +852,58 @@ static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}
-static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
+/*
+ * Builds an invalidation address which is suitable for one page or multiple
+ * pages. Sets the size bit (S) when more than one page is flushed.
+ */
+static inline u64 build_inv_address(u64 address, size_t size)
{
- u64 pages;
- bool s;
+ u64 pages, end, msb_diff;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = false;
- if (pages > 1) {
+ if (pages == 1)
+ return address & PAGE_MASK;
+
+ end = address + size - 1;
+
+ /*
+ * msb_diff holds the index of the most significant bit that
+ * flipped between the start and end.
+ */
+ msb_diff = fls64(end ^ address) - 1;
+
+ /*
+ * Bits 63:52 are sign extended. If for some reason bit 51 is different
+ * between the start and the end, invalidate everything.
+ */
+ if (unlikely(msb_diff > 51)) {
+ address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+ } else {
/*
- * If we have to flush more than one page, flush all
- * TLB entries for this domain
+ * The msb-bit must be clear on the address. Just set all the
+ * lower bits.
*/
- address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = true;
+ address |= 1ull << (msb_diff - 1);
}
+ /* Clear bits 11:0 */
address &= PAGE_MASK;
+ /* Set the size bit - we flush more than one 4kb page */
+ return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
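+
+/*
+ * Worked example (illustrative values): address = 0x10000 and
+ * size = 0x3000 span pages 0x10000..0x12fff, so end = 0x12fff and
+ * end ^ address = 0x02fff, giving msb_diff = 13.  Setting bit 12
+ * yields 0x11000; combined with the size bit, bit 12 set and bit 13
+ * clear encode a 16 KiB flush window (0x10000..0x13fff) that covers
+ * the whole requested range.
+ */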
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+ size_t size, u16 domid, int pde)
+{
+ u64 inv_address = build_inv_address(address, size);
+
memset(cmd, 0, sizeof(*cmd));
cmd->data[1] |= domid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] = lower_32_bits(inv_address);
+ cmd->data[3] = upper_32_bits(inv_address);
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (s) /* size bit - we flush more than one 4kb page */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
@@ -895,32 +911,15 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size)
{
- u64 pages;
- bool s;
-
- pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = false;
-
- if (pages > 1) {
- /*
- * If we have to flush more than one page, flush all
- * TLB entries for this domain
- */
- address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = true;
- }
-
- address &= PAGE_MASK;
+ u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = devid;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] = lower_32_bits(inv_address);
+ cmd->data[3] = upper_32_bits(inv_address);
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
- if (s)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
@@ -1531,33 +1530,9 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
pci_disable_pasid(pdev);
}
-/* FIXME: Change generic reset-function to do the same */
-static int pri_reset_while_enabled(struct pci_dev *pdev)
-{
- u16 control;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return -EINVAL;
-
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- control |= PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
-
- return 0;
-}
-
static int pdev_iommuv2_enable(struct pci_dev *pdev)
{
- bool reset_enable;
- int reqs, ret;
-
- /* FIXME: Hardcode number of outstanding requests for now */
- reqs = 32;
- if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
- reqs = 1;
- reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+ int ret;
/* Only allow access to user-accessible pages */
ret = pci_enable_pasid(pdev, 0);
@@ -1570,16 +1545,11 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev)
goto out_err;
/* Enable PRI */
- ret = pci_enable_pri(pdev, reqs);
+ /* FIXME: Hardcode number of outstanding requests for now */
+ ret = pci_enable_pri(pdev, 32);
if (ret)
goto out_err;
- if (reset_enable) {
- ret = pri_reset_while_enabled(pdev);
- if (ret)
- goto out_err;
- }
-
ret = pci_enable_ats(pdev, PAGE_SHIFT);
if (ret)
goto out_err;
@@ -1715,9 +1685,6 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
return ERR_PTR(-ENODEV);
devid = get_device_id(dev);
- if (devid < 0)
- return ERR_PTR(devid);
-
iommu = amd_iommu_rlookup_table[devid];
if (dev_iommu_priv_get(dev))
@@ -1771,26 +1738,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
return acpihid_device_group(dev);
}
-static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- return -ENODEV;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = !amd_iommu_unmap_flush;
- return 0;
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
-}
-
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
@@ -1855,7 +1802,7 @@ int __init amd_iommu_init_dma_ops(void)
pr_info("IO/TLB flush on unmap enabled\n");
else
pr_info("Lazy IO/TLB flushing enabled\n");
-
+ iommu_set_dma_strict(amd_iommu_unmap_flush);
return 0;
}
@@ -2019,16 +1966,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+ int devid = get_device_id(dev);
struct amd_iommu *iommu;
- int devid;
if (!check_device(dev))
return;
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
if (dev_data->domain != NULL)
detach_device(dev);
@@ -2257,7 +2200,6 @@ const struct iommu_ops amd_iommu_ops = {
.release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
- .domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
@@ -2310,9 +2252,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
unsigned long flags;
int levels, ret;
- if (pasids <= 0 || pasids > (PASID_MASK + 1))
- return -EINVAL;
-
/* Number of GCR3 table levels required */
for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
levels += 1;
@@ -2563,52 +2502,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
}
EXPORT_SYMBOL(amd_iommu_complete_ppr);
-struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
-{
- struct protection_domain *pdomain;
- struct iommu_dev_data *dev_data;
- struct device *dev = &pdev->dev;
- struct iommu_domain *io_domain;
-
- if (!check_device(dev))
- return NULL;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- pdomain = dev_data->domain;
- io_domain = iommu_get_domain_for_dev(dev);
-
- if (pdomain == NULL && dev_data->defer_attach) {
- dev_data->defer_attach = false;
- pdomain = to_pdomain(io_domain);
- attach_device(dev, pdomain);
- }
-
- if (pdomain == NULL)
- return NULL;
-
- if (io_domain->type != IOMMU_DOMAIN_DMA)
- return NULL;
-
- /* Only return IOMMUv2 domains */
- if (!(pdomain->flags & PD_IOMMUV2_MASK))
- return NULL;
-
- return &pdomain->domain;
-}
-EXPORT_SYMBOL(amd_iommu_get_v2_domain);
-
-void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
-{
- struct iommu_dev_data *dev_data;
-
- if (!amd_iommu_v2_supported())
- return;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- dev_data->errata |= (1 << erratum);
-}
-EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
-
int amd_iommu_device_info(struct pci_dev *pdev,
struct amd_iommu_device_info *info)
{
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 8594b4a83043..54b2f27b81d4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -245,8 +245,6 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
break;
case CMDQ_OP_PREFETCH_CFG:
cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
- cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
- cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
break;
case CMDQ_OP_CFGI_CD:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
@@ -909,8 +907,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- for (i = 0; i < master->num_sids; i++) {
- cmd.cfgi.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.cfgi.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
}
@@ -1355,6 +1353,29 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
}
+__maybe_unused
+static struct arm_smmu_master *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+ struct rb_node *node;
+ struct arm_smmu_stream *stream;
+
+ lockdep_assert_held(&smmu->streams_mutex);
+
+ node = smmu->streams.rb_node;
+ while (node) {
+ stream = rb_entry(node, struct arm_smmu_stream, node);
+ if (stream->id < sid)
+ node = node->rb_right;
+ else if (stream->id > sid)
+ node = node->rb_left;
+ else
+ return stream->master;
+ }
+
+ return NULL;
+}
+
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
@@ -1588,8 +1609,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
- for (i = 0; i < master->num_sids; i++) {
- cmd.atc.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
}
@@ -1632,8 +1653,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
if (!master->ats_enabled)
continue;
- for (i = 0; i < master->num_sids; i++) {
- cmd.atc.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
}
}
@@ -2017,7 +2038,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
- if (smmu_domain->non_strict)
+ if (!iommu_get_dma_strict(domain))
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2065,13 +2086,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
int i, j;
struct arm_smmu_device *smmu = master->smmu;
- for (i = 0; i < master->num_sids; ++i) {
- u32 sid = master->sids[i];
+ for (i = 0; i < master->num_streams; ++i) {
+ u32 sid = master->streams[i].id;
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
- if (master->sids[j] == sid)
+ if (master->streams[j].id == sid)
break;
if (j < i)
continue;
@@ -2305,6 +2326,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (!gather->pgsize)
+ return;
+
arm_smmu_tlb_inv_range_domain(gather->start,
gather->end - gather->start + 1,
gather->pgsize, true, smmu_domain);
@@ -2345,11 +2369,101 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+ struct arm_smmu_master *master)
+{
+ int i;
+ int ret = 0;
+ struct arm_smmu_stream *new_stream, *cur_stream;
+ struct rb_node **new_node, *parent_node = NULL;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
+ GFP_KERNEL);
+ if (!master->streams)
+ return -ENOMEM;
+ master->num_streams = fwspec->num_ids;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u32 sid = fwspec->ids[i];
+
+ new_stream = &master->streams[i];
+ new_stream->id = sid;
+ new_stream->master = master;
+
+ /*
+ * Check the SIDs are in range of the SMMU and our stream table
+ */
+ if (!arm_smmu_sid_in_range(smmu, sid)) {
+ ret = -ERANGE;
+ break;
+ }
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+ ret = arm_smmu_init_l2_strtab(smmu, sid);
+ if (ret)
+ break;
+ }
+
+ /* Insert into SID tree */
+ new_node = &(smmu->streams.rb_node);
+ while (*new_node) {
+ cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+ node);
+ parent_node = *new_node;
+ if (cur_stream->id > new_stream->id) {
+ new_node = &((*new_node)->rb_left);
+ } else if (cur_stream->id < new_stream->id) {
+ new_node = &((*new_node)->rb_right);
+ } else {
+ dev_warn(master->dev,
+ "stream %u already in tree\n",
+ cur_stream->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ if (ret)
+ break;
+
+ rb_link_node(&new_stream->node, parent_node, new_node);
+ rb_insert_color(&new_stream->node, &smmu->streams);
+ }
+
+ if (ret) {
+ for (i--; i >= 0; i--)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ kfree(master->streams);
+ }
+ mutex_unlock(&smmu->streams_mutex);
+
+ return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_master *master)
+{
+ int i;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ if (!smmu || !master->streams)
+ return;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ mutex_unlock(&smmu->streams_mutex);
+
+ kfree(master->streams);
+}
+
static struct iommu_ops arm_smmu_ops;
static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
- int i, ret;
+ int ret;
struct arm_smmu_device *smmu;
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -2370,29 +2484,15 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
- master->sids = fwspec->ids;
- master->num_sids = fwspec->num_ids;
INIT_LIST_HEAD(&master->bonds);
dev_iommu_priv_set(dev, master);
- /* Check the SIDs are in range of the SMMU and our stream table */
- for (i = 0; i < master->num_sids; i++) {
- u32 sid = master->sids[i];
-
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
- goto err_free_master;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- goto err_free_master;
- }
- }
+ ret = arm_smmu_insert_master(smmu, master);
+ if (ret)
+ goto err_free_master;
- master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+ device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
+ master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
/*
* Note that PASID must be enabled before, and disabled after ATS:
@@ -2428,6 +2528,7 @@ static void arm_smmu_release_device(struct device *dev)
WARN_ON(arm_smmu_master_sva_enabled(master));
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
+ arm_smmu_remove_master(master);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -2449,76 +2550,18 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
return group;
}
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
- default:
- return -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = smmu_domain->non_strict;
- return 0;
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
int ret = 0;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
mutex_lock(&smmu_domain->init_mutex);
-
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- break;
- default:
- ret = -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch(attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- smmu_domain->non_strict = *(int *)data;
- break;
- default:
- ret = -ENODEV;
- }
- break;
- default:
- ret = -EINVAL;
- }
-
-out_unlock:
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
mutex_unlock(&smmu_domain->init_mutex);
+
return ret;
}
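
With the attr multiplexer gone, nesting is requested through a dedicated op; the core-side wrapper presumably reduces to something like the sketch below (assuming an iommu_enable_nesting() helper in the IOMMU core, as the ops change implies):

	int iommu_enable_nesting(struct iommu_domain *domain)
	{
		if (domain->type != IOMMU_DOMAIN_UNMANAGED)
			return -EINVAL;
		if (!domain->ops->enable_nesting)
			return -EINVAL;
		return domain->ops->enable_nesting(domain);
	}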
@@ -2619,8 +2662,7 @@ static struct iommu_ops arm_smmu_ops = {
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
- .domain_get_attr = arm_smmu_domain_get_attr,
- .domain_set_attr = arm_smmu_domain_set_attr,
+ .enable_nesting = arm_smmu_enable_nesting,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2632,6 +2674,7 @@ static struct iommu_ops arm_smmu_ops = {
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
+ .owner = THIS_MODULE,
};
/* Probing and initialisation functions */
@@ -2851,6 +2894,9 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
+ mutex_init(&smmu->streams_mutex);
+ smmu->streams = RB_ROOT;
+
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -3620,10 +3666,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- ret = iommu_device_register(&smmu->iommu);
+ ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
return ret;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index f985817c967a..46e8c49214a8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -115,7 +115,7 @@
#define GERROR_PRIQ_ABT_ERR (1 << 3)
#define GERROR_EVTQ_ABT_ERR (1 << 2)
#define GERROR_CMDQ_ERR (1 << 0)
-#define GERROR_ERR_MASK 0xfd
+#define GERROR_ERR_MASK 0x1fd
#define ARM_SMMU_GERRORN 0x64
@@ -410,8 +410,6 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_PREFETCH_CFG 0x1
struct {
u32 sid;
- u8 size;
- u64 addr;
} prefetch;
#define CMDQ_OP_CFGI_STE 0x3
@@ -639,6 +637,15 @@ struct arm_smmu_device {
/* IOMMU core code handle */
struct iommu_device iommu;
+
+ struct rb_root streams;
+ struct mutex streams_mutex;
+};
+
+struct arm_smmu_stream {
+ u32 id;
+ struct arm_smmu_master *master;
+ struct rb_node node;
};
/* SMMU private data for each master */
@@ -647,8 +654,8 @@ struct arm_smmu_master {
struct device *dev;
struct arm_smmu_domain *domain;
struct list_head domain_head;
- u32 *sids;
- unsigned int num_sids;
+ struct arm_smmu_stream *streams;
+ unsigned int num_streams;
bool ats_enabled;
bool sva_enabled;
struct list_head bonds;
@@ -668,7 +675,6 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
- bool non_strict;
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index d8c6bfde6a61..6f72c4d208ca 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -761,14 +761,17 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
+ if (!iommu_get_dma_strict(domain))
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
if (smmu->impl && smmu->impl->init_context) {
ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
if (ret)
goto out_clear_smmu;
}
- if (smmu_domain->pgtbl_cfg.quirks)
- pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
+ if (smmu_domain->pgtbl_quirks)
+ pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -1481,98 +1484,34 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
return group;
}
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
- switch(domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
- case DOMAIN_ATTR_IO_PGTABLE_CFG: {
- struct io_pgtable_domain_attr *pgtbl_cfg = data;
- *pgtbl_cfg = smmu_domain->pgtbl_cfg;
-
- return 0;
- }
- default:
- return -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
- bool non_strict = smmu_domain->pgtbl_cfg.quirks &
- IO_PGTABLE_QUIRK_NON_STRICT;
- *(int *)data = non_strict;
- return 0;
- }
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ return ret;
}
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
{
- int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
mutex_lock(&smmu_domain->init_mutex);
-
- switch(domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- break;
- case DOMAIN_ATTR_IO_PGTABLE_CFG: {
- struct io_pgtable_domain_attr *pgtbl_cfg = data;
-
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- smmu_domain->pgtbl_cfg = *pgtbl_cfg;
- break;
- }
- default:
- ret = -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- if (*(int *)data)
- smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
- else
- smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
- break;
- default:
- ret = -ENODEV;
- }
- break;
- default:
- ret = -EINVAL;
- }
-out_unlock:
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->pgtbl_quirks = quirks;
mutex_unlock(&smmu_domain->init_mutex);
+
return ret;
}
@@ -1631,13 +1570,14 @@ static struct iommu_ops arm_smmu_ops = {
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
- .domain_get_attr = arm_smmu_domain_get_attr,
- .domain_set_attr = arm_smmu_domain_set_attr,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
+ .owner = THIS_MODULE,
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -2221,10 +2161,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
}
- iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- err = iommu_device_register(&smmu->iommu);
+ err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
return err;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index d2a2d1bc58ba..c31a59d35c64 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -364,7 +364,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- struct io_pgtable_domain_attr pgtbl_cfg;
+ unsigned long pgtbl_quirks;
const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 7f280c8d5c53..4294abe389b2 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -847,10 +847,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
return ret;
}
- iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
- iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);
-
- ret = iommu_device_register(&qcom_iommu->iommu);
+ ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
return ret;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index af765c813cc8..7bcdd1205535 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -52,15 +52,17 @@ struct iommu_dma_cookie {
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+bool iommu_dma_forcedac __read_mostly;
-void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
- struct iommu_domain *domain)
+static int __init iommu_dma_forcedac_setup(char *str)
{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
+ int ret = kstrtobool(str, &iommu_dma_forcedac);
- free_cpu_cached_iovas(cpu, iovad);
+ if (!ret && iommu_dma_forcedac)
+ pr_info("Forcing DAC for PCI devices\n");
+ return ret;
}
+early_param("iommu.forcedac", iommu_dma_forcedac_setup);
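+
+/*
+ * Usage note: via kstrtobool() the option accepts the usual spellings
+ * (1/0, y/n, on/off) on the kernel command line, e.g.
+ *
+ *	iommu.forcedac=1
+ *
+ * which skips the SAC preference below so PCI DMA allocations go
+ * straight above 32 bits, logging "Forcing DAC for PCI devices".
+ */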
static void iommu_dma_entry_dtor(unsigned long data)
{
@@ -304,10 +306,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
domain = cookie->fq_domain;
- /*
- * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
- * implies that ops->flush_iotlb_all must be non-NULL.
- */
+
domain->ops->flush_iotlb_all(domain);
}
@@ -334,7 +333,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
- int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -371,8 +369,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
init_iova_domain(iovad, 1UL << order, base_pfn);
if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
- !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
- attr) {
+ domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
iommu_dma_entry_dtor))
pr_warn("iova flush queue initialization failed\n");
@@ -444,7 +441,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
- if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+ if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
@@ -499,8 +496,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
phys_addr_t phys;
phys = iommu_iova_to_phys(domain, dma_addr);
@@ -510,8 +505,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
__iommu_dma_unmap(dev, dma_addr, size);
if (unlikely(is_swiotlb_buffer(phys)))
- swiotlb_tbl_unmap_single(dev, phys, size,
- iova_align(iovad, size), dir, attrs);
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -581,10 +575,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
- swiotlb_tbl_unmap_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+ swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
}
@@ -650,23 +642,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}
-/**
- * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
- * @dev: Device to allocate memory for. Must be a real device
- * attached to an iommu_dma_domain
- * @size: Size of buffer in bytes
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @prot: pgprot_t to use for the remapped mapping
- * @attrs: DMA attributes for this allocation
- *
- * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+/*
+ * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
- *
- * Return: Mapped virtual address, or NULL on failure.
*/
-static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
+ size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -676,11 +657,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
- struct sg_table sgt;
dma_addr_t iova;
- void *vaddr;
-
- *dma_handle = DMA_MAPPING_ERROR;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -707,41 +684,91 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
if (!iova)
goto out_free_pages;
- if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+ if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
int i;
- for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
< size)
goto out_free_sg;
+ sgt->sgl->dma_address = iova;
+ sgt->sgl->dma_length = size;
+ return pages;
+
+out_free_sg:
+ sg_free_table(sgt);
+out_free_iova:
+ iommu_dma_free_iova(cookie, iova, size, NULL);
+out_free_pages:
+ __iommu_dma_free_pages(pages, count);
+ return NULL;
+}
+
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+ unsigned long attrs)
+{
+ struct page **pages;
+ struct sg_table sgt;
+ void *vaddr;
+
+ pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
+ attrs);
+ if (!pages)
+ return NULL;
+ *dma_handle = sgt.sgl->dma_address;
+ sg_free_table(&sgt);
vaddr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!vaddr)
goto out_unmap;
-
- *dma_handle = iova;
- sg_free_table(&sgt);
return vaddr;
out_unmap:
- __iommu_dma_unmap(dev, iova, size);
-out_free_sg:
- sg_free_table(&sgt);
-out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
-out_free_pages:
- __iommu_dma_free_pages(pages, count);
+ __iommu_dma_unmap(dev, *dma_handle, size);
+ __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
return NULL;
}
+#ifdef CONFIG_DMA_REMAP
+static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
+ size_t size, enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct dma_sgt_handle *sh;
+
+ sh = kmalloc(sizeof(*sh), gfp);
+ if (!sh)
+ return NULL;
+
+ sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
+ PAGE_KERNEL, attrs);
+ if (!sh->pages) {
+ kfree(sh);
+ return NULL;
+ }
+ return &sh->sgt;
+}
+
+static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct dma_sgt_handle *sh = sgt_handle(sgt);
+
+ __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
+ __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
+ sg_free_table(&sh->sgt);
+}
+#endif /* CONFIG_DMA_REMAP */
+
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
@@ -755,7 +782,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(phys, size, dir);
if (is_swiotlb_buffer(phys))
- swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
+ swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -768,7 +795,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (is_swiotlb_buffer(phys))
- swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+ swiotlb_sync_single_for_device(dev, phys, size, dir);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(phys, size, dir);
@@ -789,8 +816,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
if (is_swiotlb_buffer(sg_phys(sg)))
- swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
- dir, SYNC_FOR_CPU);
+ swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
+ sg->length, dir);
}
}
@@ -806,8 +833,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
for_each_sg(sgl, sg, nelems, i) {
if (is_swiotlb_buffer(sg_phys(sg)))
- swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
- dir, SYNC_FOR_DEVICE);
+ swiotlb_sync_single_for_device(dev, sg_phys(sg),
+ sg->length, dir);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
@@ -1258,6 +1285,10 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
+#ifdef CONFIG_DMA_REMAP
+ .alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
+ .free_noncontiguous = iommu_dma_free_noncontiguous,
+#endif
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
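The two ops wired up above back the dma_alloc_noncontiguous() family of APIs introduced alongside this series. A minimal consumer sketch, assuming a hypothetical driver with a valid struct device *dev and a 64 KiB buffer; note the IOMMU backend only provides .alloc_noncontiguous/.free_noncontiguous under CONFIG_DMA_REMAP, per the hunk above:

	struct sg_table *sgt;
	void *vaddr;

	/* One IOVA-contiguous allocation; pages may be scattered physically. */
	sgt = dma_alloc_noncontiguous(dev, SZ_64K, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	/* Optional CPU mapping of the same pages. */
	vaddr = dma_vmap_noncontiguous(dev, SZ_64K, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, SZ_64K, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}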
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index de324b4eedfe..7623d8c371f5 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -407,7 +407,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
struct sysmmu_drvdata *data = dev_id;
const struct sysmmu_fault_info *finfo;
unsigned int i, n, itype;
- sysmmu_iova_t fault_addr = -1;
+ sysmmu_iova_t fault_addr;
unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
@@ -630,10 +630,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
- iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
-
- ret = iommu_device_register(&data->iommu);
+ ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
if (ret)
return ret;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b9a974d97831..fc38b1fba7cf 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -63,19 +63,6 @@ static const struct of_device_id l3_device_ids[] = {
/* maximum subwindows permitted per liodn */
static u32 max_subwindow_count;
-/* Pool for fspi allocation */
-static struct gen_pool *spaace_pool;
-
-/**
- * pamu_get_max_subwin_cnt() - Return the maximum supported
- * subwindow count per liodn.
- *
- */
-u32 pamu_get_max_subwin_cnt(void)
-{
- return max_subwindow_count;
-}
-
/**
* pamu_get_ppaace() - Return the primary PAACE
* @liodn: liodn PAACT index for desired PAACE
@@ -155,13 +142,6 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
return fls64(addrspace_size) - 2;
}
-/* Derive the PAACE window count encoding for the subwindow count */
-static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
-{
- /* window count is 2^(WCE+1) bytes */
- return __ffs(subwindow_cnt) - 1;
-}
-
/*
* Set the PAACE type as primary and set the coherency required domain
* attribute
@@ -175,88 +155,10 @@ static void pamu_init_ppaace(struct paace *ppaace)
}
/*
- * Set the PAACE type as secondary and set the coherency required domain
- * attribute.
- */
-static void pamu_init_spaace(struct paace *spaace)
-{
- set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
- set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
- PAACE_M_COHERENCE_REQ);
-}
-
-/*
- * Return the spaace (corresponding to the secondary window index)
- * for a particular ppaace.
- */
-static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
-{
- u32 subwin_cnt;
- struct paace *spaace = NULL;
-
- subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
-
- if (wnum < subwin_cnt)
- spaace = &spaact[paace->fspi + wnum];
- else
- pr_debug("secondary paace out of bounds\n");
-
- return spaace;
-}
-
-/**
- * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
- * required for primary PAACE in the secondary
- * PAACE table.
- * @subwin_cnt: Number of subwindows to be reserved.
- *
- * A PPAACE entry may have a number of associated subwindows. A subwindow
- * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
- * the index (fspi) of the first SPAACE entry in the SPAACT table. This
- * function returns the index of the first SPAACE entry. The remaining
- * SPAACE entries are reserved contiguously from that index.
- *
- * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
- * If no SPAACE entry is available or the allocator can not reserve the required
- * number of contiguous entries function returns ULONG_MAX indicating a failure.
- *
- */
-static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
-{
- unsigned long spaace_addr;
-
- spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
- if (!spaace_addr)
- return ULONG_MAX;
-
- return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
-}
-
-/* Release the subwindows reserved for a particular LIODN */
-void pamu_free_subwins(int liodn)
-{
- struct paace *ppaace;
- u32 subwin_cnt, size;
-
- ppaace = pamu_get_ppaace(liodn);
- if (!ppaace) {
- pr_debug("Invalid liodn entry\n");
- return;
- }
-
- if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
- subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
- size = (subwin_cnt - 1) * sizeof(struct paace);
- gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
- set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
- }
-}
-
-/*
* Function used for updating stash destination for the corresponding
* LIODN.
*/
-int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
+int pamu_update_paace_stash(int liodn, u32 value)
{
struct paace *paace;
@@ -265,11 +167,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
pr_debug("Invalid liodn entry\n");
return -ENOENT;
}
- if (subwin) {
- paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace)
- return -ENOENT;
- }
set_bf(paace->impl_attr, PAACE_IA_CID, value);
mb();
@@ -277,65 +174,20 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
return 0;
}
-/* Disable a subwindow corresponding to the LIODN */
-int pamu_disable_spaace(int liodn, u32 subwin)
-{
- struct paace *paace;
-
- paace = pamu_get_ppaace(liodn);
- if (!paace) {
- pr_debug("Invalid liodn entry\n");
- return -ENOENT;
- }
- if (subwin) {
- paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace)
- return -ENOENT;
- set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
- } else {
- set_bf(paace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_DENIED);
- }
-
- mb();
-
- return 0;
-}
-
/**
* pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
*
* @liodn: Logical IO device number
- * @win_addr: starting address of DSA window
- * @win-size: size of DSA window
* @omi: Operation mapping index -- if ~omi == 0 then omi not defined
- * @rpn: real (true physical) page number
* @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
* stashid not defined
- * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
- * snoopid not defined
- * @subwin_cnt: number of sub-windows
* @prot: window permissions
*
* Returns 0 on success, or a negative error code on failure.
*/
-int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
- u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
- u32 subwin_cnt, int prot)
+int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot)
{
struct paace *ppaace;
- unsigned long fspi;
-
- if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
- pr_debug("window size too small or not a power of two %pa\n",
- &win_size);
- return -EINVAL;
- }
-
- if (win_addr & (win_size - 1)) {
- pr_debug("window address is not aligned with window size\n");
- return -EINVAL;
- }
ppaace = pamu_get_ppaace(liodn);
if (!ppaace)
@@ -343,13 +195,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
/* window size is 2^(WSE+1) bytes */
set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
- map_addrspace_size_to_wse(win_size));
+ map_addrspace_size_to_wse(1ULL << 36));
pamu_init_ppaace(ppaace);
- ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
- set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
- (win_addr >> PAMU_PAGE_SHIFT));
+ ppaace->wbah = 0;
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
/* set up operation mapping if it's configured */
if (omi < OME_NUMBER_ENTRIES) {
@@ -364,120 +215,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
if (~stashid != 0)
set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
- /* configure snoop id */
- if (~snoopid != 0)
- ppaace->domain_attr.to_host.snpid = snoopid;
-
- if (subwin_cnt) {
- /* The first entry is in the primary PAACE instead */
- fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
- if (fspi == ULONG_MAX) {
- pr_debug("spaace indexes exhausted\n");
- return -EINVAL;
- }
-
- /* window count is 2^(WCE+1) bytes */
- set_bf(ppaace->impl_attr, PAACE_IA_WCE,
- map_subwindow_cnt_to_wce(subwin_cnt));
- set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
- ppaace->fspi = fspi;
- } else {
- set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
- ppaace->twbah = rpn >> 20;
- set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
- set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
- set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
- set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
- }
- mb();
-
- return 0;
-}
-
-/**
- * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
- *
- * @liodn: Logical IO device number
- * @subwin_cnt: number of sub-windows associated with dma-window
- * @subwin: subwindow index
- * @subwin_size: size of subwindow
- * @omi: Operation mapping index
- * @rpn: real (true physical) page number
- * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
- * snoopid not defined
- * @stashid: cache stash id for associated cpu
- * @enable: enable/disable subwindow after reconfiguration
- * @prot: sub window permissions
- *
- * Returns 0 upon success else error code < 0 returned
- */
-int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
- phys_addr_t subwin_size, u32 omi, unsigned long rpn,
- u32 snoopid, u32 stashid, int enable, int prot)
-{
- struct paace *paace;
-
- /* setup sub-windows */
- if (!subwin_cnt) {
- pr_debug("Invalid subwindow count\n");
- return -EINVAL;
- }
-
- paace = pamu_get_ppaace(liodn);
- if (subwin > 0 && subwin < subwin_cnt && paace) {
- paace = pamu_get_spaace(paace, subwin - 1);
-
- if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
- pamu_init_spaace(paace);
- set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
- }
- }
-
- if (!paace) {
- pr_debug("Invalid liodn entry\n");
- return -ENOENT;
- }
-
- if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
- pr_debug("subwindow size out of range, or not a power of 2\n");
- return -EINVAL;
- }
-
- if (rpn == ULONG_MAX) {
- pr_debug("real page number out of range\n");
- return -EINVAL;
- }
-
- /* window size is 2^(WSE+1) bytes */
- set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
- map_addrspace_size_to_wse(subwin_size));
-
- set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
- paace->twbah = rpn >> 20;
- set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
- set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
-
- /* configure snoop id */
- if (~snoopid != 0)
- paace->domain_attr.to_host.snpid = snoopid;
-
- /* set up operation mapping if it's configured */
- if (omi < OME_NUMBER_ENTRIES) {
- set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
- paace->op_encode.index_ot.omi = omi;
- } else if (~omi != 0) {
- pr_debug("bad operation mapping index: %d\n", omi);
- return -EINVAL;
- }
-
- if (~stashid != 0)
- set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
-
- smp_wmb();
-
- if (enable)
- set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
-
+ set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
+ ppaace->twbah = 0;
+ set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, 0);
+ set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
+ set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
mb();
return 0;
@@ -1129,17 +872,6 @@ static int fsl_pamu_probe(struct platform_device *pdev)
spaact_phys = virt_to_phys(spaact);
omt_phys = virt_to_phys(omt);
- spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
- if (!spaace_pool) {
- ret = -ENOMEM;
- dev_err(dev, "Failed to allocate spaace gen pool\n");
- goto error;
- }
-
- ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
- if (ret)
- goto error_genpool;
-
pamubypenr = in_be32(&guts_regs->pamubypenr);
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
@@ -1167,9 +899,6 @@ static int fsl_pamu_probe(struct platform_device *pdev)
return 0;
-error_genpool:
- gen_pool_destroy(spaace_pool);
-
error:
if (irq != NO_IRQ)
free_irq(irq, data);
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index e1496ba96160..36df7975ff64 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -383,18 +383,10 @@ struct ome {
int pamu_domain_init(void);
int pamu_enable_liodn(int liodn);
int pamu_disable_liodn(int liodn);
-void pamu_free_subwins(int liodn);
-int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
- u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
- u32 subwin_cnt, int prot);
-int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
- phys_addr_t subwin_size, u32 omi, unsigned long rpn,
- uint32_t snoopid, u32 stashid, int enable, int prot);
+int pamu_config_ppaace(int liodn, u32 omi, uint32_t stashid, int prot);
u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
void get_ome_index(u32 *omi_index, struct device *dev);
-int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
-int pamu_disable_spaace(int liodn, u32 subwin);
-u32 pamu_get_max_subwin_cnt(void);
+int pamu_update_paace_stash(int liodn, u32 value);
#endif /* __FSL_PAMU_H */
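With subwindow support removed, a LIODN is always programmed with a single window spanning the full 36-bit aperture. A condensed sketch of the resulting per-LIODN setup, assuming omi_index and stash_id were obtained via get_ome_index()/get_stash_id(); the real pamu_set_liodn() in fsl_pamu_domain.c below issues two pamu_config_ppaace() calls, folded into one here for illustration:

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, omi_index, stash_id,
					 PAACE_AP_PERMS_QUERY |
					 PAACE_AP_PERMS_UPDATE);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (!ret)
		ret = pamu_enable_liodn(liodn);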
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index b2110767caf4..a47f47307109 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -54,159 +54,18 @@ static int __init iommu_init_mempool(void)
return 0;
}
-static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
-{
- u32 win_cnt = dma_domain->win_cnt;
- struct dma_window *win_ptr = &dma_domain->win_arr[0];
- struct iommu_domain_geometry *geom;
-
- geom = &dma_domain->iommu_domain.geometry;
-
- if (!win_cnt || !dma_domain->geom_size) {
- pr_debug("Number of windows/geometry not configured for the domain\n");
- return 0;
- }
-
- if (win_cnt > 1) {
- u64 subwin_size;
- dma_addr_t subwin_iova;
- u32 wnd;
-
- subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
- subwin_iova = iova & ~(subwin_size - 1);
- wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
- win_ptr = &dma_domain->win_arr[wnd];
- }
-
- if (win_ptr->valid)
- return win_ptr->paddr + (iova & (win_ptr->size - 1));
-
- return 0;
-}
-
-static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
-{
- struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
- int i, ret;
- unsigned long rpn, flags;
-
- for (i = 0; i < dma_domain->win_cnt; i++) {
- if (sub_win_ptr[i].valid) {
- rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
- spin_lock_irqsave(&iommu_lock, flags);
- ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
- sub_win_ptr[i].size,
- ~(u32)0,
- rpn,
- dma_domain->snoop_id,
- dma_domain->stash_id,
- (i > 0) ? 1 : 0,
- sub_win_ptr[i].prot);
- spin_unlock_irqrestore(&iommu_lock, flags);
- if (ret) {
- pr_debug("SPAACE configuration failed for liodn %d\n",
- liodn);
- return ret;
- }
- }
- }
-
- return ret;
-}
-
-static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
-{
- int ret;
- struct dma_window *wnd = &dma_domain->win_arr[0];
- phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu_lock, flags);
- ret = pamu_config_ppaace(liodn, wnd_addr,
- wnd->size,
- ~(u32)0,
- wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
- spin_unlock_irqrestore(&iommu_lock, flags);
- if (ret)
- pr_debug("PAACE configuration failed for liodn %d\n", liodn);
-
- return ret;
-}
-
-/* Map the DMA window corresponding to the LIODN */
-static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
-{
- if (dma_domain->win_cnt > 1)
- return map_subwins(liodn, dma_domain);
- else
- return map_win(liodn, dma_domain);
-}
-
-/* Update window/subwindow mapping for the LIODN */
-static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
-{
- int ret;
- struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
- unsigned long flags;
-
- spin_lock_irqsave(&iommu_lock, flags);
- if (dma_domain->win_cnt > 1) {
- ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
- wnd->size,
- ~(u32)0,
- wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id,
- dma_domain->stash_id,
- (wnd_nr > 0) ? 1 : 0,
- wnd->prot);
- if (ret)
- pr_debug("Subwindow reconfiguration failed for liodn %d\n",
- liodn);
- } else {
- phys_addr_t wnd_addr;
-
- wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
-
- ret = pamu_config_ppaace(liodn, wnd_addr,
- wnd->size,
- ~(u32)0,
- wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
- if (ret)
- pr_debug("Window reconfiguration failed for liodn %d\n",
- liodn);
- }
-
- spin_unlock_irqrestore(&iommu_lock, flags);
-
- return ret;
-}
-
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
u32 val)
{
- int ret = 0, i;
+ int ret = 0;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
- liodn);
+ ret = pamu_update_paace_stash(liodn, val);
+ if (ret) {
+ pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
- return -EINVAL;
- }
-
- for (i = 0; i < dma_domain->win_cnt; i++) {
- ret = pamu_update_paace_stash(liodn, i, val);
- if (ret) {
- pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
- i, liodn);
- spin_unlock_irqrestore(&iommu_lock, flags);
- return ret;
- }
+ return ret;
}
spin_unlock_irqrestore(&iommu_lock, flags);
@@ -215,16 +74,12 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
}
/* Set the geometry parameters for a LIODN */
-static int pamu_set_liodn(int liodn, struct device *dev,
- struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
+static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
+ int liodn)
{
- phys_addr_t window_addr, window_size;
- phys_addr_t subwin_size;
- int ret = 0, i;
u32 omi_index = ~(u32)0;
unsigned long flags;
+ int ret;
/*
* Configure the omi_index at the geometry setup time.
@@ -233,93 +88,30 @@ static int pamu_set_liodn(int liodn, struct device *dev,
*/
get_ome_index(&omi_index, dev);
- window_addr = geom_attr->aperture_start;
- window_size = dma_domain->geom_size;
-
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_disable_liodn(liodn);
- if (!ret)
- ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
- 0, dma_domain->snoop_id,
- dma_domain->stash_id, win_cnt, 0);
+ if (ret)
+ goto out_unlock;
+ ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
+ if (ret)
+ goto out_unlock;
+ ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
+ PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
+out_unlock:
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
- liodn, win_cnt);
- return ret;
- }
-
- if (win_cnt > 1) {
- subwin_size = window_size >> ilog2(win_cnt);
- for (i = 0; i < win_cnt; i++) {
- spin_lock_irqsave(&iommu_lock, flags);
- ret = pamu_disable_spaace(liodn, i);
- if (!ret)
- ret = pamu_config_spaace(liodn, win_cnt, i,
- subwin_size, omi_index,
- 0, dma_domain->snoop_id,
- dma_domain->stash_id,
- 0, 0);
- spin_unlock_irqrestore(&iommu_lock, flags);
- if (ret) {
- pr_debug("SPAACE configuration failed for liodn %d\n",
- liodn);
- return ret;
- }
- }
+ pr_debug("PAACE configuration failed for liodn %d\n",
+ liodn);
}
-
return ret;
}
-static int check_size(u64 size, dma_addr_t iova)
-{
- /*
- * Size must be a power of two and at least be equal
- * to PAMU page size.
- */
- if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
- pr_debug("Size too small or not a power of two\n");
- return -EINVAL;
- }
-
- /* iova must be page size aligned */
- if (iova & (size - 1)) {
- pr_debug("Address is not aligned with window size\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
-{
- struct fsl_dma_domain *domain;
-
- domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
- if (!domain)
- return NULL;
-
- domain->stash_id = ~(u32)0;
- domain->snoop_id = ~(u32)0;
- domain->win_cnt = pamu_get_max_subwin_cnt();
- domain->geom_size = 0;
-
- INIT_LIST_HEAD(&domain->devices);
-
- spin_lock_init(&domain->domain_lock);
-
- return domain;
-}
-
-static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
+static void remove_device_ref(struct device_domain_info *info)
{
unsigned long flags;
list_del(&info->link);
spin_lock_irqsave(&iommu_lock, flags);
- if (win_cnt > 1)
- pamu_free_subwins(info->liodn);
pamu_disable_liodn(info->liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
spin_lock_irqsave(&device_domain_lock, flags);
@@ -337,7 +129,7 @@ static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
/* Remove the device from the domain device list */
list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
if (!dev || (info->dev == dev))
- remove_device_ref(info, dma_domain->win_cnt);
+ remove_device_ref(info);
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
@@ -379,13 +171,10 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
-
if (iova < domain->geometry.aperture_start ||
iova > domain->geometry.aperture_end)
return 0;
-
- return get_phys_addr(dma_domain, iova);
+ return iova;
}
static bool fsl_pamu_capable(enum iommu_cap cap)
@@ -399,10 +188,6 @@ static void fsl_pamu_domain_free(struct iommu_domain *domain)
/* remove all the devices from the device list */
detach_device(NULL, dma_domain);
-
- dma_domain->enabled = 0;
- dma_domain->mapped = 0;
-
kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
@@ -413,12 +198,15 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
- dma_domain = iommu_alloc_dma_domain();
- if (!dma_domain) {
- pr_debug("dma_domain allocation failed\n");
+ dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
+ if (!dma_domain)
return NULL;
- }
- /* defaul geometry 64 GB i.e. maximum system address */
+
+ dma_domain->stash_id = ~(u32)0;
+ INIT_LIST_HEAD(&dma_domain->devices);
+ spin_lock_init(&dma_domain->domain_lock);
+
+ /* default geometry 64 GB i.e. maximum system address */
dma_domain->iommu_domain.geometry.aperture_start = 0;
dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
dma_domain->iommu_domain.geometry.force_aperture = true;
@@ -426,24 +214,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
return &dma_domain->iommu_domain;
}
-/* Configure geometry settings for all LIODNs associated with domain */
-static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
- geom_attr, win_cnt);
- if (ret)
- break;
- }
-
- return ret;
-}
-
/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
@@ -459,198 +229,13 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
return ret;
}
-/* Update domain mappings for all LIODNs associated with the domain */
-static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = update_liodn(info->liodn, dma_domain, wnd_nr);
- if (ret)
- break;
- }
- return ret;
-}
-
-static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
- ret = pamu_disable_liodn(info->liodn);
- if (!ret)
- dma_domain->enabled = 0;
- } else {
- ret = pamu_disable_spaace(info->liodn, wnd_nr);
- }
- }
-
- return ret;
-}
-
-static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Number of windows not configured\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return;
- }
-
- if (wnd_nr >= dma_domain->win_cnt) {
- pr_debug("Invalid window index\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return;
- }
-
- if (dma_domain->win_arr[wnd_nr].valid) {
- ret = disable_domain_win(dma_domain, wnd_nr);
- if (!ret) {
- dma_domain->win_arr[wnd_nr].valid = 0;
- dma_domain->mapped--;
- }
- }
-
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-}
-
-static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- struct dma_window *wnd;
- int pamu_prot = 0;
- int ret;
- unsigned long flags;
- u64 win_size;
-
- if (prot & IOMMU_READ)
- pamu_prot |= PAACE_AP_PERMS_QUERY;
- if (prot & IOMMU_WRITE)
- pamu_prot |= PAACE_AP_PERMS_UPDATE;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Number of windows not configured\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENODEV;
- }
-
- if (wnd_nr >= dma_domain->win_cnt) {
- pr_debug("Invalid window index\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
- if (size > win_size) {
- pr_debug("Invalid window size\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- if (dma_domain->win_cnt == 1) {
- if (dma_domain->enabled) {
- pr_debug("Disable the window before updating the mapping\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- ret = check_size(size, domain->geometry.aperture_start);
- if (ret) {
- pr_debug("Aperture start not aligned to the size\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
- }
-
- wnd = &dma_domain->win_arr[wnd_nr];
- if (!wnd->valid) {
- wnd->paddr = paddr;
- wnd->size = size;
- wnd->prot = pamu_prot;
-
- ret = update_domain_mapping(dma_domain, wnd_nr);
- if (!ret) {
- wnd->valid = 1;
- dma_domain->mapped++;
- }
- } else {
- pr_debug("Disable the window before updating the mapping\n");
- ret = -EBUSY;
- }
-
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-/*
- * Attach the LIODN to the DMA domain and configure the geometry
- * and window mappings.
- */
-static int handle_attach_device(struct fsl_dma_domain *dma_domain,
- struct device *dev, const u32 *liodn,
- int num)
-{
- unsigned long flags;
- struct iommu_domain *domain = &dma_domain->iommu_domain;
- int ret = 0;
- int i;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- for (i = 0; i < num; i++) {
- /* Ensure that LIODN value is valid */
- if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
- pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
- liodn[i], dev->of_node);
- ret = -EINVAL;
- break;
- }
-
- attach_device(dma_domain, liodn[i], dev);
- /*
- * Check if geometry has already been configured
- * for the domain. If yes, set the geometry for
- * the LIODN.
- */
- if (dma_domain->win_arr) {
- u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
-
- ret = pamu_set_liodn(liodn[i], dev, dma_domain,
- &domain->geometry, win_cnt);
- if (ret)
- break;
- if (dma_domain->mapped) {
- /*
- * Create window/subwindow mapping for
- * the LIODN.
- */
- ret = map_liodn(liodn[i], dma_domain);
- if (ret)
- break;
- }
- }
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
static int fsl_pamu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+ unsigned long flags;
+ int len, ret = 0, i;
const u32 *liodn;
- u32 liodn_cnt;
- int len, ret = 0;
struct pci_dev *pdev = NULL;
struct pci_controller *pci_ctl;
@@ -670,14 +255,30 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
}
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
- if (liodn) {
- liodn_cnt = len / sizeof(u32);
- ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
- } else {
+ if (!liodn) {
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
- ret = -EINVAL;
+ return -EINVAL;
}
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ for (i = 0; i < len / sizeof(u32); i++) {
+ /* Ensure that LIODN value is valid */
+ if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
+ pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
+ liodn[i], dev->of_node);
+ ret = -EINVAL;
+ break;
+ }
+
+ attach_device(dma_domain, liodn[i], dev);
+ ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
+ if (ret)
+ break;
+ ret = pamu_enable_liodn(liodn[i]);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return ret;
}
@@ -712,202 +313,26 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}
-static int configure_domain_geometry(struct iommu_domain *domain, void *data)
-{
- struct iommu_domain_geometry *geom_attr = data;
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- dma_addr_t geom_size;
- unsigned long flags;
-
- geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
- /*
- * Sanity check the geometry size. Also, we do not support
- * DMA outside of the geometry.
- */
- if (check_size(geom_size, geom_attr->aperture_start) ||
- !geom_attr->force_aperture) {
- pr_debug("Invalid PAMU geometry attributes\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Copy the domain geometry information */
- memcpy(&domain->geometry, geom_attr,
- sizeof(struct iommu_domain_geometry));
- dma_domain->geom_size = geom_size;
-
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return 0;
-}
-
/* Set the domain stash attribute */
-static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
+int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
- struct pamu_stash_attribute *stash_attr = data;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
spin_lock_irqsave(&dma_domain->domain_lock, flags);
-
- memcpy(&dma_domain->dma_stash, stash_attr,
- sizeof(struct pamu_stash_attribute));
-
- dma_domain->stash_id = get_stash_id(stash_attr->cache,
- stash_attr->cpu);
+ dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
if (dma_domain->stash_id == ~(u32)0) {
pr_debug("Invalid stash attributes\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
-
ret = update_domain_stash(dma_domain, dma_domain->stash_id);
-
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return ret;
}
-/* Configure domain dma state i.e. enable/disable DMA */
-static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
-{
- struct device_domain_info *info;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
-
- if (enable && !dma_domain->mapped) {
- pr_debug("Can't enable DMA domain without valid mapping\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENODEV;
- }
-
- dma_domain->enabled = enable;
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = (enable) ? pamu_enable_liodn(info->liodn) :
- pamu_disable_liodn(info->liodn);
- if (ret)
- pr_debug("Unable to set dma state for liodn %d",
- info->liodn);
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return 0;
-}
-
-static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Ensure that the geometry has been set for the domain */
- if (!dma_domain->geom_size) {
- pr_debug("Please configure geometry before setting the number of windows\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- /*
- * Ensure we have valid window count i.e. it should be less than
- * maximum permissible limit and should be a power of two.
- */
- if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
- pr_debug("Invalid window count\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- w_count > 1 ? w_count : 0);
- if (!ret) {
- kfree(dma_domain->win_arr);
- dma_domain->win_arr = kcalloc(w_count,
- sizeof(*dma_domain->win_arr),
- GFP_ATOMIC);
- if (!dma_domain->win_arr) {
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENOMEM;
- }
- dma_domain->win_cnt = w_count;
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- int ret = 0;
-
- switch (attr_type) {
- case DOMAIN_ATTR_GEOMETRY:
- ret = configure_domain_geometry(domain, data);
- break;
- case DOMAIN_ATTR_FSL_PAMU_STASH:
- ret = configure_domain_stash(dma_domain, data);
- break;
- case DOMAIN_ATTR_FSL_PAMU_ENABLE:
- ret = configure_domain_dma_state(dma_domain, *(int *)data);
- break;
- case DOMAIN_ATTR_WINDOWS:
- ret = fsl_pamu_set_windows(domain, *(u32 *)data);
- break;
- default:
- pr_debug("Unsupported attribute type\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- int ret = 0;
-
- switch (attr_type) {
- case DOMAIN_ATTR_FSL_PAMU_STASH:
- memcpy(data, &dma_domain->dma_stash,
- sizeof(struct pamu_stash_attribute));
- break;
- case DOMAIN_ATTR_FSL_PAMU_ENABLE:
- *(int *)data = dma_domain->enabled;
- break;
- case DOMAIN_ATTR_FSL_PAMUV1:
- *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
- break;
- case DOMAIN_ATTR_WINDOWS:
- *(u32 *)data = dma_domain->win_cnt;
- break;
- default:
- pr_debug("Unsupported attribute type\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
struct iommu_group *group;
@@ -1031,11 +456,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
- .domain_window_enable = fsl_pamu_window_enable,
- .domain_window_disable = fsl_pamu_window_disable,
.iova_to_phys = fsl_pamu_iova_to_phys,
- .domain_set_attr = fsl_pamu_set_domain_attr,
- .domain_get_attr = fsl_pamu_get_domain_attr,
.probe_device = fsl_pamu_probe_device,
.release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
@@ -1053,9 +474,7 @@ int __init pamu_domain_init(void)
if (ret)
return ret;
- iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
-
- ret = iommu_device_register(&pamu_iommu);
+ ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
if (ret) {
iommu_device_sysfs_remove(&pamu_iommu);
pr_err("Can't register iommu device\n");
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
index 2865d42782e8..95ac1b3cab3b 100644
--- a/drivers/iommu/fsl_pamu_domain.h
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -9,56 +9,10 @@
#include "fsl_pamu.h"
-struct dma_window {
- phys_addr_t paddr;
- u64 size;
- int valid;
- int prot;
-};
-
struct fsl_dma_domain {
- /*
- * Indicates the geometry size for the domain.
- * This would be set when the geometry is
- * configured for the domain.
- */
- dma_addr_t geom_size;
- /*
- * Number of windows assocaited with this domain.
- * During domain initialization, it is set to the
- * the maximum number of subwindows allowed for a LIODN.
- * Minimum value for this is 1 indicating a single PAMU
- * window, without any sub windows. Value can be set/
- * queried by set_attr/get_attr API for DOMAIN_ATTR_WINDOWS.
- * Value can only be set once the geometry has been configured.
- */
- u32 win_cnt;
- /*
- * win_arr contains information of the configured
- * windows for a domain. This is allocated only
- * when the number of windows for the domain are
- * set.
- */
- struct dma_window *win_arr;
/* list of devices associated with the domain */
struct list_head devices;
- /* dma_domain states:
- * mapped - A particular mapping has been created
- * within the configured geometry.
- * enabled - DMA has been enabled for the given
- * domain. This translates to setting of the
- * valid bit for the primary PAACE in the PAMU
- * PAACT table. Domain geometry should be set and
- * it must have a valid mapping before DMA can be
- * enabled for it.
- *
- */
- int mapped;
- int enabled;
- /* stash_id obtained from the stash attribute details */
u32 stash_id;
- struct pamu_stash_attribute dma_stash;
- u32 snoop_id;
struct iommu_domain iommu_domain;
spinlock_t domain_lock;
};
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index d5c51b5c20af..1757ac1e1623 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1140,9 +1140,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (err)
goto err_unmap;
- iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
-
- err = iommu_device_register(&iommu->iommu);
+ err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
if (err)
goto err_unmap;
}
@@ -1205,6 +1203,63 @@ static inline void reclaim_free_desc(struct q_inval *qi)
}
}
+static const char *qi_type_string(u8 type)
+{
+ switch (type) {
+ case QI_CC_TYPE:
+ return "Context-cache Invalidation";
+ case QI_IOTLB_TYPE:
+ return "IOTLB Invalidation";
+ case QI_DIOTLB_TYPE:
+ return "Device-TLB Invalidation";
+ case QI_IEC_TYPE:
+ return "Interrupt Entry Cache Invalidation";
+ case QI_IWD_TYPE:
+ return "Invalidation Wait";
+ case QI_EIOTLB_TYPE:
+ return "PASID-based IOTLB Invalidation";
+ case QI_PC_TYPE:
+ return "PASID-cache Invalidation";
+ case QI_DEIOTLB_TYPE:
+ return "PASID-based Device-TLB Invalidation";
+ case QI_PGRP_RESP_TYPE:
+ return "Page Group Response";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
+{
+ unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
+ u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
+ struct qi_desc *desc = iommu->qi->desc + head;
+
+ if (fault & DMA_FSTS_IQE)
+ pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
+ DMAR_IQER_REG_IQEI(iqe_err));
+ if (fault & DMA_FSTS_ITE)
+ pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
+ DMAR_IQER_REG_ITESID(iqe_err));
+ if (fault & DMA_FSTS_ICE)
+ pr_err("VT-d detected Invalidation Completion Error: SID %llx",
+ DMAR_IQER_REG_ICESID(iqe_err));
+
+ pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
+ qi_type_string(desc->qw0 & 0xf),
+ (unsigned long long)desc->qw0,
+ (unsigned long long)desc->qw1);
+
+ head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
+ head <<= qi_shift(iommu);
+ desc = iommu->qi->desc + head;
+
+ pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
+ qi_type_string(desc->qw0 & 0xf),
+ (unsigned long long)desc->qw0,
+ (unsigned long long)desc->qw1);
+}
+
static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
@@ -1216,6 +1271,8 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
return -EAGAIN;
fault = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
+ qi_dump_fault(iommu, fault);
/*
* If IQE happens, the head points to the descriptor associated
@@ -1232,12 +1289,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
* used by software as private data. We won't print
* out these two qw's for security consideration.
*/
- pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
- (unsigned long long)desc->qw0,
- (unsigned long long)desc->qw1);
memcpy(desc, qi->desc + (wait_index << shift),
1 << shift);
writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+ pr_info("Invalidation Queue Error (IQE) cleared\n");
return -EINVAL;
}
}
@@ -1254,6 +1309,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+ pr_info("Invalidation Time-out Error (ITE) cleared\n");
do {
if (qi->desc_status[head] == QI_IN_USE)
@@ -1265,8 +1321,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
return -EAGAIN;
}
- if (fault & DMA_FSTS_ICE)
+ if (fault & DMA_FSTS_ICE) {
writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+ pr_info("Invalidation Completion Error (ICE) cleared\n");
+ }
return 0;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index ee0932307d64..708f430af1c4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
-static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
@@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
dmar_map_gfx = 0;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
- pr_info("Forcing DAC for PCI devices\n");
- dmar_forcedac = 1;
+ pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
+ iommu_dma_forcedac = true;
} else if (!strncmp(str, "strict", 6)) {
pr_info("Disable batched IOTLB flush\n");
intel_iommu_strict = 1;
@@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (iommu != skip) {
- if (!ecap_sc_support(iommu->ecap)) {
+ /*
+ * If the hardware is operating in the scalable mode,
+ * the snooping control is always supported since we
+ * always set PASID-table-entry.PGSNP bit if the domain
+ * is managed outside (UNMANAGED).
+ */
+ if (!sm_supported(iommu) &&
+ !ecap_sc_support(iommu->ecap)) {
ret = 0;
break;
}
@@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
readl, (sts & DMA_GSTS_RTPS), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ if (sm_supported(iommu))
+ qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -2289,6 +2300,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
return level;
}
+/*
+ * Ensure that old small page tables are removed to make room for superpage(s).
+ * We're going to add new large pages, so make sure we don't remove their parent
+ * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
+ */
+static void switch_to_super_page(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long end_pfn, int level)
+{
+ unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct dma_pte *pte = NULL;
+ int i;
+
+ while (start_pfn <= end_pfn) {
+ if (!pte)
+ pte = pfn_to_dma_pte(domain, start_pfn, &level);
+
+ if (dma_pte_present(pte)) {
+ dma_pte_free_pagetable(domain, start_pfn,
+ start_pfn + lvl_pages - 1,
+ level + 1);
+
+ for_each_domain_iommu(i, domain)
+ iommu_flush_iotlb_psi(g_iommus[i], domain,
+ start_pfn, lvl_pages,
+ 0, 0);
+ }
+
+ pte++;
+ start_pfn += lvl_pages;
+ if (first_pte_in_page(pte))
+ pte = NULL;
+ }
+}
+
static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
@@ -2305,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -EINVAL;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+ attr |= DMA_FL_PTE_PRESENT;
if (domain_use_first_level(domain)) {
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (domain->domain.type == IOMMU_DOMAIN_DMA) {
attr |= DMA_FL_PTE_ACCESS;
@@ -2329,22 +2376,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -ENOMEM;
/* It is large page*/
if (largepage_lvl > 1) {
- unsigned long nr_superpages, end_pfn;
+ unsigned long end_pfn;
pteval |= DMA_PTE_LARGE_PAGE;
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
- nr_superpages = nr_pages / lvl_pages;
- end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
-
- /*
- * Ensure that old small page tables are
- * removed to make room for superpage(s).
- * We're adding new large pages, so make sure
- * we don't remove their parent tables.
- */
- dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
- largepage_lvl + 1);
+ end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
+ switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
} else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
@@ -2422,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
+
+ if (sm_supported(iommu))
+ qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
+
iommu->flush.flush_iotlb(iommu,
did_old,
0,
@@ -2505,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ flags |= PASID_FLAG_PAGE_SNOOP;
+
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
domain->iommu_did[iommu->seq_id],
flags);
@@ -3267,8 +3310,6 @@ static int __init init_dmars(void)
register_pasid_allocator(iommu);
#endif
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3458,12 +3499,7 @@ static int init_iommu_hw(void)
}
iommu_flush_write_buffer(iommu);
-
iommu_set_root_entry(iommu);
-
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}
@@ -3846,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
goto disable_iommu;
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
@@ -4065,35 +4099,6 @@ static struct notifier_block intel_iommu_memory_nb = {
.priority = 0
};
-static void free_all_cpu_cached_iovas(unsigned int cpu)
-{
- int i;
-
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct dmar_domain *domain;
- int did;
-
- if (!iommu)
- continue;
-
- for (did = 0; did < cap_ndoms(iommu->cap); did++) {
- domain = get_iommu_domain(iommu, (u16)did);
-
- if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
- continue;
-
- iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain);
- }
- }
-}
-
-static int intel_iommu_cpu_dead(unsigned int cpu)
-{
- free_all_cpu_cached_iovas(cpu);
- return 0;
-}
-
static void intel_disable_iommus(void)
{
struct intel_iommu *iommu = NULL;
@@ -4377,19 +4382,28 @@ int __init intel_iommu_init(void)
down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
+ /*
+ * The flush queue implementation does not perform
+ * page-selective invalidations that are required for efficient
+ * TLB flushes in virtual environments. The benefit of batching
+ * is likely to be much lower than the overhead of synchronizing
+ * the virtual and physical IOMMU page-tables.
+ */
+ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
+ pr_warn("IOMMU batching is disabled due to virtualization");
+ intel_iommu_strict = 1;
+ }
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
- iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
- iommu_device_register(&iommu->iommu);
+ iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
}
up_read(&dmar_global_lock);
+ iommu_set_dma_strict(intel_iommu_strict);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
- intel_iommu_cpu_dead);
down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
@@ -5343,6 +5357,8 @@ static int siov_find_pci_dvsec(struct pci_dev *pdev)
static bool
intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
{
+ struct device_domain_info *info = get_domain_info(dev);
+
if (feat == IOMMU_DEV_FEAT_AUX) {
int ret;
@@ -5357,13 +5373,13 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
return !!siov_find_pci_dvsec(to_pci_dev(dev));
}
- if (feat == IOMMU_DEV_FEAT_SVA) {
- struct device_domain_info *info = get_domain_info(dev);
+ if (feat == IOMMU_DEV_FEAT_IOPF)
+ return info && info->pri_supported;
+ if (feat == IOMMU_DEV_FEAT_SVA)
return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
info->pasid_supported && info->pri_supported &&
info->ats_supported;
- }
return false;
}
@@ -5374,12 +5390,18 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
if (feat == IOMMU_DEV_FEAT_AUX)
return intel_iommu_enable_auxd(dev);
+ if (feat == IOMMU_DEV_FEAT_IOPF)
+ return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
+
if (feat == IOMMU_DEV_FEAT_SVA) {
struct device_domain_info *info = get_domain_info(dev);
if (!info)
return -EINVAL;
+ if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+ return -EINVAL;
+
if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
return 0;
}
@@ -5423,87 +5445,23 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
}
static int
-intel_iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+intel_iommu_enable_nesting(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long flags;
- int ret = 0;
-
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
+ int ret = -ENODEV;
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- spin_lock_irqsave(&device_domain_lock, flags);
- if (nested_mode_support() &&
- list_empty(&dmar_domain->devices)) {
- dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
- dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- break;
- default:
- ret = -EINVAL;
- break;
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
+ dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
+ dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
+ ret = 0;
}
+ spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
-static bool domain_use_flush_queue(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool r = true;
-
- if (intel_iommu_strict)
- return false;
-
- /*
- * The flush queue implementation does not perform page-selective
- * invalidations that are required for efficient TLB flushes in virtual
- * environments. The benefit of batching is likely to be much lower than
- * the overhead of synchronizing the virtual and physical IOMMU
- * page-tables.
- */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!cap_caching_mode(iommu->cap))
- continue;
-
- pr_warn_once("IOMMU batching is disabled due to virtualization");
- r = false;
- break;
- }
- rcu_read_unlock();
-
- return r;
-}
-
-static int
-intel_iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- return -ENODEV;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = domain_use_flush_queue();
- return 0;
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
-}
-
/*
* Check that the device does not live on an external facing PCI port that is
* marked as untrusted. Such devices should not be able to apply quirks and
@@ -5576,8 +5534,7 @@ const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
- .domain_get_attr = intel_iommu_domain_get_attr,
- .domain_set_attr = intel_iommu_domain_set_attr,
+ .enable_nesting = intel_iommu_enable_nesting,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.aux_attach_dev = intel_iommu_aux_attach_device,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 611ef5243cb6..f912fe45bea2 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
return -ENODEV;
if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
- goto error;
+ return -ENODEV;
if (!dmar_ir_support())
return -ENODEV;
@@ -1280,7 +1280,8 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
break;
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
- set_msi_sid(irte, msi_desc_to_pci_dev(info->desc));
+ set_msi_sid(irte,
+ pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
break;
default:
BUG_ON(1);
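The source-id programmed here is what interrupt remapping validates against the requester ID seen on the wire. On platforms where another device issues DMA and MSIs on behalf of the endpoint (VMD, for example), that ID belongs to the real DMA device, hence the pci_real_dma_dev() lookup; for ordinary devices it degenerates to the identity:

	/* pci_real_dma_dev() is the identity unless the platform remaps IDs. */
	struct pci_dev *dma_dev = pci_real_dma_dev(pdev);	/* usually == pdev */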
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index f26cb6195b2c..72646bafc52f 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -24,7 +24,6 @@
/*
* Intel IOMMU system wide PASID name space:
*/
-static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
@@ -231,7 +230,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
return info->pasid_table;
}
-int intel_pasid_get_dev_max_id(struct device *dev)
+static int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
@@ -242,7 +241,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
return info->pasid_table->max_pasid;
}
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
+static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
@@ -259,19 +258,25 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
- spin_lock(&pasid_lock);
+retry:
entries = get_pasid_table_from_pde(&dir[dir_index]);
if (!entries) {
entries = alloc_pgtable_page(info->iommu->node);
- if (!entries) {
- spin_unlock(&pasid_lock);
+ if (!entries)
return NULL;
- }
- WRITE_ONCE(dir[dir_index].val,
- (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
+ /*
+ * The pasid directory table entry won't be freed after
+ * allocation, so there is no race against a free or clear
+ * to worry about. It may, however, be populated by another
+ * thread while we are preparing it; in that case use theirs
+ * and retry.
+ */
+ if (cmpxchg64(&dir[dir_index].val, 0ULL,
+ (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
+ free_pgtable_page(entries);
+ goto retry;
+ }
}
- spin_unlock(&pasid_lock);
return &entries[index];
}
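The lock removal works because a PASID directory slot is written at most once and never cleared: a racing allocator can simply publish its page with a compare-and-swap and free the local copy if it loses. The same idiom in a self-contained C11 sketch (a userspace analogue for illustration only, not kernel code):

	#include <stdatomic.h>
	#include <stdlib.h>

	/* Userspace analogue of the cmpxchg64 publish in intel_pasid_get_entry(). */
	static void *get_or_create(_Atomic(void *) *slot)
	{
		void *cur = atomic_load(slot);

		while (!cur) {
			void *fresh = calloc(1, 4096);	/* stand-in for alloc_pgtable_page() */
			void *expected = NULL;

			if (!fresh)
				return NULL;
			if (atomic_compare_exchange_strong(slot, &expected, fresh))
				return fresh;		/* we won: our table is published */
			free(fresh);			/* lost the race: use theirs */
			cur = expected;
		}
		return cur;
	}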
@@ -394,6 +399,15 @@ static inline void pasid_set_sre(struct pasid_entry *pe)
}
/*
+ * Setup the WPE (Write Protect Enable) field (Bit 132) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_wpe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
+}
+
+/*
* Setup the P(Present) field (Bit 0) of a scalable mode PASID
* entry.
*/
@@ -412,6 +426,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
}
/*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_pgsnp(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
+}
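Both helpers encode the same convention: a scalable-mode PASID entry is an array of 64-bit words in pe->val[], so architectural bit N lands in word N/64 at bit N%64 (bit 132 is val[2] bit 4, bit 88 is val[1] bit 24). A hedged generic form, where pasid_set_bit() is a hypothetical helper the patch does not add:

	/* Hypothetical helper: set architectural bit @nr of a PASID entry. */
	static inline void pasid_set_bit(struct pasid_entry *pe, unsigned int nr)
	{
		pasid_set_bits(&pe->val[nr / 64], 1ULL << (nr % 64), 1ULL << (nr % 64));
	}

Here pasid_set_bit(pe, 132) would be equivalent to pasid_set_wpe(pe), and pasid_set_bit(pe, 88) to pasid_set_pgsnp(pe).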
+
+/*
* Setup the First Level Page table Pointer field (Bit 140~191)
* of a scalable mode PASID entry.
*/
@@ -493,6 +517,9 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
if (WARN_ON(!pte))
return;
+ if (!(pte->val[0] & PASID_PTE_PRESENT))
+ return;
+
did = pasid_get_domain_id(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
@@ -522,6 +549,22 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
+static inline int pasid_enable_wpe(struct pasid_entry *pte)
+{
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err_ratelimited("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ pasid_set_wpe(pte);
+
+ return 0;
+}
+
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -553,6 +596,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
pasid_set_sre(pte);
+ if (pasid_enable_wpe(pte))
+ return -EINVAL;
+
}
if (flags & PASID_FLAG_FL5LP) {
@@ -565,6 +611,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
}
+ if (flags & PASID_FLAG_PAGE_SNOOP)
+ pasid_set_pgsnp(pte);
+
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
@@ -643,6 +692,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ pasid_set_pgsnp(pte);
+
/*
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
@@ -706,6 +758,9 @@ intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
return -EINVAL;
}
pasid_set_sre(pte);
+ /* Enable write protect (WPE) if the guest requested it */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
+ pasid_set_wpe(pte);
}
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 444c0bec221a..5ff61c3d401f 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -48,6 +48,7 @@
*/
#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
#define PASID_FLAG_NESTED BIT(1)
+#define PASID_FLAG_PAGE_SNOOP BIT(2)
/*
 * The PASID_FLAG_FL5LP flag indicates using 5-level paging for first-
@@ -99,14 +100,9 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
}
extern unsigned int intel_pasid_max_id;
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
-void intel_pasid_free_id(u32 pasid);
-void *intel_pasid_lookup_id(u32 pasid);
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
-int intel_pasid_get_dev_max_id(struct device *dev);
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
u32 pasid, u16 did, int flags);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 574a7e657a9a..5165cea90421 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -462,13 +462,12 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
/* Caller must hold pasid_mutex, mm reference */
static int
intel_svm_bind_mm(struct device *dev, unsigned int flags,
- struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct intel_svm *svm = NULL, *t;
struct device_domain_info *info;
struct intel_svm_dev *sdev;
- struct intel_svm *svm = NULL;
unsigned long iflags;
int pasid_max;
int ret;
@@ -494,34 +493,26 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
- struct intel_svm *t;
-
- list_for_each_entry(t, &global_svm_list, list) {
- if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
- continue;
-
- svm = t;
- if (svm->pasid >= pasid_max) {
- dev_warn(dev,
- "Limited PASID width. Cannot use existing PASID %d\n",
- svm->pasid);
- ret = -ENOSPC;
- goto out;
- }
+ list_for_each_entry(t, &global_svm_list, list) {
+ if (t->mm != mm)
+ continue;
- /* Find the matching device in svm list */
- for_each_svm_dev(sdev, svm, dev) {
- if (sdev->ops != ops) {
- ret = -EBUSY;
- goto out;
- }
- sdev->users++;
- goto success;
- }
+ svm = t;
+ if (svm->pasid >= pasid_max) {
+ dev_warn(dev,
+ "Limited PASID width. Cannot use existing PASID %d\n",
+ svm->pasid);
+ ret = -ENOSPC;
+ goto out;
+ }
- break;
+ /* Find the matching device in svm list */
+ for_each_svm_dev(sdev, svm, dev) {
+ sdev->users++;
+ goto success;
}
+
+ break;
}
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
@@ -550,7 +541,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
/* Finish the setup now we know we're keeping it */
sdev->users = 1;
- sdev->ops = ops;
init_rcu_head(&sdev->rcu);
if (!svm) {
@@ -862,7 +852,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
/* Fill in event data for device specific processing */
memset(&event, 0, sizeof(struct iommu_fault_event));
event.fault.type = IOMMU_FAULT_PAGE_REQ;
- event.fault.prm.addr = desc->addr;
+ event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
event.fault.prm.pasid = desc->pasid;
event.fault.prm.grpid = desc->prg_index;
event.fault.prm.perm = prq_to_iommu_prot(desc);
@@ -895,6 +885,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
+ unsigned int flags = 0;
/* Clear PPR bit before reading head/tail registers, to
* ensure that we get a new interrupt if needed. */
@@ -920,7 +911,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
((unsigned long long *)req)[1]);
goto no_pasid;
}
-
+ /* We shall not receive page requests for supervisor SVM */
+ if (req->pm_req && (req->rd_req | req->wr_req)) {
+ pr_err("Unexpected page request in Privilege Mode\n");
+ /* No need to find the matching sdev as for bad_req */
+ goto no_pasid;
+ }
+ /* A DMA read with an exec request is not supported. */
+ if (req->exe_req && req->rd_req) {
+ pr_err("Execution request not supported\n");
+ goto no_pasid;
+ }
if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
svm = ioasid_find(NULL, req->pasid, NULL);
@@ -982,9 +983,11 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (access_error(vma, req))
goto invalid;
- ret = handle_mm_fault(vma, address,
- req->wr_req ? FAULT_FLAG_WRITE : 0,
- NULL);
+ flags = FAULT_FLAG_USER | FAULT_FLAG_REMOTE;
+ if (req->wr_req)
+ flags |= FAULT_FLAG_WRITE;
+
+ ret = handle_mm_fault(vma, address, flags, NULL);
if (ret & VM_FAULT_ERROR)
goto invalid;
@@ -993,13 +996,6 @@ invalid:
mmap_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
- WARN_ON(!sdev);
- if (sdev && sdev->ops && sdev->ops->fault_cb) {
- int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
- (req->exe_req << 1) | (req->pm_req);
- sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
- req->priv_data, rwxp, result);
- }
/* We get here in the error case where the PASID lookup failed,
and these can be NULL. Do not use them below this point! */
sdev = NULL;
@@ -1021,12 +1017,12 @@ no_pasid:
QI_PGRP_RESP_TYPE;
resp.qw1 = QI_PGRP_IDX(req->prg_index) |
QI_PGRP_LPIG(req->lpig);
+ resp.qw2 = 0;
+ resp.qw3 = 0;
if (req->priv_data_present)
memcpy(&resp.qw2, req->priv_data,
sizeof(req->priv_data));
- resp.qw2 = 0;
- resp.qw3 = 0;
qi_submit_sync(iommu, &resp, 1, 0);
}
prq_advance:
@@ -1074,7 +1070,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
if (drvdata)
flags = *(unsigned int *)drvdata;
mutex_lock(&pasid_mutex);
- ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
+ ret = intel_svm_bind_mm(dev, flags, mm, &sdev);
if (ret)
sva = ERR_PTR(ret);
else if (sdev)
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
new file mode 100644
index 000000000000..1df8c1dcae77
--- /dev/null
+++ b/drivers/iommu/io-pgfault.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handle device page faults
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "iommu-sva-lib.h"
+
+/**
+ * struct iopf_queue - IO Page Fault queue
+ * @wq: the fault workqueue
+ * @devices: devices attached to this queue
+ * @lock: protects the device list
+ */
+struct iopf_queue {
+ struct workqueue_struct *wq;
+ struct list_head devices;
+ struct mutex lock;
+};
+
+/**
+ * struct iopf_device_param - IO Page Fault data attached to a device
+ * @dev: the device that owns this param
+ * @queue: IOPF queue
+ * @queue_list: index into queue->devices
+ * @partial: faults that are part of a Page Request Group for which the last
+ * request hasn't been submitted yet.
+ */
+struct iopf_device_param {
+ struct device *dev;
+ struct iopf_queue *queue;
+ struct list_head queue_list;
+ struct list_head partial;
+};
+
+struct iopf_fault {
+ struct iommu_fault fault;
+ struct list_head list;
+};
+
+struct iopf_group {
+ struct iopf_fault last_fault;
+ struct list_head faults;
+ struct work_struct work;
+ struct device *dev;
+};
+
+static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
+ enum iommu_page_response_code status)
+{
+ struct iommu_page_response resp = {
+ .version = IOMMU_PAGE_RESP_VERSION_1,
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = status,
+ };
+
+ if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
+ (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
+ resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+
+ return iommu_page_response(dev, &resp);
+}
+
+static enum iommu_page_response_code
+iopf_handle_single(struct iopf_fault *iopf)
+{
+ vm_fault_t ret;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ unsigned int access_flags = 0;
+ unsigned int fault_flags = FAULT_FLAG_REMOTE;
+ struct iommu_fault_page_request *prm = &iopf->fault.prm;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
+
+ if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
+ return status;
+
+ mm = iommu_sva_find(prm->pasid);
+ if (IS_ERR_OR_NULL(mm))
+ return status;
+
+ mmap_read_lock(mm);
+
+ vma = find_extend_vma(mm, prm->addr);
+ if (!vma)
+ /* Unmapped area */
+ goto out_put_mm;
+
+ if (prm->perm & IOMMU_FAULT_PERM_READ)
+ access_flags |= VM_READ;
+
+ if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
+ access_flags |= VM_WRITE;
+ fault_flags |= FAULT_FLAG_WRITE;
+ }
+
+ if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
+ access_flags |= VM_EXEC;
+ fault_flags |= FAULT_FLAG_INSTRUCTION;
+ }
+
+ if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
+ fault_flags |= FAULT_FLAG_USER;
+
+ if (access_flags & ~vma->vm_flags)
+ /* Access fault */
+ goto out_put_mm;
+
+ ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
+ status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
+ IOMMU_PAGE_RESP_SUCCESS;
+
+out_put_mm:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return status;
+}
+
+static void iopf_handle_group(struct work_struct *work)
+{
+ struct iopf_group *group;
+ struct iopf_fault *iopf, *next;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
+
+ group = container_of(work, struct iopf_group, work);
+
+ list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ /*
+ * For the moment, errors are sticky: don't handle subsequent
+ * faults in the group if there is an error.
+ */
+ if (status == IOMMU_PAGE_RESP_SUCCESS)
+ status = iopf_handle_single(iopf);
+
+ if (!(iopf->fault.prm.flags &
+ IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+ kfree(iopf);
+ }
+
+ iopf_complete_group(group->dev, &group->last_fault, status);
+ kfree(group);
+}
+
+/**
+ * iommu_queue_iopf - IO Page Fault handler
+ * @fault: fault event
+ * @cookie: struct device, passed to iommu_register_device_fault_handler.
+ *
+ * Add a fault to the device workqueue, to be handled by mm.
+ *
+ * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
+ * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
+ * expect a response. It may be generated when disabling a PASID (issuing a
+ * PASID stop request) by some PCI devices.
+ *
+ * The PASID stop request is issued by the device driver before unbind(). Once
+ * it completes, no page request is generated for this PASID anymore and
+ * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
+ * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
+ * for all outstanding page requests to come back with a response before
+ * completing the PASID stop request. Others do not wait for page responses, and
+ * instead issue this Stop Marker that tells us when the PASID can be
+ * reallocated.
+ *
+ * It is safe to discard the Stop Marker because it is an optimization.
+ * a. Page requests, which are posted requests, have been flushed to the IOMMU
+ * when the stop request completes.
+ * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
+ * PASID.
+ *
+ * So even though the Stop Marker might be issued by the device *after* the stop
+ * request completes, outstanding faults will have been dealt with by the time
+ * the PASID is freed.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+{
+ int ret;
+ struct iopf_group *group;
+ struct iopf_fault *iopf, *next;
+ struct iopf_device_param *iopf_param;
+
+ struct device *dev = cookie;
+ struct dev_iommu *param = dev->iommu;
+
+ lockdep_assert_held(&param->lock);
+
+ if (fault->type != IOMMU_FAULT_PAGE_REQ)
+ /* Not a recoverable page fault */
+ return -EOPNOTSUPP;
+
+ /*
+ * As long as we're holding param->lock, the queue can't be unlinked
+ * from the device and therefore cannot disappear.
+ */
+ iopf_param = param->iopf_param;
+ if (!iopf_param)
+ return -ENODEV;
+
+ if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+ iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
+ if (!iopf)
+ return -ENOMEM;
+
+ iopf->fault = *fault;
+
+ /* Non-last request of a group. Postpone until the last one */
+ list_add(&iopf->list, &iopf_param->partial);
+
+ return 0;
+ }
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
+ /*
+ * The caller will send a response to the hardware. But we do
+ * need to clean up before leaving, otherwise partial faults
+ * will be stuck.
+ */
+ ret = -ENOMEM;
+ goto cleanup_partial;
+ }
+
+ group->dev = dev;
+ group->last_fault.fault = *fault;
+ INIT_LIST_HEAD(&group->faults);
+ list_add(&group->last_fault.list, &group->faults);
+ INIT_WORK(&group->work, iopf_handle_group);
+
+ /* See if we have partial faults for this group */
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == fault->prm.grpid)
+ /* Insert *before* the last fault */
+ list_move(&iopf->list, &group->faults);
+ }
+
+ queue_work(iopf_param->queue->wq, &group->work);
+ return 0;
+
+cleanup_partial:
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == fault->prm.grpid) {
+ list_del(&iopf->list);
+ kfree(iopf);
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_queue_iopf);
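An IOMMU driver plugs this in by registering it as the device's fault handler, with the struct device itself as the cookie; the fault reporting path takes param->lock, which is also what makes the lockdep assertion above hold. A sketch of the expected wiring, with a hypothetical driver function name:

	/* Sketch: route recoverable faults for @dev into the IOPF workqueue. */
	static int my_driver_enable_iopf(struct device *dev, struct iopf_queue *queue)
	{
		int ret;

		ret = iopf_queue_add_device(queue, dev);
		if (ret)
			return ret;

		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
		if (ret)
			iopf_queue_remove_device(queue, dev);

		return ret;
	}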
+
+/**
+ * iopf_queue_flush_dev - Ensure that all queued faults have been processed
+ * @dev: the endpoint whose faults need to be flushed.
+ *
+ * The IOMMU driver calls this before releasing a PASID, to ensure that all
+ * pending faults for this PASID have been handled, and won't hit the address
+ * space of the next process that uses this PASID. The driver must make sure
+ * that no new fault is added to the queue. In particular it must flush its
+ * low-level queue before calling this function.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_flush_dev(struct device *dev)
+{
+ int ret = 0;
+ struct iopf_device_param *iopf_param;
+ struct dev_iommu *param = dev->iommu;
+
+ if (!param)
+ return -ENODEV;
+
+ mutex_lock(&param->lock);
+ iopf_param = param->iopf_param;
+ if (iopf_param)
+ flush_workqueue(iopf_param->queue->wq);
+ else
+ ret = -ENODEV;
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
+
+/**
+ * iopf_queue_discard_partial - Remove all pending partial faults
+ * @queue: the queue whose partial faults need to be discarded
+ *
+ * When the hardware queue overflows, the last page faults of pending groups may
+ * have been lost and the IOMMU driver calls this to discard all partial faults. The
+ * driver shouldn't be adding new faults to this queue concurrently.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ struct iopf_fault *iopf, *next;
+ struct iopf_device_param *iopf_param;
+
+ if (!queue)
+ return -EINVAL;
+
+ mutex_lock(&queue->lock);
+ list_for_each_entry(iopf_param, &queue->devices, queue_list) {
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial,
+ list) {
+ list_del(&iopf->list);
+ kfree(iopf);
+ }
+ }
+ mutex_unlock(&queue->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
+
+/**
+ * iopf_queue_add_device - Add producer to the fault queue
+ * @queue: IOPF queue
+ * @dev: device to add
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
+{
+ int ret = -EBUSY;
+ struct iopf_device_param *iopf_param;
+ struct dev_iommu *param = dev->iommu;
+
+ if (!param)
+ return -ENODEV;
+
+ iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
+ if (!iopf_param)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&iopf_param->partial);
+ iopf_param->queue = queue;
+ iopf_param->dev = dev;
+
+ mutex_lock(&queue->lock);
+ mutex_lock(&param->lock);
+ if (!param->iopf_param) {
+ list_add(&iopf_param->queue_list, &queue->devices);
+ param->iopf_param = iopf_param;
+ ret = 0;
+ }
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
+
+ if (ret)
+ kfree(iopf_param);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_add_device);
+
+/**
+ * iopf_queue_remove_device - Remove producer from fault queue
+ * @queue: IOPF queue
+ * @dev: device to remove
+ *
+ * Caller makes sure that no more faults are reported for this device.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+{
+ int ret = -EINVAL;
+ struct iopf_fault *iopf, *next;
+ struct iopf_device_param *iopf_param;
+ struct dev_iommu *param = dev->iommu;
+
+ if (!param || !queue)
+ return -EINVAL;
+
+ mutex_lock(&queue->lock);
+ mutex_lock(&param->lock);
+ iopf_param = param->iopf_param;
+ if (iopf_param && iopf_param->queue == queue) {
+ list_del(&iopf_param->queue_list);
+ param->iopf_param = NULL;
+ ret = 0;
+ }
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
+ if (ret)
+ return ret;
+
+ /* Just in case some faults are still stuck */
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
+ kfree(iopf);
+
+ kfree(iopf_param);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
+
+/**
+ * iopf_queue_alloc - Allocate and initialize a fault queue
+ * @name: a unique string identifying the queue (for workqueue)
+ *
+ * Return: the queue on success and NULL on error.
+ */
+struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ struct iopf_queue *queue;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return NULL;
+
+ /*
+ * The WQ is unordered because the low-level handler enqueues faults by
+ * group. PRI requests within a group have to be ordered, but once
+ * that's dealt with, the high-level function can handle groups out of
+ * order.
+ */
+ queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
+ if (!queue->wq) {
+ kfree(queue);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&queue->devices);
+ mutex_init(&queue->lock);
+
+ return queue;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_alloc);
+
+/**
+ * iopf_queue_free - Free IOPF queue
+ * @queue: queue to free
+ *
+ * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
+ * adding/removing devices on this queue anymore.
+ */
+void iopf_queue_free(struct iopf_queue *queue)
+{
+ struct iopf_device_param *iopf_param, *next;
+
+ if (!queue)
+ return;
+
+ list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
+ iopf_queue_remove_device(queue, iopf_param->dev);
+
+ destroy_workqueue(queue->wq);
+ kfree(queue);
+}
+EXPORT_SYMBOL_GPL(iopf_queue_free);
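Taken together, the intended lifetime is one iopf_queue per IOMMU instance: allocate at probe, add devices as they enable IOPF, discard partial faults if the hardware PRI queue overflows, and free the queue at remove. A condensed, hypothetical driver skeleton:

	struct iopf_queue *q = iopf_queue_alloc(dev_name(iommu_dev)); /* probe */
	...
	iopf_queue_add_device(q, dev);		/* device enables IOPF */
	...
	iopf_queue_discard_partial(q);		/* hardware PRI queue overflowed */
	...
	iopf_queue_remove_device(q, dev);	/* device disables IOPF */
	iopf_queue_free(q);			/* remove */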
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index b40990aef3fd..031155010ca8 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -12,4 +12,57 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
void iommu_sva_free_pasid(struct mm_struct *mm);
struct mm_struct *iommu_sva_find(ioasid_t pasid);
+/* I/O Page fault */
+struct device;
+struct iommu_fault;
+struct iopf_queue;
+
+#ifdef CONFIG_IOMMU_SVA_LIB
+int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
+
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_remove_device(struct iopf_queue *queue,
+ struct device *dev);
+int iopf_queue_flush_dev(struct device *dev);
+struct iopf_queue *iopf_queue_alloc(const char *name);
+void iopf_queue_free(struct iopf_queue *queue);
+int iopf_queue_discard_partial(struct iopf_queue *queue);
+
+#else /* CONFIG_IOMMU_SVA_LIB */
+static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+{
+ return -ENODEV;
+}
+
+static inline int iopf_queue_add_device(struct iopf_queue *queue,
+ struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int iopf_queue_remove_device(struct iopf_queue *queue,
+ struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int iopf_queue_flush_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void iopf_queue_free(struct iopf_queue *queue)
+{
+}
+
+static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_IOMMU_SVA_LIB */
#endif /* _IOMMU_SVA_LIB_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d0b0a15dba84..808ab70d5df5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -69,16 +69,7 @@ static const char * const iommu_group_resv_type_string[] = {
};
#define IOMMU_CMD_LINE_DMA_API BIT(0)
-
-static void iommu_set_cmd_line_dma_api(void)
-{
- iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
-}
-
-static bool iommu_cmd_line_dma_api(void)
-{
- return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
-}
+#define IOMMU_CMD_LINE_STRICT BIT(1)
static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev);
@@ -130,9 +121,7 @@ static const char *iommu_domain_type_str(unsigned int t)
static int __init iommu_subsys_init(void)
{
- bool cmd_line = iommu_cmd_line_dma_api();
-
- if (!cmd_line) {
+ if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false);
else
@@ -146,14 +135,32 @@ static int __init iommu_subsys_init(void)
pr_info("Default domain type: %s %s\n",
iommu_domain_type_str(iommu_def_domain_type),
- cmd_line ? "(set via kernel command line)" : "");
+ (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
+ "(set via kernel command line)" : "");
return 0;
}
subsys_initcall(iommu_subsys_init);
-int iommu_device_register(struct iommu_device *iommu)
+/**
+ * iommu_device_register() - Register an IOMMU hardware instance
+ * @iommu: IOMMU handle for the instance
+ * @ops: IOMMU ops to associate with the instance
+ * @hwdev: (optional) actual instance device, used for fwnode lookup
+ *
+ * Return: 0 on success, or an error.
+ */
+int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops, struct device *hwdev)
{
+ /* We need to be able to take module references appropriately */
+ if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
+ return -EINVAL;
+
+ iommu->ops = ops;
+ if (hwdev)
+ iommu->fwnode = hwdev->fwnode;
+
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
@@ -329,10 +336,29 @@ early_param("iommu.passthrough", iommu_set_def_domain_type);
static int __init iommu_dma_setup(char *str)
{
- return kstrtobool(str, &iommu_dma_strict);
+ int ret = kstrtobool(str, &iommu_dma_strict);
+
+ if (!ret)
+ iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
+ return ret;
}
early_param("iommu.strict", iommu_dma_setup);
+void iommu_set_dma_strict(bool strict)
+{
+ if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
+ iommu_dma_strict = strict;
+}
+
+bool iommu_get_dma_strict(struct iommu_domain *domain)
+{
+ /* only allow lazy flushing for DMA domains */
+ if (domain->type == IOMMU_DOMAIN_DMA)
+ return iommu_dma_strict;
+ return true;
+}
+EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
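With the attribute gone, the split is: drivers and arch code feed constraints in through iommu_set_dma_strict() (as the VT-d hunk above does for caching mode), and domain initialisation asks iommu_get_dma_strict() whether a flush queue may be used. A sketch of the consumer side, with a hypothetical helper standing in for the driver-specific part:

	static void my_domain_finalise(struct iommu_domain *domain)
	{
		/* Lazy (batched) invalidation is only ever allowed for DMA domains. */
		if (!iommu_get_dma_strict(domain))
			my_enable_flush_queue(domain);	/* hypothetical */
	}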
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1511,14 +1537,6 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus,
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
-
- if (!iommu_dma_strict) {
- int attr = 1;
- iommu_domain_set_attr(dom,
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
- &attr);
- }
-
return 0;
}
@@ -2610,17 +2628,6 @@ size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
-int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot)
-{
- if (unlikely(domain->ops->domain_window_enable == NULL))
- return -ENODEV;
-
- return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
- prot);
-}
-EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
-
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
@@ -2675,50 +2682,26 @@ static int __init iommu_init(void)
}
core_initcall(iommu_init);
-int iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+int iommu_enable_nesting(struct iommu_domain *domain)
{
- struct iommu_domain_geometry *geometry;
- bool *paging;
- int ret = 0;
-
- switch (attr) {
- case DOMAIN_ATTR_GEOMETRY:
- geometry = data;
- *geometry = domain->geometry;
-
- break;
- case DOMAIN_ATTR_PAGING:
- paging = data;
- *paging = (domain->pgsize_bitmap != 0UL);
- break;
- default:
- if (!domain->ops->domain_get_attr)
- return -EINVAL;
-
- ret = domain->ops->domain_get_attr(domain, attr, data);
- }
-
- return ret;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+ if (!domain->ops->enable_nesting)
+ return -EINVAL;
+ return domain->ops->enable_nesting(domain);
}
-EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
+EXPORT_SYMBOL_GPL(iommu_enable_nesting);
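Callers that used to set DOMAIN_ATTR_NESTING through iommu_domain_set_attr() now call this directly, and must do so on an unmanaged domain before any device is attached (the VT-d implementation above refuses otherwise). A minimal sketch:

	static struct iommu_domain *alloc_nested_domain(struct bus_type *bus)
	{
		struct iommu_domain *domain = iommu_domain_alloc(bus);

		/* Nesting must be requested before the first attach. */
		if (domain && iommu_enable_nesting(domain)) {
			iommu_domain_free(domain);
			domain = NULL;
		}
		return domain;
	}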
-int iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirk)
{
- int ret = 0;
-
- switch (attr) {
- default:
- if (domain->ops->domain_set_attr == NULL)
- return -EINVAL;
-
- ret = domain->ops->domain_set_attr(domain, attr, data);
- }
-
- return ret;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+ if (!domain->ops->set_pgtable_quirks)
+ return -EINVAL;
+ return domain->ops->set_pgtable_quirks(domain, quirk);
}
-EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
+EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
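The quirk mask is passed straight through to the driver, which typically forwards it to its io-pgtable configuration. As an illustration (the quirk choice here is an assumption, not something this hunk adds), a GPU driver wanting outer write-back table walks might do:

	if (iommu_set_pgtable_quirks(domain, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		dev_warn(dev, "outer-WBWA page-table walks not supported\n");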
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
@@ -2777,16 +2760,14 @@ EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
void iommu_set_default_passthrough(bool cmd_line)
{
if (cmd_line)
- iommu_set_cmd_line_dma_api();
-
+ iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}
void iommu_set_default_translated(bool cmd_line)
{
if (cmd_line)
- iommu_set_cmd_line_dma_api();
-
+ iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}
@@ -2878,10 +2859,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
*/
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ if (dev->iommu && dev->iommu->iommu_dev) {
+ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
- if (ops && ops->dev_enable_feat)
- return ops->dev_enable_feat(dev, feat);
+ if (ops->dev_enable_feat)
+ return ops->dev_enable_feat(dev, feat);
+ }
return -ENODEV;
}
@@ -2894,10 +2877,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ if (dev->iommu && dev->iommu->iommu_dev) {
+ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
- if (ops && ops->dev_disable_feat)
- return ops->dev_disable_feat(dev, feat);
+ if (ops->dev_disable_feat)
+ return ops->dev_disable_feat(dev, feat);
+ }
return -EBUSY;
}
@@ -2905,10 +2890,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ if (dev->iommu && dev->iommu->iommu_dev) {
+ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
- if (ops && ops->dev_feat_enabled)
- return ops->dev_feat_enabled(dev, feat);
+ if (ops->dev_feat_enabled)
+ return ops->dev_feat_enabled(dev, feat);
+ }
return false;
}
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e6e2fa85271c..b7ecd5b08039 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -22,11 +22,28 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct iova_domain *iovad;
+
+ iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+ free_cpu_cached_iovas(cpu, iovad);
+ return 0;
+}
+
static void free_global_cached_iovas(struct iova_domain *iovad);
+static struct iova *to_iova(struct rb_node *node)
+{
+ return rb_entry(node, struct iova, node);
+}
+
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn)
@@ -51,6 +68,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+ cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -136,7 +154,7 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
struct iova *cached_iova;
- cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+ cached_iova = to_iova(iovad->cached32_node);
if (free == cached_iova ||
(free->pfn_hi < iovad->dma_32bit_pfn &&
free->pfn_lo >= cached_iova->pfn_lo)) {
@@ -144,11 +162,48 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
}
- cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+ cached_iova = to_iova(iovad->cached_node);
if (free->pfn_lo >= cached_iova->pfn_lo)
iovad->cached_node = rb_next(&free->node);
}
+static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
+{
+ struct rb_node *node, *next;
+ /*
+ * Ideally what we'd like to judge here is whether limit_pfn is close
+ * enough to the highest-allocated IOVA that starting the allocation
+ * walk from the anchor node will be quicker than this initial work to
+ * find an exact starting point (especially if that ends up being the
+ * anchor node anyway). This is an incredibly crude approximation which
+ * only really helps the most likely case, but is at least trivially easy.
+ */
+ if (limit_pfn > iovad->dma_32bit_pfn)
+ return &iovad->anchor.node;
+
+ node = iovad->rbroot.rb_node;
+ while (to_iova(node)->pfn_hi < limit_pfn)
+ node = node->rb_right;
+
+search_left:
+ while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
+ node = node->rb_left;
+
+ if (!node->rb_left)
+ return node;
+
+ next = node->rb_left;
+ while (next->rb_right) {
+ next = next->rb_right;
+ if (to_iova(next)->pfn_lo >= limit_pfn) {
+ node = next;
+ goto search_left;
+ }
+ }
+
+ return node;
+}
+
/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
@@ -159,7 +214,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
new = (start) ? &start : &(root->rb_node);
/* Figure out where to put new node */
while (*new) {
- struct iova *this = rb_entry(*new, struct iova, node);
+ struct iova *this = to_iova(*new);
parent = *new;
@@ -198,7 +253,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
goto iova32_full;
curr = __get_cached_rbnode(iovad, limit_pfn);
- curr_iova = rb_entry(curr, struct iova, node);
+ curr_iova = to_iova(curr);
retry_pfn = curr_iova->pfn_hi + 1;
retry:
@@ -207,15 +262,15 @@ retry:
new_pfn = (high_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
- curr_iova = rb_entry(curr, struct iova, node);
+ curr_iova = to_iova(curr);
} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
if (high_pfn < size || new_pfn < low_pfn) {
if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
high_pfn = limit_pfn;
low_pfn = retry_pfn;
- curr = &iovad->anchor.node;
- curr_iova = rb_entry(curr, struct iova, node);
+ curr = iova_find_limit(iovad, limit_pfn);
+ curr_iova = to_iova(curr);
goto retry;
}
iovad->max32_alloc_size = size;
@@ -257,10 +312,21 @@ int iova_cache_get(void)
{
mutex_lock(&iova_cache_mutex);
if (!iova_cache_users) {
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
+ iova_cpuhp_dead);
+ if (ret) {
+ mutex_unlock(&iova_cache_mutex);
+ pr_err("Couldn't register cpuhp handler\n");
+ return ret;
+ }
+
iova_cache = kmem_cache_create(
"iommu_iova", sizeof(struct iova), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!iova_cache) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
mutex_unlock(&iova_cache_mutex);
pr_err("Couldn't create iova cache\n");
return -ENOMEM;
@@ -282,8 +348,10 @@ void iova_cache_put(void)
return;
}
iova_cache_users--;
- if (!iova_cache_users)
+ if (!iova_cache_users) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
kmem_cache_destroy(iova_cache);
+ }
mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
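The hotplug handling follows the standard multi-instance pattern: the state and callback are registered once by the first iova_cache_get() caller, each iova_domain then hangs its own hlist node on that state, and teardown mirrors both steps. Collecting the calls from the hunks above into one outline:

	/* first user */
	cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
				NULL, iova_cpuhp_dead);
	/* per domain */
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	...
	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	/* last user */
	cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);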
@@ -331,7 +399,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
assert_spin_locked(&iovad->iova_rbtree_lock);
while (node) {
- struct iova *iova = rb_entry(node, struct iova, node);
+ struct iova *iova = to_iova(node);
if (pfn < iova->pfn_lo)
node = node->rb_left;
@@ -467,7 +535,6 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
free_iova(iovad, pfn);
}
-EXPORT_SYMBOL_GPL(free_iova_fast);
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
@@ -606,6 +673,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
struct iova *iova, *tmp;
+ cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+ &iovad->cpuhp_dead);
+
free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
@@ -617,7 +687,7 @@ static int
__is_range_overlap(struct rb_node *node,
unsigned long pfn_lo, unsigned long pfn_hi)
{
- struct iova *iova = rb_entry(node, struct iova, node);
+ struct iova *iova = to_iova(node);
if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
return 1;
@@ -685,7 +755,7 @@ reserve_iova(struct iova_domain *iovad,
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
- iova = rb_entry(node, struct iova, node);
+ iova = to_iova(node);
__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
if ((pfn_lo >= iova->pfn_lo) &&
(pfn_hi <= iova->pfn_hi))
@@ -970,7 +1040,7 @@ static void free_iova_rcaches(struct iova_domain *iovad)
/*
* free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
*/
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
struct iova_cpu_rcache *cpu_rcache;
struct iova_rcache *rcache;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index eaaec0a55cc6..aaa6a4d59057 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1076,11 +1076,7 @@ static int ipmmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
- iommu_device_set_fwnode(&mmu->iommu,
- &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&mmu->iommu);
+ ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
if (ret)
return ret;
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f0ba6a09b434..7880f307cb2d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -792,10 +792,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
goto fail;
}
- iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&iommu->iommu);
+ ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
goto fail;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 6ecc007f07cd..e06b8a0e2b56 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -17,6 +17,7 @@
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
@@ -683,18 +684,12 @@ static const struct iommu_ops mtk_iommu_ops = {
.get_resv_regions = mtk_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ .owner = THIS_MODULE,
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
u32 regval;
- int ret;
-
- ret = clk_prepare_enable(data->bclk);
- if (ret) {
- dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
- return ret;
- }
if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
@@ -760,7 +755,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
dev_name(data->dev), (void *)data)) {
writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
- clk_disable_unprepare(data->bclk);
dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
return -ENODEV;
}
@@ -898,10 +892,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
goto out_link_remove;
- iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
- iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&data->iommu);
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
if (ret)
goto out_sysfs_remove;
@@ -977,14 +968,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
void __iomem *base = data->base;
int ret;
- /* Avoid first resume to affect the default value of registers below. */
- if (!m4u_dom)
- return 0;
ret = clk_prepare_enable(data->bclk);
if (ret) {
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
return ret;
}
+
+ /*
+ * Upon first resume, only enable the clk and return, since the register
+ * values have not been set yet.
+ */
+ if (!m4u_dom)
+ return 0;
+
writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
@@ -1079,16 +1075,7 @@ static struct platform_driver mtk_iommu_driver = {
.pm = &mtk_iommu_pm_ops,
}
};
+module_platform_driver(mtk_iommu_driver);
-static int __init mtk_iommu_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&mtk_iommu_driver);
- if (ret != 0)
- pr_err("Failed to register MTK IOMMU driver\n");
-
- return ret;
-}
-
-subsys_initcall(mtk_iommu_init)
+MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 82ddfe9170d4..5915d7b38211 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -20,6 +20,7 @@
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
@@ -423,23 +424,21 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args iommu_spec;
- struct of_phandle_iterator it;
struct mtk_iommu_data *data;
- int err;
+ int err, idx = 0;
- of_for_each_phandle(&it, err, dev->of_node, "iommus",
- "#iommu-cells", -1) {
- int count = of_phandle_iterator_args(&it, iommu_spec.args,
- MAX_PHANDLE_ARGS);
- iommu_spec.np = of_node_get(it.node);
- iommu_spec.args_count = count;
+ while (!of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells",
+ idx, &iommu_spec)) {
- mtk_iommu_create_mapping(dev, &iommu_spec);
+ err = mtk_iommu_create_mapping(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ if (err)
+ return ERR_PTR(err);
/* dev->iommu_fwspec might have changed */
fwspec = dev_iommu_fwspec_get(dev);
-
- of_node_put(iommu_spec.np);
+ idx++;
}
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
@@ -529,6 +528,7 @@ static const struct iommu_ops mtk_iommu_ops = {
.def_domain_type = mtk_iommu_def_domain_type,
.device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
+ .owner = THIS_MODULE,
};
static const struct of_device_id mtk_iommu_of_ids[] = {
@@ -547,10 +547,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct component_match *match = NULL;
- struct of_phandle_args larb_spec;
- struct of_phandle_iterator it;
void *protect;
- int larb_nr, ret, err;
+ int larb_nr, ret, i;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -578,35 +576,33 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (IS_ERR(data->bclk))
return PTR_ERR(data->bclk);
- larb_nr = 0;
- of_for_each_phandle(&it, err, dev->of_node,
- "mediatek,larbs", NULL, 0) {
+ larb_nr = of_count_phandle_with_args(dev->of_node,
+ "mediatek,larbs", NULL);
+ if (larb_nr < 0)
+ return larb_nr;
+
+ for (i = 0; i < larb_nr; i++) {
+ struct device_node *larbnode;
struct platform_device *plarbdev;
- int count = of_phandle_iterator_args(&it, larb_spec.args,
- MAX_PHANDLE_ARGS);
- if (count)
- continue;
+ larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+ if (!larbnode)
+ return -EINVAL;
- larb_spec.np = of_node_get(it.node);
- if (!of_device_is_available(larb_spec.np))
+ if (!of_device_is_available(larbnode)) {
+ of_node_put(larbnode);
continue;
+ }
- plarbdev = of_find_device_by_node(larb_spec.np);
+ plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
- plarbdev = of_platform_device_create(
- larb_spec.np, NULL,
- platform_bus_type.dev_root);
- if (!plarbdev) {
- of_node_put(larb_spec.np);
- return -EPROBE_DEFER;
- }
+ of_node_put(larbnode);
+ return -EPROBE_DEFER;
}
+ data->larb_imu[i].dev = &plarbdev->dev;
- data->larb_imu[larb_nr].dev = &plarbdev->dev;
component_match_add_release(dev, &match, release_of,
- compare_of, larb_spec.np);
- larb_nr++;
+ compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
@@ -620,16 +616,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
-
- ret = iommu_device_register(&data->iommu);
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
if (ret)
- return ret;
+ goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (!iommu_present(&platform_bus_type)) {
+ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (ret)
+ goto out_dev_unreg;
+ }
+
+ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ if (ret)
+ goto out_bus_set_null;
+ return ret;
- return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+out_bus_set_null:
+ bus_set_iommu(&platform_bus_type, NULL);
+out_dev_unreg:
+ iommu_device_unregister(&data->iommu);
+out_sysfs_remove:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
static int mtk_iommu_remove(struct platform_device *pdev)
@@ -691,9 +699,7 @@ static struct platform_driver mtk_iommu_driver = {
.pm = &mtk_iommu_pm_ops,
}
};
+module_platform_driver(mtk_iommu_driver);
-static int __init m4u_init(void)
-{
- return platform_driver_register(&mtk_iommu_driver);
-}
-subsys_initcall(m4u_init);
+MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e505b9130a1c..a9d2df001149 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -210,11 +210,6 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
of_pci_iommu_init, &info);
} else {
err = of_iommu_configure_device(master_np, dev, id);
-
- fwspec = dev_iommu_fwspec_get(dev);
- if (!err && fwspec)
- of_property_read_u32(master_np, "pasid-num-bits",
- &fwspec->num_pasid_bits);
}
/*
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 71f29c0927fc..26e517eb0dd3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1235,10 +1235,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
if (err)
goto out_group;
- iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
- iommu_device_set_fwnode(&obj->iommu, &of->fwnode);
-
- err = iommu_device_register(&obj->iommu);
+ err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
if (err)
goto out_sysfs;
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index e5d86b7177de..7a2932772fdf 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1196,10 +1196,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
goto err_put_group;
- iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
-
- err = iommu_device_register(&iommu->iommu);
+ err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
if (err)
goto err_remove_sysfs;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 8895dbb705eb..6019e58ce4fb 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -333,9 +333,7 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_err;
- iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops);
-
- rc = iommu_device_register(&zdev->iommu_dev);
+ rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
if (rc)
goto out_sysfs;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
new file mode 100644
index 000000000000..73dfd9946312
--- /dev/null
+++ b/drivers/iommu/sprd-iommu.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc IOMMU driver
+ *
+ * Copyright (C) 2020 Unisoc, Inc.
+ * Author: Chunyan Zhang <chunyan.zhang@unisoc.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SPRD_IOMMU_PAGE_SHIFT 12
+#define SPRD_IOMMU_PAGE_SIZE SZ_4K
+
+#define SPRD_EX_CFG 0x0
+#define SPRD_IOMMU_VAOR_BYPASS BIT(4)
+#define SPRD_IOMMU_GATE_EN BIT(1)
+#define SPRD_IOMMU_EN BIT(0)
+#define SPRD_EX_UPDATE 0x4
+#define SPRD_EX_FIRST_VPN 0x8
+#define SPRD_EX_VPN_RANGE 0xc
+#define SPRD_EX_FIRST_PPN 0x10
+#define SPRD_EX_DEFAULT_PPN 0x14
+
+#define SPRD_IOMMU_VERSION 0x0
+#define SPRD_VERSION_MASK GENMASK(15, 8)
+#define SPRD_VERSION_SHIFT 0x8
+#define SPRD_VAU_CFG 0x4
+#define SPRD_VAU_UPDATE 0x8
+#define SPRD_VAU_AUTH_CFG 0xc
+#define SPRD_VAU_FIRST_PPN 0x10
+#define SPRD_VAU_DEFAULT_PPN_RD 0x14
+#define SPRD_VAU_DEFAULT_PPN_WR 0x18
+#define SPRD_VAU_FIRST_VPN 0x1c
+#define SPRD_VAU_VPN_RANGE 0x20
+
+enum sprd_iommu_version {
+ SPRD_IOMMU_EX,
+ SPRD_IOMMU_VAU,
+};
+
+/*
+ * struct sprd_iommu_device - high-level sprd IOMMU device representation,
+ * including hardware information, configuration and driver data
+ *
+ * @ver: sprd IOMMU IP version
+ * @prot_page_va: protect page base virtual address
+ * @prot_page_pa: protect page base physical address, to which data is
+ * written on a translation fault
+ * @base: mapped base address for accessing registers
+ * @dev: pointer to basic device structure
+ * @iommu: IOMMU core representation
+ * @group: IOMMU group
+ * @eb: gate clock which controls IOMMU access
+ */
+struct sprd_iommu_device {
+ enum sprd_iommu_version ver;
+ u32 *prot_page_va;
+ dma_addr_t prot_page_pa;
+ void __iomem *base;
+ struct device *dev;
+ struct iommu_device iommu;
+ struct iommu_group *group;
+ struct clk *eb;
+};
+
+struct sprd_iommu_domain {
+ spinlock_t pgtlock; /* lock for page table */
+ struct iommu_domain domain;
+ u32 *pgt_va; /* page table virtual address base */
+ dma_addr_t pgt_pa; /* page table physical address base */
+ struct sprd_iommu_device *sdev;
+};
+
+static const struct iommu_ops sprd_iommu_ops;
+
+static struct sprd_iommu_domain *to_sprd_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct sprd_iommu_domain, domain);
+}
+
+static inline void
+sprd_iommu_write(struct sprd_iommu_device *sdev, unsigned int reg, u32 val)
+{
+ writel_relaxed(val, sdev->base + reg);
+}
+
+static inline u32
+sprd_iommu_read(struct sprd_iommu_device *sdev, unsigned int reg)
+{
+ return readl_relaxed(sdev->base + reg);
+}
+
+static inline void
+sprd_iommu_update_bits(struct sprd_iommu_device *sdev, unsigned int reg,
+ u32 mask, u32 shift, u32 val)
+{
+ u32 t = sprd_iommu_read(sdev, reg);
+
+ t = (t & (~(mask << shift))) | ((val & mask) << shift);
+ sprd_iommu_write(sdev, reg, t);
+}
+
+static inline int
+sprd_iommu_get_version(struct sprd_iommu_device *sdev)
+{
+ int ver = (sprd_iommu_read(sdev, SPRD_IOMMU_VERSION) &
+ SPRD_VERSION_MASK) >> SPRD_VERSION_SHIFT;
+
+ switch (ver) {
+ case SPRD_IOMMU_EX:
+ case SPRD_IOMMU_VAU:
+ return ver;
+ default:
+ return -EINVAL;
+ }
+}
+
+static size_t
+sprd_iommu_pgt_size(struct iommu_domain *domain)
+{
+ return ((domain->geometry.aperture_end -
+ domain->geometry.aperture_start + 1) >>
+ SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32);
+}
+
+static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+{
+ struct sprd_iommu_domain *dom;
+
+ if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ if (iommu_get_dma_cookie(&dom->domain)) {
+ kfree(dom);
+ return NULL;
+ }
+
+ spin_lock_init(&dom->pgtlock);
+
+ dom->domain.geometry.aperture_start = 0;
+ dom->domain.geometry.aperture_end = SZ_256M - 1;
+
+ return &dom->domain;
+}
+
+static void sprd_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+
+ iommu_put_dma_cookie(domain);
+ kfree(dom);
+}
+
+static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
+{
+ struct sprd_iommu_device *sdev = dom->sdev;
+ u32 val;
+ unsigned int reg;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_FIRST_VPN;
+ else
+ reg = SPRD_VAU_FIRST_VPN;
+
+ val = dom->domain.geometry.aperture_start >> SPRD_IOMMU_PAGE_SHIFT;
+ sprd_iommu_write(sdev, reg, val);
+}
+
+static void sprd_iommu_vpn_range(struct sprd_iommu_domain *dom)
+{
+ struct sprd_iommu_device *sdev = dom->sdev;
+ u32 val;
+ unsigned int reg;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_VPN_RANGE;
+ else
+ reg = SPRD_VAU_VPN_RANGE;
+
+ val = (dom->domain.geometry.aperture_end -
+ dom->domain.geometry.aperture_start) >> SPRD_IOMMU_PAGE_SHIFT;
+ sprd_iommu_write(sdev, reg, val);
+}
+
+static void sprd_iommu_first_ppn(struct sprd_iommu_domain *dom)
+{
+ u32 val = dom->pgt_pa >> SPRD_IOMMU_PAGE_SHIFT;
+ struct sprd_iommu_device *sdev = dom->sdev;
+ unsigned int reg;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_FIRST_PPN;
+ else
+ reg = SPRD_VAU_FIRST_PPN;
+
+ sprd_iommu_write(sdev, reg, val);
+}
+
+static void sprd_iommu_default_ppn(struct sprd_iommu_device *sdev)
+{
+ u32 val = sdev->prot_page_pa >> SPRD_IOMMU_PAGE_SHIFT;
+
+ if (sdev->ver == SPRD_IOMMU_EX) {
+ sprd_iommu_write(sdev, SPRD_EX_DEFAULT_PPN, val);
+ } else if (sdev->ver == SPRD_IOMMU_VAU) {
+ sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_RD, val);
+ sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_WR, val);
+ }
+}
+
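+/* Toggle the translation-enable and register gate-clock-enable bits together. */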
+static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
+{
+ unsigned int reg_cfg;
+ u32 mask, val;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg_cfg = SPRD_EX_CFG;
+ else
+ reg_cfg = SPRD_VAU_CFG;
+
+ mask = SPRD_IOMMU_EN | SPRD_IOMMU_GATE_EN;
+ val = en ? mask : 0;
+ sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
+}
+
+static int sprd_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ size_t pgt_size = sprd_iommu_pgt_size(domain);
+
+ if (dom->sdev) {
+ pr_err("There's already a device attached to this domain.\n");
+ return -EINVAL;
+ }
+
+ dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
+ if (!dom->pgt_va)
+ return -ENOMEM;
+
+ dom->sdev = sdev;
+
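+	/* Program the page-table base, IOVA window and fault page, then enable. */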
+ sprd_iommu_first_ppn(dom);
+ sprd_iommu_first_vpn(dom);
+ sprd_iommu_vpn_range(dom);
+ sprd_iommu_default_ppn(sdev);
+ sprd_iommu_hw_en(sdev, true);
+
+ return 0;
+}
+
+static void sprd_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ struct sprd_iommu_device *sdev = dom->sdev;
+ size_t pgt_size = sprd_iommu_pgt_size(domain);
+
+ if (!sdev)
+ return;
+
+ dma_free_coherent(sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
+ sprd_iommu_hw_en(sdev, false);
+ dom->sdev = NULL;
+}
+
+static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ unsigned long flags;
+ unsigned int i;
+ u32 *pgt_base_iova;
+ u32 pabase = (u32)paddr;
+ unsigned long start = domain->geometry.aperture_start;
+ unsigned long end = domain->geometry.aperture_end;
+
+ if (!dom->sdev) {
+ pr_err("No sprd_iommu_device attached to the domain\n");
+ return -EINVAL;
+ }
+
+ if (iova < start || (iova + size) > (end + 1)) {
+		dev_err(dom->sdev->dev, "iova 0x%lx + size 0x%zx is out of the iova range!\n",
+ iova, size);
+ return -EINVAL;
+ }
+
+ pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
+
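+	/* The page table is a flat array: write one PPN entry per 4 KiB page. */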
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ for (i = 0; i < page_num; i++) {
+ pgt_base_iova[i] = pabase >> SPRD_IOMMU_PAGE_SHIFT;
+ pabase += SPRD_IOMMU_PAGE_SIZE;
+ }
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return 0;
+}
+
+static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *iotlb_gather)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned long flags;
+ u32 *pgt_base_iova;
+ unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ unsigned long start = domain->geometry.aperture_start;
+ unsigned long end = domain->geometry.aperture_end;
+
+	/* Report 0 bytes unmapped so the IOMMU core treats this as a failure */
+	if (iova < start || (iova + size) > (end + 1))
+		return 0;
+
+ pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+	return size;
+}
+
+static void sprd_iommu_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned int reg;
+
+ if (dom->sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_UPDATE;
+ else
+ reg = SPRD_VAU_UPDATE;
+
+	/* Invalidate the IOMMU TLB after the page table has been updated */
+ sprd_iommu_write(dom->sdev, reg, 0xffffffff);
+}
+
+static void sprd_iommu_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ sprd_iommu_sync_map(domain, 0, 0);
+}
+
+static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned long flags;
+ phys_addr_t pa;
+ unsigned long start = domain->geometry.aperture_start;
+ unsigned long end = domain->geometry.aperture_end;
+
+ if (WARN_ON(iova < start || iova > end))
+ return 0;
+
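+	/* The entry stores a PPN; shift it back up and add the in-page offset. */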
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ pa = *(dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT));
+ pa = (pa << SPRD_IOMMU_PAGE_SHIFT) + ((iova - start) & (SPRD_IOMMU_PAGE_SIZE - 1));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return pa;
+}
+
+static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct sprd_iommu_device *sdev;
+
+ if (!fwspec || fwspec->ops != &sprd_iommu_ops)
+ return ERR_PTR(-ENODEV);
+
+ sdev = dev_iommu_priv_get(dev);
+
+ return &sdev->iommu;
+}
+
+static void sprd_iommu_release_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (!fwspec || fwspec->ops != &sprd_iommu_ops)
+ return;
+
+ iommu_fwspec_free(dev);
+}
+
+static struct iommu_group *sprd_iommu_device_group(struct device *dev)
+{
+ struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
+
+ return iommu_group_ref_get(sdev->group);
+}
+
+static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct platform_device *pdev;
+
+ if (!dev_iommu_priv_get(dev)) {
+ pdev = of_find_device_by_node(args->np);
+ dev_iommu_priv_set(dev, platform_get_drvdata(pdev));
+ platform_device_put(pdev);
+ }
+
+ return 0;
+}
+
+static const struct iommu_ops sprd_iommu_ops = {
+ .domain_alloc = sprd_iommu_domain_alloc,
+ .domain_free = sprd_iommu_domain_free,
+ .attach_dev = sprd_iommu_attach_device,
+ .detach_dev = sprd_iommu_detach_device,
+ .map = sprd_iommu_map,
+ .unmap = sprd_iommu_unmap,
+ .iotlb_sync_map = sprd_iommu_sync_map,
+ .iotlb_sync = sprd_iommu_sync,
+ .iova_to_phys = sprd_iommu_iova_to_phys,
+ .probe_device = sprd_iommu_probe_device,
+ .release_device = sprd_iommu_release_device,
+ .device_group = sprd_iommu_device_group,
+ .of_xlate = sprd_iommu_of_xlate,
+ .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id sprd_iommu_of_match[] = {
+ { .compatible = "sprd,iommu-v1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_iommu_of_match);
+
+/*
+ * A clock is not strictly required: access to some IOMMUs is controlled by a
+ * gate clock, which must be enabled before those IOMMUs are accessed.
+ * Return 0 on success or when no clock is found.
+ */
+static int sprd_iommu_clk_enable(struct sprd_iommu_device *sdev)
+{
+ struct clk *eb;
+
+ eb = devm_clk_get_optional(sdev->dev, NULL);
+ if (!eb)
+ return 0;
+
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+
+ sdev->eb = eb;
+ return clk_prepare_enable(eb);
+}
+
+static void sprd_iommu_clk_disable(struct sprd_iommu_device *sdev)
+{
+ if (sdev->eb)
+ clk_disable_unprepare(sdev->eb);
+}
+
+static int sprd_iommu_probe(struct platform_device *pdev)
+{
+ struct sprd_iommu_device *sdev;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret;
+
+ sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+		dev_err(dev, "Failed to ioremap resource.\n");
+ return PTR_ERR(base);
+ }
+ sdev->base = base;
+
+ sdev->prot_page_va = dma_alloc_coherent(dev, SPRD_IOMMU_PAGE_SIZE,
+ &sdev->prot_page_pa, GFP_KERNEL);
+ if (!sdev->prot_page_va)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sdev);
+ sdev->dev = dev;
+
+ /* All the client devices are in the same iommu-group */
+ sdev->group = iommu_group_alloc();
+ if (IS_ERR(sdev->group)) {
+ ret = PTR_ERR(sdev->group);
+ goto free_page;
+ }
+
+ ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
+ if (ret)
+ goto put_group;
+
+ ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
+ if (ret)
+ goto remove_sysfs;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);
+
+ ret = sprd_iommu_clk_enable(sdev);
+ if (ret)
+ goto unregister_iommu;
+
+ ret = sprd_iommu_get_version(sdev);
+ if (ret < 0) {
+ dev_err(dev, "IOMMU version(%d) is invalid.\n", ret);
+ goto disable_clk;
+ }
+ sdev->ver = ret;
+
+ return 0;
+
+disable_clk:
+ sprd_iommu_clk_disable(sdev);
+unregister_iommu:
+ iommu_device_unregister(&sdev->iommu);
+remove_sysfs:
+ iommu_device_sysfs_remove(&sdev->iommu);
+put_group:
+ iommu_group_put(sdev->group);
+free_page:
+ dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
+ return ret;
+}
+
+static int sprd_iommu_remove(struct platform_device *pdev)
+{
+ struct sprd_iommu_device *sdev = platform_get_drvdata(pdev);
+
+ dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
+
+ iommu_group_put(sdev->group);
+ sdev->group = NULL;
+
+ bus_set_iommu(&platform_bus_type, NULL);
+
+ platform_set_drvdata(pdev, NULL);
+ iommu_device_sysfs_remove(&sdev->iommu);
+ iommu_device_unregister(&sdev->iommu);
+
+ return 0;
+}
+
+static struct platform_driver sprd_iommu_driver = {
+ .driver = {
+ .name = "sprd-iommu",
+ .of_match_table = sprd_iommu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = sprd_iommu_probe,
+ .remove = sprd_iommu_remove,
+};
+module_platform_driver(sprd_iommu_driver);
+
+MODULE_DESCRIPTION("IOMMU driver for Unisoc SoCs");
+MODULE_ALIAS("platform:sprd-iommu");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index ea6db1341916..181bb1c3437c 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -968,10 +968,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (ret)
goto err_free_group;
- iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&iommu->iommu);
+ ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
if (ret)
goto err_remove_sysfs;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 6f130e51f072..6a358f92c7e5 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -353,10 +353,7 @@ struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
if (err)
goto free_gart;
- iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
- iommu_device_set_fwnode(&gart->iommu, dev->fwnode);
-
- err = iommu_device_register(&gart->iommu);
+ err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
if (err)
goto remove_sysfs;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 602aab98c079..1e98dc63ad13 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1145,10 +1145,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
if (err)
return ERR_PTR(err);
- iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- err = iommu_device_register(&smmu->iommu);
+ err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
if (err)
goto remove_sysfs;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 2bfdd5734844..7c02481a81b4 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -945,6 +945,7 @@ static struct iommu_ops viommu_ops = {
.get_resv_regions = viommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
+ .owner = THIS_MODULE,
};
static int viommu_init_vqs(struct viommu_dev *viommu)
@@ -1065,10 +1066,7 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
goto err_free_vqs;
- iommu_device_set_ops(&viommu->iommu, &viommu_ops);
- iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
-
- iommu_device_register(&viommu->iommu);
+ iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &viommu_ops) {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c3485b230d70..2e6923c2c8a8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -794,8 +794,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_alloc(cmd, alloc);
- /* We can only signal PTZ when alloc==1. Why do we have two bits? */
- its_encode_ptz(cmd, alloc);
+ /*
+ * GICv4.1 provides a way to get the VLPI state, which needs the vPE
+ * to be unmapped first, and in this case, we may remap the vPE
+ * back while the VPT is not empty. So we can't assume that the
+ * VPT is empty on map. This is why we never advertise PTZ.
+ */
+ its_encode_ptz(cmd, false);
its_encode_vconf_addr(cmd, vconf_addr);
its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
@@ -4554,6 +4559,15 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
its_send_vmapp(its, vpe, false);
}
+
+ /*
+	 * There may be a direct read to the VPT after unmapping the
+	 * vPE. To guarantee the validity of this read, we make the VPT
+	 * memory coherent with the CPU caches here.
+ */
+ if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
+ gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
+ LPI_PENDBASE_SZ);
}
static const struct irq_domain_ops its_vpe_domain_ops = {
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index b5ed4ea145cb..77e951206809 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -201,6 +201,7 @@ static ssize_t empty_read(struct file *file, char __user *buf,
static const struct proc_ops empty_proc_ops = {
.proc_read = empty_read,
+ .proc_lseek = default_llseek,
};
// ---------------------------------------------------------------------------
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 7013a3f08429..4f7eaa17fb27 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -173,13 +173,13 @@
#define MAX_FRAGS (32 * MAX_CARDS)
static LIST_HEAD(HFClist);
-static spinlock_t HFClock; /* global hfc list lock */
+static DEFINE_SPINLOCK(HFClock); /* global hfc list lock */
static void ph_state_change(struct dchannel *);
static struct hfc_multi *syncmaster;
static int plxsd_master; /* if we have a master card (yet) */
-static spinlock_t plx_lock; /* may not acquire other lock inside */
+static DEFINE_SPINLOCK(plx_lock); /* may not acquire other lock inside */
#define TYP_E1 1
#define TYP_4S 4
@@ -2748,8 +2748,6 @@ hfcmulti_interrupt(int intno, void *dev_id)
if (hc->ctype != HFC_TYPE_E1)
ph_state_irq(hc, r_irq_statech);
}
- if (status & V_EXT_IRQSTA)
- ; /* external IRQ */
if (status & V_LOST_STA) {
/* LOST IRQ */
HFC_outb(hc, R_INC_RES_FIFO, V_RES_LOST); /* clear irq! */
@@ -5482,9 +5480,6 @@ HFCmulti_init(void)
printk(KERN_DEBUG "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
#endif
- spin_lock_init(&HFClock);
- spin_lock_init(&plx_lock);
-
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: init entered\n", __func__);
diff --git a/drivers/isdn/hardware/mISDN/iohelper.h b/drivers/isdn/hardware/mISDN/iohelper.h
index b2b2bde8edba..c81f7aba4b57 100644
--- a/drivers/isdn/hardware/mISDN/iohelper.h
+++ b/drivers/isdn/hardware/mISDN/iohelper.h
@@ -13,14 +13,14 @@
#ifndef _IOHELPER_H
#define _IOHELPER_H
-typedef u8 (read_reg_func)(void *hwp, u8 offset);
- typedef void (write_reg_func)(void *hwp, u8 offset, u8 value);
- typedef void (fifo_func)(void *hwp, u8 offset, u8 *datap, int size);
+typedef u8 (read_reg_func)(void *hwp, u8 offset);
+typedef void (write_reg_func)(void *hwp, u8 offset, u8 value);
+typedef void (fifo_func)(void *hwp, u8 offset, u8 *datap, int size);
- struct _ioport {
- u32 port;
- u32 ale;
- };
+struct _ioport {
+ u32 port;
+ u32 ale;
+};
#define IOFUNC_IO(name, hws, ap) \
static u8 Read##name##_IO(void *p, u8 off) { \
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 038e72a84b33..386084530c2f 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -176,9 +176,9 @@ MODULE_LICENSE("GPL");
/*int spinnest = 0;*/
-spinlock_t dsp_lock; /* global dsp lock */
-struct list_head dsp_ilist;
-struct list_head conf_ilist;
+DEFINE_SPINLOCK(dsp_lock); /* global dsp lock */
+LIST_HEAD(dsp_ilist);
+LIST_HEAD(conf_ilist);
int dsp_debug;
int dsp_options;
int dsp_poll, dsp_tics;
@@ -953,7 +953,6 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct dsp *dsp = container_of(ch, struct dsp, ch);
u_long flags;
- int err = 0;
if (debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
@@ -998,7 +997,7 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
module_put(THIS_MODULE);
break;
}
- return err;
+ return 0;
}
static void
@@ -1170,10 +1169,6 @@ static int __init dsp_init(void)
printk(KERN_INFO "mISDN_dsp: DSP clocks every %d samples. This equals "
"%d jiffies.\n", dsp_poll, dsp_tics);
- spin_lock_init(&dsp_lock);
- INIT_LIST_HEAD(&dsp_ilist);
- INIT_LIST_HEAD(&conf_ilist);
-
/* init conversion tables */
dsp_audio_generate_law_tables();
dsp_silence = (dsp_options & DSP_OPT_ULAW) ? 0xff : 0x2a;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index b57dcb834594..2c40412466e6 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -200,7 +200,7 @@
The complete socket opening and closing is done by a thread.
When the thread opened a socket, the hc->socket descriptor is set. Whenever a
- packet shall be sent to the socket, the hc->socket must be checked wheter not
+  packet shall be sent to the socket, hc->socket must be checked because it may be
NULL. To prevent change in socket descriptor, the hc->socket_lock must be used.
To change the socket, a recall of l1oip_socket_open() will safely kill the
socket process and create a new one.
@@ -229,8 +229,8 @@
static const char *l1oip_revision = "2.00";
static int l1oip_cnt;
-static spinlock_t l1oip_lock;
-static struct list_head l1oip_ilist;
+static DEFINE_SPINLOCK(l1oip_lock);
+static LIST_HEAD(l1oip_ilist);
#define MAX_CARDS 16
static u_int type[MAX_CARDS];
@@ -1440,9 +1440,6 @@ l1oip_init(void)
printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. %s\n",
l1oip_revision);
- INIT_LIST_HEAD(&l1oip_ilist);
- spin_lock_init(&l1oip_lock);
-
if (l1oip_4bit_alloc(ulaw))
return -ENOMEM;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b6742b4231bf..49d99cb084db 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -18,7 +18,7 @@ config LEDS_CLASS
tristate "LED Class Support"
help
This option enables the LED sysfs class in /sys/class/leds. You'll
- need this to do anything useful with LEDs. If unsure, say N.
+ need this to do anything useful with LEDs. If unsure, say Y.
config LEDS_CLASS_FLASH
tristate "LED Flash Class Support"
@@ -928,13 +928,12 @@ config LEDS_ACER_A500
This option enables support for the Power Button LED of
Acer Iconia Tab A500.
+source "drivers/leds/blink/Kconfig"
+
comment "Flash and Torch LED drivers"
source "drivers/leds/flash/Kconfig"
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
-comment "LED Blink"
-source "drivers/leds/blink/Kconfig"
-
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2a698df9da57..7e604d3028c8 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -110,4 +110,4 @@ obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
# LED Blink
-obj-$(CONFIG_LEDS_BLINK) += blink/
+obj-y += blink/
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 265b53476a80..59ba81e40e85 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,20 +1,17 @@
-menuconfig LEDS_BLINK
- bool "LED Blink support"
- depends on LEDS_CLASS
- help
- This option enables blink support for the leds class.
- If unsure, say Y.
+config LEDS_LGM
+ tristate "LED support for LGM SoC series"
+ depends on X86 || COMPILE_TEST
+ depends on GPIOLIB && LEDS_CLASS && MFD_SYSCON && OF
+ help
+ This option enables support for LEDs connected to GPIO lines on
+	  the Lightning Mountain (LGM) SoC. Lightning Mountain is an AnyWAN
+	  gateway-on-a-chip SoC shipped in mid- and high-end home
+ gateways and routers.
-if LEDS_BLINK
+ These LEDs are driven by a Serial Shift Output (SSO) controller.
+ The driver supports hardware blinking and the LEDs can be configured
+ to be triggered by software/CPU or by hardware.
-config LEDS_BLINK_LGM
- tristate "LED support for Intel LGM SoC series"
- depends on LEDS_CLASS
- depends on MFD_SYSCON
- depends on OF
- help
- Parallel to serial conversion, which is also called SSO controller,
- can drive external shift register for LED outputs.
- This enables LED support for Serial Shift Output controller(SSO).
-
-endif # LEDS_BLINK
+	  Say 'Y' here if you are working on an LGM SoC based platform. Otherwise,
+ say 'N'. To compile this driver as a module, choose M here: the module
+ will be called leds-lgm-sso.
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
index 2fa6c7b7b67e..fa5d04dccf13 100644
--- a/drivers/leds/blink/Makefile
+++ b/drivers/leds/blink/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_LEDS_BLINK_LGM) += leds-lgm-sso.o
+obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index 7d5c9ca007d6..6a63846d10b5 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -793,7 +793,7 @@ static int intel_sso_led_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->gclk);
if (ret) {
- dev_err(dev, "Failed to prepate/enable sso gate clock!\n");
+ dev_err(dev, "Failed to prepare/enable sso gate clock!\n");
return ret;
}
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index b580b416b9a4..3f49f3edbffb 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -2,6 +2,17 @@
if LEDS_CLASS_FLASH
+config LEDS_RT4505
+ tristate "LED support for RT4505 flashlight controller"
+ depends on I2C && OF
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ select REGMAP_I2C
+ help
+ This option enables support for the RT4505 flash LED controller.
+ RT4505 includes torch and flash functions with programmable current.
+	  It is commonly used to compensate the illuminance for the camera
+	  in mobile products such as phones or tablets.
+
config LEDS_RT8515
tristate "LED support for Richtek RT8515 flash/torch LED"
depends on GPIOLIB
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index e990e257f4d7..09aee561f769 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_RT4505) += leds-rt4505.o
obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
new file mode 100644
index 000000000000..ee129ab7255d
--- /dev/null
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define RT4505_REG_RESET 0x0
+#define RT4505_REG_CONFIG 0x8
+#define RT4505_REG_ILED 0x9
+#define RT4505_REG_ENABLE 0xA
+#define RT4505_REG_FLAGS 0xB
+
+#define RT4505_RESET_MASK BIT(7)
+#define RT4505_FLASHTO_MASK GENMASK(2, 0)
+#define RT4505_ITORCH_MASK GENMASK(7, 5)
+#define RT4505_ITORCH_SHIFT 5
+#define RT4505_IFLASH_MASK GENMASK(4, 0)
+#define RT4505_ENABLE_MASK GENMASK(5, 0)
+#define RT4505_TORCH_SET (BIT(0) | BIT(4))
+#define RT4505_FLASH_SET (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define RT4505_EXT_FLASH_SET (BIT(0) | BIT(1) | BIT(4) | BIT(5))
+#define RT4505_FLASH_GET (BIT(0) | BIT(1) | BIT(4))
+#define RT4505_OVP_MASK BIT(3)
+#define RT4505_SHORT_MASK BIT(2)
+#define RT4505_OTP_MASK BIT(1)
+#define RT4505_TIMEOUT_MASK BIT(0)
+
+#define RT4505_ITORCH_MINUA 46000
+#define RT4505_ITORCH_MAXUA 375000
+#define RT4505_ITORCH_STPUA 47000
+#define RT4505_IFLASH_MINUA 93750
+#define RT4505_IFLASH_MAXUA 1500000
+#define RT4505_IFLASH_STPUA 93750
+#define RT4505_FLASHTO_MINUS 100000
+#define RT4505_FLASHTO_MAXUS 800000
+#define RT4505_FLASHTO_STPUS 100000
+
+struct rt4505_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct led_classdev_flash flash;
+ struct v4l2_flash *v4l2_flash;
+};
+
+static int rt4505_torch_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct rt4505_priv *priv =
+ container_of(lcdev, struct rt4505_priv, flash.led_cdev);
+ u32 val = 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ if (level != LED_OFF) {
+ ret = regmap_update_bits(priv->regmap,
+ RT4505_REG_ILED, RT4505_ITORCH_MASK,
+ (level - 1) << RT4505_ITORCH_SHIFT);
+ if (ret)
+ goto unlock;
+
+ val = RT4505_TORCH_SET;
+ }
+
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
+ RT4505_ENABLE_MASK, val);
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static enum led_brightness rt4505_torch_brightness_get(
+ struct led_classdev *lcdev)
+{
+ struct rt4505_priv *priv =
+ container_of(lcdev, struct rt4505_priv, flash.led_cdev);
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_read(priv->regmap, RT4505_REG_ENABLE, &val);
+ if (ret) {
+ dev_err(lcdev->dev, "Failed to get LED enable\n");
+ ret = LED_OFF;
+ goto unlock;
+ }
+
+ if ((val & RT4505_ENABLE_MASK) != RT4505_TORCH_SET) {
+ ret = LED_OFF;
+ goto unlock;
+ }
+
+ ret = regmap_read(priv->regmap, RT4505_REG_ILED, &val);
+ if (ret) {
+ dev_err(lcdev->dev, "Failed to get LED brightness\n");
+ ret = LED_OFF;
+ goto unlock;
+ }
+
+ ret = ((val & RT4505_ITORCH_MASK) >> RT4505_ITORCH_SHIFT) + 1;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int rt4505_flash_brightness_set(struct led_classdev_flash *fled_cdev,
+ u32 brightness)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ struct led_flash_setting *s = &fled_cdev->brightness;
+ u32 val = (brightness - s->min) / s->step;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ILED,
+ RT4505_IFLASH_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rt4505_flash_strobe_set(struct led_classdev_flash *fled_cdev,
+ bool state)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ u32 val = state ? RT4505_FLASH_SET : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
+ RT4505_ENABLE_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rt4505_flash_strobe_get(struct led_classdev_flash *fled_cdev,
+ bool *state)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_read(priv->regmap, RT4505_REG_ENABLE, &val);
+ if (ret)
+ goto unlock;
+
+ *state = (val & RT4505_FLASH_GET) == RT4505_FLASH_GET;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int rt4505_flash_timeout_set(struct led_classdev_flash *fled_cdev,
+ u32 timeout)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ struct led_flash_setting *s = &fled_cdev->timeout;
+ u32 val = (timeout - s->min) / s->step;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_CONFIG,
+ RT4505_FLASHTO_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rt4505_fault_get(struct led_classdev_flash *fled_cdev, u32 *fault)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ u32 val, led_faults = 0;
+ int ret;
+
+ ret = regmap_read(priv->regmap, RT4505_REG_FLAGS, &val);
+ if (ret)
+ return ret;
+
+ if (val & RT4505_OVP_MASK)
+ led_faults |= LED_FAULT_OVER_VOLTAGE;
+
+ if (val & RT4505_SHORT_MASK)
+ led_faults |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (val & RT4505_OTP_MASK)
+ led_faults |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (val & RT4505_TIMEOUT_MASK)
+ led_faults |= LED_FAULT_TIMEOUT;
+
+ *fault = led_faults;
+ return 0;
+}
+
+static const struct led_flash_ops rt4505_flash_ops = {
+ .flash_brightness_set = rt4505_flash_brightness_set,
+ .strobe_set = rt4505_flash_strobe_set,
+ .strobe_get = rt4505_flash_strobe_get,
+ .timeout_set = rt4505_flash_timeout_set,
+ .fault_get = rt4505_fault_get,
+};
+
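+/* Only the reset register and the 0x8-0xB config/status block are implemented. */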
+static bool rt4505_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == RT4505_REG_RESET ||
+ (reg >= RT4505_REG_CONFIG && reg <= RT4505_REG_FLAGS))
+ return true;
+ return false;
+}
+
+static const struct regmap_config rt4505_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT4505_REG_FLAGS,
+
+ .readable_reg = rt4505_is_accessible_reg,
+ .writeable_reg = rt4505_is_accessible_reg,
+};
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static int rt4505_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
+ bool enable)
+{
+ struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
+ struct rt4505_priv *priv =
+ container_of(flash, struct rt4505_priv, flash);
+ u32 val = enable ? RT4505_EXT_FLASH_SET : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
+ RT4505_ENABLE_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct v4l2_flash_ops v4l2_flash_ops = {
+ .external_strobe_set = rt4505_flash_external_strobe_set,
+};
+
+static void rt4505_init_v4l2_config(struct rt4505_priv *priv,
+ struct v4l2_flash_config *config)
+{
+ struct led_classdev_flash *flash = &priv->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(config->dev_name, lcdev->dev->kobj.name,
+ sizeof(config->dev_name));
+
+ s = &config->intensity;
+ s->min = RT4505_ITORCH_MINUA;
+ s->step = RT4505_ITORCH_STPUA;
+ s->max = s->val = s->min + (lcdev->max_brightness - 1) * s->step;
+
+ config->flash_faults = LED_FAULT_OVER_VOLTAGE |
+ LED_FAULT_SHORT_CIRCUIT |
+ LED_FAULT_LED_OVER_TEMPERATURE |
+ LED_FAULT_TIMEOUT;
+ config->has_external_strobe = 1;
+}
+#else
+static const struct v4l2_flash_ops v4l2_flash_ops;
+static void rt4505_init_v4l2_config(struct rt4505_priv *priv,
+ struct v4l2_flash_config *config)
+{
+}
+#endif
+
+static void rt4505_init_flash_properties(struct rt4505_priv *priv,
+ struct fwnode_handle *child)
+{
+ struct led_classdev_flash *flash = &priv->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ struct led_flash_setting *s;
+ u32 val;
+ int ret;
+
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &val);
+ if (ret) {
+ dev_warn(priv->dev, "led-max-microamp DT property missing\n");
+ val = RT4505_ITORCH_MINUA;
+ } else
+ val = clamp_val(val, RT4505_ITORCH_MINUA, RT4505_ITORCH_MAXUA);
+
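+	/* One brightness level per current step, starting from the minimum. */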
+ lcdev->max_brightness =
+ (val - RT4505_ITORCH_MINUA) / RT4505_ITORCH_STPUA + 1;
+ lcdev->brightness_set_blocking = rt4505_torch_brightness_set;
+ lcdev->brightness_get = rt4505_torch_brightness_get;
+ lcdev->flags |= LED_DEV_CAP_FLASH;
+
+ ret = fwnode_property_read_u32(child, "flash-max-microamp", &val);
+ if (ret) {
+ dev_warn(priv->dev, "flash-max-microamp DT property missing\n");
+ val = RT4505_IFLASH_MINUA;
+ } else
+ val = clamp_val(val, RT4505_IFLASH_MINUA, RT4505_IFLASH_MAXUA);
+
+ s = &flash->brightness;
+ s->min = RT4505_IFLASH_MINUA;
+ s->step = RT4505_IFLASH_STPUA;
+ s->max = s->val = val;
+
+ ret = fwnode_property_read_u32(child, "flash-max-timeout-us", &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "flash-max-timeout-us DT property missing\n");
+ val = RT4505_FLASHTO_MINUS;
+ } else
+ val = clamp_val(val, RT4505_FLASHTO_MINUS,
+ RT4505_FLASHTO_MAXUS);
+
+ s = &flash->timeout;
+ s->min = RT4505_FLASHTO_MINUS;
+ s->step = RT4505_FLASHTO_STPUS;
+ s->max = s->val = val;
+
+ flash->ops = &rt4505_flash_ops;
+}
+
+static int rt4505_probe(struct i2c_client *client)
+{
+ struct rt4505_priv *priv;
+ struct fwnode_handle *child;
+ struct led_init_data init_data = {};
+ struct v4l2_flash_config v4l2_config = {};
+ int ret;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &client->dev;
+ mutex_init(&priv->lock);
+
+ priv->regmap = devm_regmap_init_i2c(client, &rt4505_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "Failed to allocate register map\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = regmap_write(priv->regmap, RT4505_REG_RESET, RT4505_RESET_MASK);
+ if (ret) {
+ dev_err(priv->dev, "Failed to reset registers\n");
+ return ret;
+ }
+
+ child = fwnode_get_next_available_child_node(client->dev.fwnode, NULL);
+ if (!child) {
+ dev_err(priv->dev, "Failed to get child node\n");
+ return -EINVAL;
+ }
+ init_data.fwnode = child;
+
+ rt4505_init_flash_properties(priv, child);
+ ret = devm_led_classdev_flash_register_ext(priv->dev, &priv->flash,
+ &init_data);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register flash\n");
+ return ret;
+ }
+
+ rt4505_init_v4l2_config(priv, &v4l2_config);
+ priv->v4l2_flash = v4l2_flash_init(priv->dev, init_data.fwnode,
+ &priv->flash, &v4l2_flash_ops,
+ &v4l2_config);
+ if (IS_ERR(priv->v4l2_flash)) {
+ dev_err(priv->dev, "Failed to register v4l2 flash\n");
+ return PTR_ERR(priv->v4l2_flash);
+ }
+
+ i2c_set_clientdata(client, priv);
+ return 0;
+}
+
+static int rt4505_remove(struct i2c_client *client)
+{
+ struct rt4505_priv *priv = i2c_get_clientdata(client);
+
+ v4l2_flash_release(priv->v4l2_flash);
+ return 0;
+}
+
+static void rt4505_shutdown(struct i2c_client *client)
+{
+ struct rt4505_priv *priv = i2c_get_clientdata(client);
+
+	/* Reset registers to make sure all LEDs are off before shutdown */
+ regmap_write(priv->regmap, RT4505_REG_RESET, RT4505_RESET_MASK);
+}
+
+static const struct of_device_id __maybe_unused rt4505_leds_match[] = {
+ { .compatible = "richtek,rt4505", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt4505_leds_match);
+
+static struct i2c_driver rt4505_driver = {
+ .driver = {
+ .name = "rt4505",
+ .of_match_table = of_match_ptr(rt4505_leds_match),
+ },
+ .probe_new = rt4505_probe,
+ .remove = rt4505_remove,
+ .shutdown = rt4505_shutdown,
+};
+module_i2c_driver(rt4505_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 8007b82985a8..435309154e6b 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -339,7 +339,7 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_flash.max_brightness = 16;
chip->cdev_flash.brightness_set_blocking = lm3642_strobe_brightness_set;
chip->cdev_flash.default_trigger = "flash";
- chip->cdev_flash.groups = lm3642_flash_groups,
+ chip->cdev_flash.groups = lm3642_flash_groups;
err = led_classdev_register(&client->dev, &chip->cdev_flash);
if (err < 0) {
dev_err(chip->dev, "failed to register flash\n");
@@ -351,7 +351,7 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_torch.max_brightness = 8;
chip->cdev_torch.brightness_set_blocking = lm3642_torch_brightness_set;
chip->cdev_torch.default_trigger = "torch";
- chip->cdev_torch.groups = lm3642_torch_groups,
+ chip->cdev_torch.groups = lm3642_torch_groups;
err = led_classdev_register(&client->dev, &chip->cdev_torch);
if (err < 0) {
dev_err(chip->dev, "failed to register torch\n");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 27d027165472..017794bb87ae 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -480,6 +480,8 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
if (!pdata)
return ERR_PTR(-ENOMEM);
+ pdata->gpio_base = -1;
+
of_property_read_u8_array(np, "nxp,pwm", &pdata->pwm[0],
ARRAY_SIZE(pdata->pwm));
of_property_read_u8_array(np, "nxp,psc", &pdata->psc[0],
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 4d138d5317e9..43a265dc4696 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -333,7 +333,7 @@ static DEVICE_ATTR_RW(hw_pattern);
static umode_t pattern_trig_attrs_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (attr == &dev_attr_repeat.attr || attr == &dev_attr_pattern.attr)
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 73e6ae88fafd..4bdd4c45e7a7 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -180,14 +180,13 @@ static struct proc_dir_entry *proc_pmu_options;
static int option_server_mode;
int pmu_battery_count;
-int pmu_cur_battery;
+static int pmu_cur_battery;
unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT;
struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
static int query_batt_timer = BATTERY_POLLING_COUNT;
static struct adb_request batt_req;
static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES];
-int __fake_sleep;
int asleep;
#ifdef CONFIG_ADB
@@ -1833,6 +1832,7 @@ pmu_present(void)
*/
static u32 save_via[8];
+static int __fake_sleep;
static void
save_via_state(void)
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 77612303841e..07f91ec1f960 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -56,7 +56,7 @@ static BLOCKING_NOTIFIER_HEAD(wf_client_list);
static int wf_client_count;
static unsigned int wf_overtemp;
static unsigned int wf_overtemp_counter;
-struct task_struct *wf_thread;
+static struct task_struct *wf_thread;
static struct platform_device wf_platform_device = {
.name = "windfarm",
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index ab467b9c31be..ba1ec6fc11d2 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -433,7 +433,7 @@ struct pm121_sys_state {
struct wf_pid_state pid;
};
-struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {};
+static struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {};
/*
* ****** CPU Fans Control Loop ******
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 79cb1ad09bfd..75966052819a 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -94,7 +94,7 @@ static int smu_set_fan(int pwm, u8 id, u16 value)
return rc;
wait_for_completion(&comp);
- /* Handle fallback (see coment above) */
+ /* Handle fallback (see comment above) */
if (cmd.status != 0 && smu_supports_new_fans_ops) {
printk(KERN_WARNING "windfarm: SMU failed new fan command "
"falling back to old method\n");
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2b6d6e9cd680..bea8c4429ae8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
#include "features.h"
#include <linux/blkdev.h>
+#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 541c45027cc8..6ab01ff25747 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3387,7 +3387,7 @@ static bool origin_dev_supports_discard(struct block_device *origin_bdev)
{
struct request_queue *q = bdev_get_queue(origin_bdev);
- return q && blk_queue_discard(q);
+ return blk_queue_discard(q);
}
/*
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 17712456fa63..c43d55672bce 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -276,12 +276,6 @@ static inline int superblock_read_lock(struct dm_clone_metadata *cmd,
return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}
-static inline int superblock_write_lock(struct dm_clone_metadata *cmd,
- struct dm_block **sblock)
-{
- return dm_bm_write_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
-}
-
static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd,
struct dm_block **sblock)
{
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 55bcfb74f51f..71475a2410be 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -28,7 +28,7 @@ struct ebs_c {
spinlock_t lock; /* Guard bios input list above. */
sector_t start; /* <start> table line argument, see ebs_ctr below. */
unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
- unsigned int u_bs; /* Underlying block size in sectors retrievd from/set on lower layer device. */
+ unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */
bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
};
@@ -43,7 +43,7 @@ static inline sector_t __block_mod(sector_t sector, unsigned int bs)
return sector & (bs - 1);
}
-/* Return number of blocks for a bio, accounting for misalignement of start and end sectors. */
+/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{
sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
@@ -171,7 +171,7 @@ static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
}
-/* Worker funtion to process incoming bios. */
+/* Worker function to process incoming bios. */
static void __ebs_process_bios(struct work_struct *ws)
{
int r;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 46b5d542b8fe..781942aeddd1 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -35,7 +35,7 @@
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
-#define RECALC_SECTORS 8192
+#define RECALC_SECTORS 32768
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
@@ -262,6 +262,7 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
+ bool reset_recalculate_flag;
bool discard;
bool fix_padding;
bool fix_hmac;
@@ -1428,8 +1429,10 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (op == TAG_READ) {
memcpy(tag, dp, to_copy);
} else if (op == TAG_WRITE) {
- memcpy(dp, tag, to_copy);
- dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+ if (memcmp(dp, tag, to_copy)) {
+ memcpy(dp, tag, to_copy);
+ dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+ }
} else {
/* e.g.: op == TAG_CMP */
@@ -2686,26 +2689,30 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
- io_req.mem.type = DM_IO_VMA;
- io_req.mem.ptr.addr = ic->recalc_buffer;
- io_req.notify.fn = NULL;
- io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
- io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = n_sectors;
+ if (!ic->discard) {
+ io_req.bi_op = REQ_OP_READ;
+ io_req.bi_op_flags = 0;
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.addr = ic->recalc_buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
- if (unlikely(r)) {
- dm_integrity_io_error(ic, "reading data", r);
- goto err;
- }
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
- t = ic->recalc_tags;
- for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
- t += ic->tag_size;
+ t = ic->recalc_tags;
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tag_size;
+ }
+ } else {
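+		/* Discard mode: no data to read; recalc_tags was pre-filled with DISCARD_FILLER */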
+ t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
}
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
@@ -3134,7 +3141,8 @@ static void dm_integrity_resume(struct dm_target *ti)
rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
if (ic->mode == 'B') {
- if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
+ !ic->reset_recalculate_flag) {
block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
@@ -3156,7 +3164,8 @@ static void dm_integrity_resume(struct dm_target *ti)
}
} else {
if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
- block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
+ ic->reset_recalculate_flag) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
}
@@ -3169,6 +3178,10 @@ static void dm_integrity_resume(struct dm_target *ti)
dm_integrity_io_error(ic, "writing superblock", r);
} else {
replay_journal(ic);
+ if (ic->reset_recalculate_flag) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
if (ic->mode == 'B') {
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
@@ -3242,6 +3255,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
arg_count += ic->mode == 'J';
arg_count += ic->mode == 'J';
@@ -3261,6 +3275,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate");
+ if (ic->reset_recalculate_flag)
+ DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
@@ -3914,7 +3930,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 17, "Invalid number of feature args"},
+ {0, 18, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
@@ -4039,6 +4055,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
+ goto bad;
}
ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
@@ -4058,6 +4075,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "reset_recalculate")) {
+ ic->recalculate_flag = true;
+ ic->reset_recalculate_flag = true;
} else if (!strcmp(opt_string, "allow_discards")) {
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
@@ -4348,11 +4368,13 @@ try_smaller_buffer:
goto bad;
}
INIT_WORK(&ic->recalc_work, integrity_recalc);
- ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
- if (!ic->recalc_buffer) {
- ti->error = "Cannot allocate buffer for recalculating";
- r = -ENOMEM;
- goto bad;
+ if (!ic->discard) {
+ ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+ if (!ic->recalc_buffer) {
+ ti->error = "Cannot allocate buffer for recalculating";
+ r = -ENOMEM;
+ goto bad;
+ }
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
@@ -4361,6 +4383,9 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ if (ic->discard)
+ memset(ic->recalc_tags, DISCARD_FILLER,
+ (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4554,7 +4579,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 7, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1ca65b434f1f..2209cbcd84db 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/rbtree.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
@@ -36,8 +37,10 @@ struct dm_file {
* name or uuid.
*---------------------------------------------------------------*/
struct hash_cell {
- struct list_head name_list;
- struct list_head uuid_list;
+ struct rb_node name_node;
+ struct rb_node uuid_node;
+ bool name_set;
+ bool uuid_set;
char *name;
char *uuid;
@@ -53,10 +56,8 @@ struct vers_iter {
};
-#define NUM_BUCKETS 64
-#define MASK_BUCKETS (NUM_BUCKETS - 1)
-static struct list_head _name_buckets[NUM_BUCKETS];
-static struct list_head _uuid_buckets[NUM_BUCKETS];
+static struct rb_root name_rb_tree = RB_ROOT;
+static struct rb_root uuid_rb_tree = RB_ROOT;
static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
@@ -70,73 +71,110 @@ static DECLARE_RWSEM(_hash_lock);
*/
static DEFINE_MUTEX(dm_hash_cells_mutex);
-static void init_buckets(struct list_head *buckets)
-{
- unsigned int i;
-
- for (i = 0; i < NUM_BUCKETS; i++)
- INIT_LIST_HEAD(buckets + i);
-}
-
-static int dm_hash_init(void)
-{
- init_buckets(_name_buckets);
- init_buckets(_uuid_buckets);
- return 0;
-}
-
static void dm_hash_exit(void)
{
dm_hash_remove_all(false, false, false);
}
/*-----------------------------------------------------------------
- * Hash function:
- * We're not really concerned with the str hash function being
- * fast since it's only used by the ioctl interface.
- *---------------------------------------------------------------*/
-static unsigned int hash_str(const char *str)
-{
- const unsigned int hash_mult = 2654435387U;
- unsigned int h = 0;
-
- while (*str)
- h = (h + (unsigned int) *str++) * hash_mult;
-
- return h & MASK_BUCKETS;
-}
-
-/*-----------------------------------------------------------------
* Code for looking up a device by name
*---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
{
- struct hash_cell *hc;
- unsigned int h = hash_str(str);
+ struct rb_node *n = name_rb_tree.rb_node;
- list_for_each_entry (hc, _name_buckets + h, name_list)
- if (!strcmp(hc->name, str)) {
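+	/* Binary search: if the node's name sorts after @str, descend left */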
+ while (n) {
+ struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
+ int c = strcmp(hc->name, str);
+ if (!c) {
dm_get(hc->md);
return hc;
}
+ n = c >= 0 ? n->rb_left : n->rb_right;
+ }
return NULL;
}
static struct hash_cell *__get_uuid_cell(const char *str)
{
- struct hash_cell *hc;
- unsigned int h = hash_str(str);
+ struct rb_node *n = uuid_rb_tree.rb_node;
- list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
- if (!strcmp(hc->uuid, str)) {
+ while (n) {
+ struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
+ int c = strcmp(hc->uuid, str);
+ if (!c) {
dm_get(hc->md);
return hc;
}
+ n = c >= 0 ? n->rb_left : n->rb_right;
+ }
return NULL;
}
+static void __unlink_name(struct hash_cell *hc)
+{
+ if (hc->name_set) {
+ hc->name_set = false;
+ rb_erase(&hc->name_node, &name_rb_tree);
+ }
+}
+
+static void __unlink_uuid(struct hash_cell *hc)
+{
+ if (hc->uuid_set) {
+ hc->uuid_set = false;
+ rb_erase(&hc->uuid_node, &uuid_rb_tree);
+ }
+}
+
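+/* (Re)insert @new_hc into the name tree; names must be unique (BUG_ON on match). */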
+static void __link_name(struct hash_cell *new_hc)
+{
+ struct rb_node **n, *parent;
+
+ __unlink_name(new_hc);
+
+ new_hc->name_set = true;
+
+ n = &name_rb_tree.rb_node;
+ parent = NULL;
+
+ while (*n) {
+ struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
+ int c = strcmp(hc->name, new_hc->name);
+ BUG_ON(!c);
+ parent = *n;
+ n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
+ }
+
+ rb_link_node(&new_hc->name_node, parent, n);
+ rb_insert_color(&new_hc->name_node, &name_rb_tree);
+}
+
+static void __link_uuid(struct hash_cell *new_hc)
+{
+ struct rb_node **n, *parent;
+
+ __unlink_uuid(new_hc);
+
+ new_hc->uuid_set = true;
+
+ n = &uuid_rb_tree.rb_node;
+ parent = NULL;
+
+ while (*n) {
+ struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
+ int c = strcmp(hc->uuid, new_hc->uuid);
+ BUG_ON(!c);
+ parent = *n;
+ n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
+ }
+
+ rb_link_node(&new_hc->uuid_node, parent, n);
+ rb_insert_color(&new_hc->uuid_node, &uuid_rb_tree);
+}
+
static struct hash_cell *__get_dev_cell(uint64_t dev)
{
struct mapped_device *md;
@@ -185,8 +223,7 @@ static struct hash_cell *alloc_cell(const char *name, const char *uuid,
}
}
- INIT_LIST_HEAD(&hc->name_list);
- INIT_LIST_HEAD(&hc->uuid_list);
+ hc->name_set = hc->uuid_set = false;
hc->md = md;
hc->new_map = NULL;
return hc;
@@ -226,16 +263,16 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
goto bad;
}
- list_add(&cell->name_list, _name_buckets + hash_str(name));
+ __link_name(cell);
if (uuid) {
hc = __get_uuid_cell(uuid);
if (hc) {
- list_del(&cell->name_list);
+ __unlink_name(cell);
dm_put(hc->md);
goto bad;
}
- list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
+ __link_uuid(cell);
}
dm_get(md);
mutex_lock(&dm_hash_cells_mutex);
@@ -256,9 +293,9 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
struct dm_table *table;
int srcu_idx;
- /* remove from the dev hash */
- list_del(&hc->uuid_list);
- list_del(&hc->name_list);
+ /* remove from the dev trees */
+ __unlink_name(hc);
+ __unlink_uuid(hc);
mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(hc->md, NULL);
mutex_unlock(&dm_hash_cells_mutex);
@@ -279,7 +316,8 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
- int i, dev_skipped;
+ int dev_skipped;
+ struct rb_node *n;
struct hash_cell *hc;
struct mapped_device *md;
struct dm_table *t;
@@ -289,40 +327,39 @@ retry:
down_write(&_hash_lock);
- for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_entry(hc, _name_buckets + i, name_list) {
- md = hc->md;
- dm_get(md);
+ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+ hc = container_of(n, struct hash_cell, name_node);
+ md = hc->md;
+ dm_get(md);
- if (keep_open_devices &&
- dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
- dm_put(md);
- dev_skipped++;
- continue;
- }
+ if (keep_open_devices &&
+ dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
+ dm_put(md);
+ dev_skipped++;
+ continue;
+ }
- t = __hash_remove(hc);
+ t = __hash_remove(hc);
- up_write(&_hash_lock);
+ up_write(&_hash_lock);
- if (t) {
- dm_sync_table(md);
- dm_table_destroy(t);
- }
- dm_put(md);
- if (likely(keep_open_devices))
- dm_destroy(md);
- else
- dm_destroy_immediate(md);
-
- /*
- * Some mapped devices may be using other mapped
- * devices, so repeat until we make no further
- * progress. If a new mapped device is created
- * here it will also get removed.
- */
- goto retry;
+ if (t) {
+ dm_sync_table(md);
+ dm_table_destroy(t);
}
+ dm_put(md);
+ if (likely(keep_open_devices))
+ dm_destroy(md);
+ else
+ dm_destroy_immediate(md);
+
+ /*
+ * Some mapped devices may be using other mapped
+ * devices, so repeat until we make no further
+ * progress. If a new mapped device is created
+ * here it will also get removed.
+ */
+ goto retry;
}
up_write(&_hash_lock);
@@ -340,7 +377,7 @@ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
hc->uuid = new_uuid;
mutex_unlock(&dm_hash_cells_mutex);
- list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
+ __link_uuid(hc);
}
/*
@@ -354,14 +391,14 @@ static char *__change_cell_name(struct hash_cell *hc, char *new_name)
/*
* Rename and move the name cell.
*/
- list_del(&hc->name_list);
+ __unlink_name(hc);
old_name = hc->name;
mutex_lock(&dm_hash_cells_mutex);
hc->name = new_name;
mutex_unlock(&dm_hash_cells_mutex);
- list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+ __link_name(hc);
return old_name;
}
@@ -503,9 +540,33 @@ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
return ((void *) param) + param->data_start;
}
+static bool filter_device(struct hash_cell *hc, const char *pfx_name, const char *pfx_uuid)
+{
+ const char *val;
+ size_t val_len, pfx_len;
+
+ val = hc->name;
+ val_len = strlen(val);
+ pfx_len = strnlen(pfx_name, DM_NAME_LEN);
+ if (pfx_len > val_len)
+ return false;
+ if (memcmp(val, pfx_name, pfx_len))
+ return false;
+
+ val = hc->uuid ? hc->uuid : "";
+ val_len = strlen(val);
+ pfx_len = strnlen(pfx_uuid, DM_UUID_LEN);
+ if (pfx_len > val_len)
+ return false;
+ if (memcmp(val, pfx_uuid, pfx_len))
+ return false;
+
+ return true;
+}
+
static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
- unsigned int i;
+ struct rb_node *n;
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
@@ -518,11 +579,14 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
* Loop through all the devices working out how much
* space we need.
*/
- for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_entry (hc, _name_buckets + i, name_list) {
- needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
- needed += align_val(sizeof(uint32_t));
- }
+ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+ hc = container_of(n, struct hash_cell, name_node);
+ if (!filter_device(hc, param->name, param->uuid))
+ continue;
+ needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+ needed += align_val(sizeof(uint32_t) * 2);
+ if (param->flags & DM_UUID_FLAG && hc->uuid)
+ needed += align_val(strlen(hc->uuid) + 1);
}
/*
@@ -540,21 +604,34 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
/*
* Now loop through filling out the names.
*/
- for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_entry (hc, _name_buckets + i, name_list) {
- if (old_nl)
- old_nl->next = (uint32_t) ((void *) nl -
- (void *) old_nl);
- disk = dm_disk(hc->md);
- nl->dev = huge_encode_dev(disk_devt(disk));
- nl->next = 0;
- strcpy(nl->name, hc->name);
-
- old_nl = nl;
- event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
- *event_nr = dm_get_event_nr(hc->md);
- nl = align_ptr(event_nr + 1);
+ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+ void *uuid_ptr;
+ hc = container_of(n, struct hash_cell, name_node);
+ if (!filter_device(hc, param->name, param->uuid))
+ continue;
+ if (old_nl)
+ old_nl->next = (uint32_t) ((void *) nl -
+ (void *) old_nl);
+ disk = dm_disk(hc->md);
+ nl->dev = huge_encode_dev(disk_devt(disk));
+ nl->next = 0;
+ strcpy(nl->name, hc->name);
+
+ old_nl = nl;
+ event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
+ event_nr[0] = dm_get_event_nr(hc->md);
+ event_nr[1] = 0;
+ uuid_ptr = align_ptr(event_nr + 2);
+ if (param->flags & DM_UUID_FLAG) {
+ if (hc->uuid) {
+ event_nr[1] |= DM_NAME_LIST_FLAG_HAS_UUID;
+ strcpy(uuid_ptr, hc->uuid);
+ uuid_ptr = align_ptr(uuid_ptr + strlen(hc->uuid) + 1);
+ } else {
+ event_nr[1] |= DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID;
+ }
}
+ nl = uuid_ptr;
}
/*
* If mismatch happens, security may be compromised due to buffer
@@ -1991,14 +2068,9 @@ int __init dm_interface_init(void)
{
int r;
- r = dm_hash_init();
- if (r)
- return r;
-
r = misc_register(&_dm_misc);
if (r) {
DMERR("misc_register failed for control device");
- dm_hash_exit();
return r;
}
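
For readers less familiar with the kernel rbtree API used throughout this hunk: <linux/rbtree.h> is an intrusive interface, so the caller embeds a struct rb_node in its own structure, walks the tree with its own comparator, and attaches new nodes with rb_link_node()/rb_insert_color(). A minimal sketch of that pattern (names are illustrative, not the dm-ioctl code):

#include <linux/rbtree.h>
#include <linux/string.h>

struct item {
	struct rb_node node;
	const char *key;
};

static struct rb_root item_tree = RB_ROOT;

/* Returns false if an item with the same key is already present. */
static bool item_insert(struct item *new)
{
	struct rb_node **n = &item_tree.rb_node, *parent = NULL;

	while (*n) {
		struct item *it = rb_entry(*n, struct item, node);
		int c = strcmp(it->key, new->key);

		if (!c)
			return false;			/* duplicate key */
		parent = *n;
		n = c >= 0 ? &(*n)->rb_left : &(*n)->rb_right;
	}
	rb_link_node(&new->node, parent, n);		/* attach as a leaf */
	rb_insert_color(&new->node, &item_tree);	/* rebalance */
	return true;
}

Note that the branch test on insert must match the one used on lookup, which is why __link_uuid() and __get_uuid_cell() above both branch on c >= 0.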
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index cab12b2251ba..bf4a467fc73a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1853,6 +1853,7 @@ static int rs_check_takeover(struct raid_set *rs)
((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
__within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
return 0;
+ break;
default:
break;
@@ -1868,6 +1869,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
return rs->md.new_level != rs->md.level;
}
+/* True if layout is set to reshape. */
+static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
+{
+ return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
+ rs->md.new_layout != rs->md.layout ||
+ rs->md.new_chunk_sectors != rs->md.chunk_sectors;
+}
+
/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
@@ -1880,9 +1889,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_is_raid0(rs))
return false;
- change = mddev->new_layout != mddev->layout ||
- mddev->new_chunk_sectors != mddev->chunk_sectors ||
- rs->delta_disks;
+ change = rs_is_layout_change(rs, false);
/* Historical case to support raid1 reshape without delta disks */
if (rs_is_raid1(rs)) {
@@ -2817,7 +2824,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
}
/*
- *
+ * Reshape:
* - change raid layout
* - change chunk size
* - add disks
@@ -2927,6 +2934,20 @@ static int rs_setup_reshape(struct raid_set *rs)
}
/*
+ * If the md resync thread has updated superblock with max reshape position
+ * at the end of a reshape but not (yet) reset the layout configuration
+ * changes -> reset the latter.
+ */
+static void rs_reset_inconclusive_reshape(struct raid_set *rs)
+{
+ if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
+ rs_set_cur(rs);
+ rs->md.delta_disks = 0;
+ rs->md.reshape_backwards = 0;
+ }
+}
+
+/*
* Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members.
*/
@@ -3212,11 +3233,14 @@ size_check:
if (r)
goto bad;
+ /* Catch any inconclusive reshape superblock content. */
+ rs_reset_inconclusive_reshape(rs);
+
/* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1;
rs->md.in_sync = 1;
- /* Keep array frozen */
+ /* Keep array frozen until resume. */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
@@ -3230,7 +3254,6 @@ size_check:
}
r = md_start(&rs->md);
-
if (r) {
ti->error = "Failed to start raid array";
mddev_unlock(&rs->md);
@@ -3727,15 +3750,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, chunk_size_bytes);
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
-
- /*
- * RAID0 and RAID10 personalities require bio splitting,
- * RAID1/4/5/6 don't and process large discard bios properly.
- */
- if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
- limits->discard_granularity = chunk_size_bytes;
- limits->max_discard_sectors = rs->md.chunk_sectors;
- }
}
static void raid_postsuspend(struct dm_target *ti)
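
The break added to rs_check_takeover() above terminates a case label that previously fell through into default:. The fallthrough was harmless at run time (default: only breaks), but Clang's -Wimplicit-fallthrough requires every case to end in break, return, goto, or an explicit fallthrough; marker. A toy illustration of the rule, with made-up values (the fallthrough macro comes from <linux/compiler_attributes.h>):

#include <linux/compiler_attributes.h>

static int scale(int level, int sectors)
{
	switch (level) {
	case 5:
		sectors *= 2;
		fallthrough;	/* intentional: level 5 also runs the level-6 step */
	case 6:
		sectors += 1;
		break;		/* required: silently reaching default would warn */
	default:
		break;
	}
	return sectors;
}

The same warning motivates the break added to bytes_str_to_int() in drivers/mtd/ubi/build.c further down.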
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 13b4385f4d5a..9c3bc3711b33 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -569,6 +569,7 @@ out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
kfree(md->tag_set);
+ md->tag_set = NULL;
return err;
}
@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set);
+ md->tag_set = NULL;
}
}
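
Setting md->tag_set to NULL after kfree() is what makes these two paths safe against each other: dm_mq_cleanup_mapped_device() gates on md->tag_set, so a stale pointer left behind by the init error path would trigger a double blk_mq_free_tag_set()/kfree(). The general shape of the hazard, sketched with illustrative names:

#include <linux/blk-mq.h>
#include <linux/slab.h>

struct ctx {
	struct blk_mq_tag_set *tag_set;
};

static void ctx_cleanup(struct ctx *c)
{
	if (c->tag_set) {		/* this guard only works if ...           */
		blk_mq_free_tag_set(c->tag_set);
		kfree(c->tag_set);
		c->tag_set = NULL;	/* ... every free path resets the pointer */
	}
}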
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 8e329c3f3a78..9ab4bf651ca9 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -596,7 +596,7 @@ static void persistent_dtr(struct dm_exception_store *store)
free_area(ps);
/* Allocated in persistent_read_metadata */
- vfree(ps->callbacks);
+ kvfree(ps->callbacks);
kfree(ps);
}
@@ -621,8 +621,8 @@ static int persistent_read_metadata(struct dm_exception_store *store,
*/
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception);
- ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
- sizeof(*ps->callbacks));
+ ps->callbacks = kvcalloc(ps->exceptions_per_area,
+ sizeof(*ps->callbacks), GFP_KERNEL);
if (!ps->callbacks)
return -ENOMEM;
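
The persistent_dtr() change is not cosmetic: memory obtained from kvcalloc() may be backed by either kmalloc or vmalloc depending on size, and only kvfree() handles both; calling vfree() on a kmalloc'ed pointer is a bug. A minimal sketch of the pairing rule (assuming the usual declarations from <linux/mm.h>):

#include <linux/mm.h>	/* kvcalloc(), kvfree() */

static int *alloc_counters(size_t n)
{
	/* Transparently falls back to vmalloc when n is large. */
	return kvcalloc(n, sizeof(int), GFP_KERNEL);
}

static void free_counters(int *p)
{
	kvfree(p);	/* correct for both kmalloc- and vmalloc-backed memory */
}

The same reasoning applies to the kvmalloc_array()/kvfree() and kvcalloc()/kvfree() pairs introduced in dm-snap.c and dm-table.c below.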
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 11890db71f3f..a2acb014c13a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -663,7 +663,8 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
+ et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
+ GFP_KERNEL);
if (!et->table)
return -ENOMEM;
@@ -689,7 +690,7 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
kmem_cache_free(mem, ex);
}
- vfree(et->table);
+ kvfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e5f0f1703c5d..ee47a332b462 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -94,24 +94,6 @@ static int setup_btree_index(unsigned int l, struct dm_table *t)
return 0;
}
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
-{
- unsigned long size;
- void *addr;
-
- /*
- * Check that we're not going to overflow.
- */
- if (nmemb > (ULONG_MAX / elem_size))
- return NULL;
-
- size = nmemb * elem_size;
- addr = vzalloc(size);
-
- return addr;
-}
-EXPORT_SYMBOL(dm_vcalloc);
-
/*
* highs, and targets are managed as dynamic arrays during a
* table load.
@@ -124,15 +106,15 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
/*
* Allocate both the target array and offset array at once.
*/
- n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
- sizeof(sector_t));
+ n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
+ GFP_KERNEL);
if (!n_highs)
return -ENOMEM;
n_targets = (struct dm_target *) (n_highs + num);
memset(n_highs, -1, sizeof(*n_highs) * num);
- vfree(t->highs);
+ kvfree(t->highs);
t->num_allocated = num;
t->highs = n_highs;
@@ -198,7 +180,7 @@ void dm_table_destroy(struct dm_table *t)
/* free the indexes */
if (t->depth >= 2)
- vfree(t->index[t->depth - 2]);
+ kvfree(t->index[t->depth - 2]);
/* free the targets */
for (i = 0; i < t->num_targets; i++) {
@@ -210,7 +192,7 @@ void dm_table_destroy(struct dm_table *t)
dm_put_target_type(tgt->type);
}
- vfree(t->highs);
+ kvfree(t->highs);
/* free the device list */
free_devices(&t->devices, t->md);
@@ -1077,7 +1059,7 @@ static int setup_indexes(struct dm_table *t)
total += t->counts[i];
}
- indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
+ indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
if (!indexes)
return -ENOMEM;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fff4c50df74d..985baee3a678 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2816,7 +2816,7 @@ static bool data_dev_supports_discard(struct pool_c *pt)
{
struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
- return q && blk_queue_discard(q);
+ return blk_queue_discard(q);
}
static bool is_factor(sector_t block_size, uint32_t n)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 808a98ef624c..d3e76aefc1a6 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -893,6 +893,28 @@ out:
return r;
}
+static inline bool verity_is_verity_mode(const char *arg_name)
+{
+ return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
+}
+
+static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
+{
+ if (v->mode)
+ return -EINVAL;
+
+ if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
+ v->mode = DM_VERITY_MODE_LOGGING;
+ else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
+ v->mode = DM_VERITY_MODE_RESTART;
+ else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
+ v->mode = DM_VERITY_MODE_PANIC;
+
+ return 0;
+}
+
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
struct dm_verity_sig_opts *verify_args)
{
@@ -916,16 +938,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
arg_name = dm_shift_arg(as);
argc--;
- if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
- v->mode = DM_VERITY_MODE_LOGGING;
- continue;
-
- } else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
- v->mode = DM_VERITY_MODE_RESTART;
- continue;
-
- } else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC)) {
- v->mode = DM_VERITY_MODE_PANIC;
+ if (verity_is_verity_mode(arg_name)) {
+ r = verity_parse_verity_mode(v, arg_name);
+ if (r) {
+ ti->error = "Conflicting error handling parameters";
+ return r;
+ }
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
@@ -1242,7 +1260,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4f72b6f66c3a..aecc246ade26 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -73,7 +73,7 @@ struct wc_memory_superblock {
};
__le64 padding[8];
};
- struct wc_memory_entry entries[0];
+ struct wc_memory_entry entries[];
};
struct wc_entry {
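
entries[0] above is the old GNU zero-length-array idiom for a trailing variable-sized region; the C99 flexible array member entries[] expresses the same layout while letting compiler bounds checking and the struct_size() helper from <linux/overflow.h> work. A sketch of the usual allocation pattern for such a struct (types simplified):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct entry { __le64 a; __le64 b; };

struct sb {
	__le64 seq;
	struct entry entries[];	/* flexible array member */
};

static struct sb *sb_alloc(size_t n)
{
	struct sb *s;

	/* struct_size() == sizeof(*s) + n * sizeof(s->entries[0]),
	 * saturating to SIZE_MAX on overflow so the allocation fails. */
	s = kzalloc(struct_size(s, entries, n), GFP_KERNEL);
	return s;
}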
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3f3be9408afa..ca2aedd8ee7d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,7 +840,6 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
*result = &td->dm_dev;
return 0;
}
-EXPORT_SYMBOL_GPL(dm_get_table_device);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
@@ -854,7 +853,6 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
}
mutex_unlock(&md->table_devices_lock);
}
-EXPORT_SYMBOL(dm_put_table_device);
static void free_table_devices(struct list_head *devices)
{
@@ -1641,38 +1639,35 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error) {
- error = __split_and_process_non_flush(&ci);
- if (ci.sector_count && !error) {
- /*
- * Remainder must be passed to submit_bio_noacct()
- * so that it gets handled *after* bios already submitted
- * have been completely processed.
- * We take a clone of the original to store in
- * ci.io->orig_bio to be used by end_io_acct() and
- * for dec_pending to use for completion handling.
- */
- struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
- GFP_NOIO, &md->queue->bio_split);
- ci.io->orig_bio = b;
-
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
- bio_chain(b, bio);
- trace_block_split(b, bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
- break;
- }
+ error = __split_and_process_non_flush(&ci);
+ if (ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to submit_bio_noacct()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio to be used by end_io_acct() and
+ * for dec_pending to use for completion handling.
+ */
+ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+ GFP_NOIO, &md->queue->bio_split);
+ ci.io->orig_bio = b;
+
+ /*
+ * Adjust IO stats for each split, otherwise upon queue
+ * reentry there will be redundant IO accounting.
+ * NOTE: this is a stop-gap fix, a proper fix involves
+ * significant refactoring of DM core's bio splitting
+ * (by eliminating DM's splitting and just using bio_split)
+ */
+ part_stat_lock();
+ __dm_part_stat_sub(dm_disk(md)->part0,
+ sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+ part_stat_unlock();
+
+ bio_chain(b, bio);
+ trace_block_split(b, bio->bi_iter.bi_sector);
+ ret = submit_bio_noacct(bio);
}
}
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index fe073d92f01e..b1788853a355 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -34,12 +34,12 @@ struct node_header {
__le32 max_entries;
__le32 value_size;
__le32 padding;
-} __packed;
+} __attribute__((packed, aligned(8)));
struct btree_node {
struct node_header header;
__le64 keys[];
-} __packed;
+} __attribute__((packed, aligned(8)));
/*
@@ -83,7 +83,7 @@ struct shadow_spine {
};
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
-int exit_shadow_spine(struct shadow_spine *s);
+void exit_shadow_spine(struct shadow_spine *s);
int shadow_step(struct shadow_spine *s, dm_block_t b,
struct dm_btree_value_type *vt);
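
The __packed to __attribute__((packed, aligned(8))) change matters for on-disk formats: plain packed drops the struct's alignment to 1, while packed, aligned(8) keeps the byte-exact field layout and restores 8-byte alignment. These structs are already a multiple of 8 bytes, so no tail padding is added and the on-disk layout is unchanged; the compiler is simply allowed to use naturally aligned 64-bit accesses again. The effect, demonstrated with compile-time checks:

#include <linux/build_bug.h>
#include <linux/types.h>

struct e_packed {
	__le64 blocknr;
	__le32 nr_free;
	__le32 none_free_before;
} __attribute__((packed));

struct e_aligned {
	__le64 blocknr;
	__le32 nr_free;
	__le32 none_free_before;
} __attribute__((packed, aligned(8)));

static void check(void)
{
	/* Same size and byte layout; only the required alignment differs. */
	BUILD_BUG_ON(sizeof(struct e_packed) != sizeof(struct e_aligned));
	BUILD_BUG_ON(__alignof__(struct e_packed) != 1);
	BUILD_BUG_ON(__alignof__(struct e_aligned) != 8);
}

The identical change is applied to the on-disk structs in dm-space-map-common.h below.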
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 8a2bfbfb218b..2061ab865567 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -30,8 +30,6 @@ static void node_prepare_for_write(struct dm_block_validator *v,
h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
-
- BUG_ON(node_check(v, b, 4096));
}
static int node_check(struct dm_block_validator *v,
@@ -183,15 +181,13 @@ void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
s->count = 0;
}
-int exit_shadow_spine(struct shadow_spine *s)
+void exit_shadow_spine(struct shadow_spine *s)
{
- int r = 0, i;
+ int i;
for (i = 0; i < s->count; i++) {
unlock_block(s->info, s->nodes[i]);
}
-
- return r;
}
int shadow_step(struct shadow_spine *s, dm_block_t b,
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index d8b4125e338c..a213bf11738f 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
*/
begin = do_div(index_begin, ll->entries_per_block);
end = do_div(end, ll->entries_per_block);
+ if (end == 0)
+ end = ll->entries_per_block;
for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk;
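
The two added lines fix a boundary bug in the remainder arithmetic: do_div(x, base) divides x in place (x becomes the quotient) and returns the remainder, and here the remainder is reused as the search limit within the final bitmap block. When the search end falls exactly on a block boundary the remainder is 0, which the old code misread as "search zero entries in the last block", skipping it entirely. For example, with entries_per_block = 8 and end = 16, do_div() yields quotient 2 and remainder 0, so the last block was searched over an empty range instead of all 8 entries. A sketch of the corrected conversion:

#include <asm/div64.h>
#include <linux/types.h>

static u32 last_block_limit(u64 end, u32 entries_per_block)
{
	u32 rem = do_div(end, entries_per_block);	/* end := quotient */

	/* remainder 0 means "on a boundary", i.e. a full final block,
	 * not an empty one */
	return rem ? rem : entries_per_block;
}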
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index 8de63ce39bdd..87e17909ef52 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -33,7 +33,7 @@ struct disk_index_entry {
__le64 blocknr;
__le32 nr_free;
__le32 none_free_before;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#define MAX_METADATA_BITMAPS 255
@@ -43,7 +43,7 @@ struct disk_metadata_index {
__le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS];
-} __packed;
+} __attribute__ ((packed, aligned(8)));
struct ll_disk;
@@ -86,7 +86,7 @@ struct disk_sm_root {
__le64 nr_allocated;
__le64 bitmap_root;
__le64 ref_count_root;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#define ENTRIES_PER_BYTE 4
@@ -94,7 +94,7 @@ struct disk_bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
enum allocation_event {
SM_NONE,
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index bf4c5e2ccb6f..61f56909e00b 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -187,13 +187,8 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
static int sm_disk_commit(struct dm_space_map *sm)
{
int r;
- dm_block_t nr_free;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- r = sm_disk_get_nr_free(sm, &nr_free);
- if (r)
- return r;
-
r = sm_ll_commit(&smd->ll);
if (r)
return r;
@@ -202,10 +197,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
- r = sm_disk_get_nr_free(sm, &nr_free);
- if (r)
- return r;
-
return 0;
}
diff --git a/drivers/media/usb/pwc/pwc-uncompress.c b/drivers/media/usb/pwc/pwc-uncompress.c
index abfc88391036..68bc3829c6b3 100644
--- a/drivers/media/usb/pwc/pwc-uncompress.c
+++ b/drivers/media/usb/pwc/pwc-uncompress.c
@@ -9,9 +9,6 @@
Please send bug reports and support requests to <luc@saillard.org>.
The decompression routines have been implemented by reverse-engineering the
Nemosoft binary pwcx module. Caveat emptor.
-
-
- vim: set ts=8:
*/
#include <asm/current.h>
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index f2f565281e63..a777b389a66e 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -6,11 +6,14 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -1096,6 +1099,29 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
+static inline enum dma_data_direction uvc_stream_dir(
+ struct uvc_streaming *stream)
+{
+ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return DMA_FROM_DEVICE;
+ else
+ return DMA_TO_DEVICE;
+}
+
+static inline struct device *uvc_stream_to_dmadev(struct uvc_streaming *stream)
+{
+ return bus_to_hcd(stream->dev->udev->bus)->self.sysdev;
+}
+
+static int uvc_submit_urb(struct uvc_urb *uvc_urb, gfp_t mem_flags)
+{
+ /* Sync DMA. */
+ dma_sync_sgtable_for_device(uvc_stream_to_dmadev(uvc_urb->stream),
+ uvc_urb->sgt,
+ uvc_stream_dir(uvc_urb->stream));
+ return usb_submit_urb(uvc_urb->urb, mem_flags);
+}
+
/*
* uvc_video_decode_data_work: Asynchronous memcpy processing
*
@@ -1117,7 +1143,7 @@ static void uvc_video_copy_data_work(struct work_struct *work)
uvc_queue_buffer_release(op->buf);
}
- ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
+ ret = uvc_submit_urb(uvc_urb, GFP_KERNEL);
if (ret < 0)
dev_err(&uvc_urb->stream->intf->dev,
"Failed to resubmit video URB (%d).\n", ret);
@@ -1537,6 +1563,12 @@ static void uvc_video_complete(struct urb *urb)
/* Re-initialise the URB async work. */
uvc_urb->async_operations = 0;
+ /* Sync DMA and invalidate vmap range. */
+ dma_sync_sgtable_for_cpu(uvc_stream_to_dmadev(uvc_urb->stream),
+ uvc_urb->sgt, uvc_stream_dir(stream));
+ invalidate_kernel_vmap_range(uvc_urb->buffer,
+ uvc_urb->stream->urb_size);
+
/*
* Process the URB headers, and optionally queue expensive memcpy tasks
* to be deferred to a work queue.
@@ -1545,7 +1577,7 @@ static void uvc_video_complete(struct urb *urb)
/* If no async work is needed, resubmit the URB immediately. */
if (!uvc_urb->async_operations) {
- ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ ret = uvc_submit_urb(uvc_urb, GFP_ATOMIC);
if (ret < 0)
dev_err(&stream->intf->dev,
"Failed to resubmit video URB (%d).\n", ret);
@@ -1560,24 +1592,49 @@ static void uvc_video_complete(struct urb *urb)
*/
static void uvc_free_urb_buffers(struct uvc_streaming *stream)
{
+ struct device *dma_dev = uvc_stream_to_dmadev(stream);
struct uvc_urb *uvc_urb;
for_each_uvc_urb(uvc_urb, stream) {
if (!uvc_urb->buffer)
continue;
-#ifndef CONFIG_DMA_NONCOHERENT
- usb_free_coherent(stream->dev->udev, stream->urb_size,
- uvc_urb->buffer, uvc_urb->dma);
-#else
- kfree(uvc_urb->buffer);
-#endif
+ dma_vunmap_noncontiguous(dma_dev, uvc_urb->buffer);
+ dma_free_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt,
+ uvc_stream_dir(stream));
+
uvc_urb->buffer = NULL;
+ uvc_urb->sgt = NULL;
}
stream->urb_size = 0;
}
+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+ struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+ struct device *dma_dev = uvc_stream_to_dmadev(stream);
+
+ uvc_urb->sgt = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
+ uvc_stream_dir(stream),
+ gfp_flags, 0);
+ if (!uvc_urb->sgt)
+ return false;
+ uvc_urb->dma = uvc_urb->sgt->sgl->dma_address;
+
+ uvc_urb->buffer = dma_vmap_noncontiguous(dma_dev, stream->urb_size,
+ uvc_urb->sgt);
+ if (!uvc_urb->buffer) {
+ dma_free_noncontiguous(dma_dev, stream->urb_size,
+ uvc_urb->sgt,
+ uvc_stream_dir(stream));
+ uvc_urb->sgt = NULL;
+ return false;
+ }
+
+ return true;
+}
+
/*
* Allocate transfer buffers. This function can be called with buffers
* already allocated when resuming from suspend, in which case it will
@@ -1608,19 +1665,12 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
/* Retry allocations until one succeed. */
for (; npackets > 1; npackets /= 2) {
+ stream->urb_size = psize * npackets;
+
for (i = 0; i < UVC_URBS; ++i) {
struct uvc_urb *uvc_urb = &stream->uvc_urb[i];
- stream->urb_size = psize * npackets;
-#ifndef CONFIG_DMA_NONCOHERENT
- uvc_urb->buffer = usb_alloc_coherent(
- stream->dev->udev, stream->urb_size,
- gfp_flags | __GFP_NOWARN, &uvc_urb->dma);
-#else
- uvc_urb->buffer =
- kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
-#endif
- if (!uvc_urb->buffer) {
+ if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) {
uvc_free_urb_buffers(stream);
break;
}
@@ -1730,12 +1780,8 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
urb->context = uvc_urb;
urb->pipe = usb_rcvisocpipe(stream->dev->udev,
ep->desc.bEndpointAddress);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#else
- urb->transfer_flags = URB_ISO_ASAP;
-#endif
urb->interval = ep->desc.bInterval;
urb->transfer_buffer = uvc_urb->buffer;
urb->complete = uvc_video_complete;
@@ -1795,10 +1841,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer,
size, uvc_video_complete, uvc_urb);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#endif
uvc_urb->urb = urb;
}
@@ -1895,7 +1939,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
/* Submit the URBs. */
for_each_uvc_urb(uvc_urb, stream) {
- ret = usb_submit_urb(uvc_urb->urb, gfp_flags);
+ ret = uvc_submit_urb(uvc_urb, gfp_flags);
if (ret < 0) {
dev_err(&stream->intf->dev,
"Failed to submit URB %u (%d).\n",
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 97df5ecd66c9..cce5e38133cd 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -219,6 +219,7 @@
*/
struct gpio_desc;
+struct sg_table;
struct uvc_device;
/* TODO: Put the most frequently accessed fields at the beginning of
@@ -545,7 +546,8 @@ struct uvc_copy_op {
* @urb: the URB described by this context structure
* @stream: UVC streaming context
* @buffer: memory storage for the URB
- * @dma: DMA coherent addressing for the urb_buffer
+ * @dma: Allocated DMA handle
+ * @sgt: sgt_table with the urb locations in memory
* @async_operations: counter to indicate the number of copy operations
* @copy_operations: work descriptors for asynchronous copy operations
* @work: work queue entry for asynchronous decode
@@ -556,6 +558,7 @@ struct uvc_urb {
char *buffer;
dma_addr_t dma;
+ struct sg_table *sgt;
unsigned int async_operations;
struct uvc_copy_op copy_operations[UVC_MAX_PACKETS];
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 94843e0e51c6..bae18ef03dcb 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -385,6 +385,33 @@ static void uacce_release(struct device *dev)
kfree(uacce);
}
+static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
+{
+ if (!(flags & UACCE_DEV_SVA))
+ return flags;
+
+ flags &= ~UACCE_DEV_SVA;
+
+ if (iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF))
+ return flags;
+
+ if (iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA)) {
+ iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
+ return flags;
+ }
+
+ return flags | UACCE_DEV_SVA;
+}
+
+static void uacce_disable_sva(struct uacce_device *uacce)
+{
+ if (!(uacce->flags & UACCE_DEV_SVA))
+ return;
+
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
+}
+
/**
* uacce_alloc() - alloc an accelerator
* @parent: pointer of uacce parent device
@@ -404,11 +431,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (!uacce)
return ERR_PTR(-ENOMEM);
- if (flags & UACCE_DEV_SVA) {
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
- if (ret)
- flags &= ~UACCE_DEV_SVA;
- }
+ flags = uacce_enable_sva(parent, flags);
uacce->parent = parent;
uacce->flags = flags;
@@ -432,8 +455,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
return uacce;
err_with_uacce:
- if (flags & UACCE_DEV_SVA)
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ uacce_disable_sva(uacce);
kfree(uacce);
return ERR_PTR(ret);
}
@@ -487,8 +509,7 @@ void uacce_remove(struct uacce_device *uacce)
mutex_unlock(&uacce->queues_lock);
/* disable sva now since no opened queues */
- if (uacce->flags & UACCE_DEV_SVA)
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ uacce_disable_sva(uacce);
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
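
uacce_enable_sva() above also encodes an ordering rule: IOMMU_DEV_FEAT_IOPF (I/O page faults) must be enabled before IOMMU_DEV_FEAT_SVA and disabled after it, with a failure halfway rolled back. Restated as the generic enable/rollback shape it follows (a sketch of the same pattern, not new API):

#include <linux/iommu.h>

static int enable_sva_features(struct device *dev)
{
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)		/* undo step one; teardown runs in reverse */
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}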
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f399edc82191..a7e3eb9befb6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1350,6 +1350,7 @@ static int bytes_str_to_int(const char *str)
fallthrough;
case 'K':
result *= 1024;
+ break;
case '\0':
break;
default:
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c2da77163f94..7c083ad58274 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -388,8 +388,6 @@ struct ubi_volume_desc {
int mode;
};
-struct ubi_wl_entry;
-
/**
* struct ubi_debug_info - debugging information for an UBI device.
*
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bcd31f458d1a..74dc8e249faa 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -502,6 +502,8 @@ source "drivers/net/wan/Kconfig"
source "drivers/net/ieee802154/Kconfig"
+source "drivers/net/wwan/Kconfig"
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
@@ -579,6 +581,7 @@ config NETDEVSIM
depends on DEBUG_FS
depends on INET
depends on IPV6 || IPV6=n
+ depends on PSAMPLE || PSAMPLE=n
select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f4990ff32fa4..7ffd2d03efaf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_ARCNET) += arcnet/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
-obj-y += dsa/
+obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
@@ -68,6 +68,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_IEEE802154) += ieee802154/
+obj-$(CONFIG_WWAN) += wwan/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 890c86e11bcc..df79e7370bcc 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -59,9 +59,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
* look for EISA/PCI cards in addition to ISA cards).
*/
static struct devprobe2 isa_probes[] __initdata = {
-#if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */
- {hp100_probe, 0},
-#endif
#ifdef CONFIG_3C515
{tc515_probe, 0},
#endif
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 7511bca9c15e..edfad93e7b68 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -218,6 +218,7 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
if (err < 0)
return ERR_PTR(err);
+ udp_allow_gso(sock->sk);
return sock;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c3091e00dd5f..3455f2cc13f2 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1098,7 +1098,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
* If @slave's permanent hw address is different both from its current
* address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
- * We'll make sure that that slave no longer uses @slave's permanent address.
+ * We'll make sure that slave no longer uses @slave's permanent address.
*
* Caller must hold RTNL and no other locks
*/
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 74cbbb22470b..20bbda1b36e1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -964,7 +964,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
}
/**
- * change_active_interface - change the active slave into the specified one
+ * bond_change_active_slave - change the active slave into the specified one
* @bond: our bonding struct
* @new_active: the new slave to make the active one
*
@@ -4391,9 +4391,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
int agg_id = 0;
int ret = 0;
-#ifdef CONFIG_LOCKDEP
- WARN_ON(lockdep_is_held(&bond->mode_lock));
-#endif
+ might_sleep();
usable_slaves = kzalloc(struct_size(usable_slaves, arr,
bond->slave_cnt), GFP_KERNEL);
@@ -4406,7 +4404,9 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
+ spin_lock_bh(&bond->mode_lock);
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ spin_unlock_bh(&bond->mode_lock);
pr_debug("bond_3ad_get_active_agg_info failed\n");
			/* No active aggregator means it's not safe to use
* the previous array.
@@ -4414,6 +4414,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
bond_reset_slave_arr(bond);
goto out;
}
+ spin_unlock_bh(&bond->mode_lock);
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
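
Replacing the lockdep assertion with might_sleep() is strictly stronger in debug builds: with CONFIG_DEBUG_ATOMIC_SLEEP it complains whenever the function is entered from any atomic context, under any spinlock rather than just bond->mode_lock, and it documents the GFP_KERNEL allocation that follows. The companion hunk then takes mode_lock only around bond_3ad_get_active_agg_info(), the one step that still reads state the lock protects. The resulting shape, with illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct bond_like {
	spinlock_t mode_lock;
};

static int rebuild_array(struct bond_like *b)
{
	void *arr;

	might_sleep();			/* WARNs if called from atomic context */

	arr = kzalloc(64, GFP_KERNEL);	/* may sleep: no spinlock may be held */
	if (!arr)
		return -ENOMEM;

	spin_lock_bh(&b->mode_lock);	/* lock only the short, non-sleeping */
	/* ... read 802.3ad aggregator state ... */
	spin_unlock_bh(&b->mode_lock);	/* critical section */

	kfree(arr);
	return 0;
}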
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 77d7c38bd435..c9d3604ae129 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -640,6 +640,15 @@ static void bond_opt_error_interpret(struct bonding *bond,
netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
opt->name);
break;
+ case -ENODEV:
+ if (val && val->string) {
+ p = strchr(val->string, '\n');
+ if (p)
+ *p = '\0';
+ netdev_err(bond->dev, "option %s: interface %s does not exist!\n",
+ opt->name, val->string);
+ }
+ break;
default:
break;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 1c28eade6bec..e355d3974977 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -103,7 +103,7 @@ config CAN_FLEXCAN
config CAN_GRCAN
tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
- depends on OF && HAS_DMA
+ depends on OF && HAS_DMA && HAS_IOMEM
help
Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
Note that the driver supports little endian, even though little
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 6958830cb983..313793f6922d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -132,7 +132,6 @@
/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
-
/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
@@ -161,9 +160,7 @@
#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB)
-/*
- * Use IF1 for RX and IF2 for TX
- */
+/* Use IF1 for RX and IF2 for TX */
#define IF_RX 0
#define IF_TX 1
@@ -173,9 +170,6 @@
/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS 1000
-/* napi related */
-#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
-
/* c_can lec values */
enum c_can_lec_type {
LEC_NO_ERROR = 0,
@@ -189,8 +183,7 @@ enum c_can_lec_type {
LEC_MASK = LEC_UNUSED,
};
-/*
- * c_can error types:
+/* c_can error types:
* Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
*/
enum c_can_bus_error_types {
@@ -253,7 +246,6 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj
udelay(1);
}
netdev_err(dev, "Updating object timed out\n");
-
}
static inline void c_can_object_get(struct net_device *dev, int iface,
@@ -268,8 +260,7 @@ static inline void c_can_object_put(struct net_device *dev, int iface,
c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
}
-/*
- * Note: According to documentation clearing TXIE while MSGVAL is set
+/* Note: According to documentation clearing TXIE while MSGVAL is set
* is not allowed, but works nicely on C/DCAN. And that lowers the I/O
* load significantly.
*/
@@ -285,8 +276,7 @@ static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
c_can_inval_tx_object(dev, iface, obj);
}
@@ -309,12 +299,11 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
if (!rtr)
arb |= IF_ARB_TRANSMIT;
- /*
- * If we change the DIR bit, we need to invalidate the buffer
+ /* If we change the DIR bit, we need to invalidate the buffer
* first, i.e. clear the MSGVAL flag in the arbiter.
*/
if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
- u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+ u32 obj = idx + priv->msg_obj_tx_first;
c_can_inval_msg_object(dev, iface, obj);
change_bit(idx, &priv->tx_dir);
@@ -447,18 +436,16 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- /*
- * This is not a FIFO. C/D_CAN sends out the buffers
+ /* This is not a FIFO. C/D_CAN sends out the buffers
* prioritized. The lowest buffer number wins.
*/
idx = fls(atomic_read(&priv->tx_active));
- obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+ obj = idx + priv->msg_obj_tx_first;
/* If this is the last buffer, stop the xmit queue */
- if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
+ if (idx == priv->msg_obj_tx_num - 1)
netif_stop_queue(dev);
- /*
- * Store the message in the interface so we can call
+ /* Store the message in the interface so we can call
* can_put_echo_skb(). We must do this before we enable
* transmit as we might race against do_tx().
*/
@@ -467,7 +454,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
can_put_echo_skb(skb, dev, idx, 0);
/* Update the active bits */
- atomic_add((1 << idx), &priv->tx_active);
+ atomic_add(BIT(idx), &priv->tx_active);
/* Start transmission */
c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
@@ -511,7 +498,7 @@ static int c_can_set_bittiming(struct net_device *dev)
reg_brpe = brpe & BRP_EXT_BRPE_MASK;
netdev_info(dev,
- "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+ "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
ctrl_save &= ~CONTROL_INIT;
@@ -527,8 +514,7 @@ static int c_can_set_bittiming(struct net_device *dev)
return c_can_wait_for_ctrl_init(dev, priv, 0);
}
-/*
- * Configure C_CAN message objects for Tx and Rx purposes:
+/* Configure C_CAN message objects for Tx and Rx purposes:
* C_CAN provides a total of 32 message objects that can be configured
* either for Tx or Rx purposes. Here the first 16 message objects are used as
* a reception FIFO. The end of reception FIFO is signified by the EoB bit
@@ -538,17 +524,18 @@ static int c_can_set_bittiming(struct net_device *dev)
*/
static void c_can_configure_msg_objects(struct net_device *dev)
{
+ struct c_can_priv *priv = netdev_priv(dev);
int i;
/* first invalidate all message objects */
- for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+ for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++)
c_can_inval_msg_object(dev, IF_RX, i);
/* setup receive message objects */
- for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+ for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++)
c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
- c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+ c_can_setup_receive_object(dev, IF_RX, priv->msg_obj_rx_last, 0, 0,
IF_MCONT_RCV_EOB);
}
@@ -572,8 +559,7 @@ static int c_can_software_reset(struct net_device *dev)
return 0;
}
-/*
- * Configure C_CAN chip:
+/* Configure C_CAN chip:
* - enable/disable auto-retransmission
* - set operating mode
* - configure message objects
@@ -714,12 +700,21 @@ static void c_can_do_tx(struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
- clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
+ if (priv->msg_obj_tx_last > 32)
+ pend = priv->read_reg32(priv, C_CAN_INTPND3_REG);
+ else
+ pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
+ clr = pend;
while ((idx = ffs(pend))) {
idx--;
- pend &= ~(1 << idx);
- obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+ pend &= ~BIT(idx);
+ obj = idx + priv->msg_obj_tx_first;
+
+ /* We use IF_RX interface instead of IF_TX because we
+ * are called from c_can_poll(), which runs inside
+		 * NAPI. We are not transmitting.
+ */
c_can_inval_tx_object(dev, IF_RX, obj);
can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
@@ -729,7 +724,7 @@ static void c_can_do_tx(struct net_device *dev)
/* Clear the bits in the tx_active mask */
atomic_sub(clr, &priv->tx_active);
- if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
+ if (clr & BIT(priv->msg_obj_tx_num - 1))
netif_wake_queue(dev);
if (pkts) {
@@ -739,20 +734,18 @@ static void c_can_do_tx(struct net_device *dev)
}
}
-/*
- * If we have a gap in the pending bits, that means we either
+/* If we have a gap in the pending bits, that means we either
* raced with the hardware or failed to readout all upper
* objects in the last run due to quota limit.
*/
-static u32 c_can_adjust_pending(u32 pend)
+static u32 c_can_adjust_pending(u32 pend, u32 rx_mask)
{
u32 weight, lasts;
- if (pend == RECEIVE_OBJECT_BITS)
+ if (pend == rx_mask)
return pend;
- /*
- * If the last set bit is larger than the number of pending
+ /* If the last set bit is larger than the number of pending
* bits we have a gap.
*/
weight = hweight32(pend);
@@ -762,19 +755,19 @@ static u32 c_can_adjust_pending(u32 pend)
if (lasts == weight)
return pend;
- /*
- * Find the first set bit after the gap. We walk backwards
+ /* Find the first set bit after the gap. We walk backwards
* from the last set bit.
*/
- for (lasts--; pend & (1 << (lasts - 1)); lasts--);
+ for (lasts--; pend & BIT(lasts - 1); lasts--)
+ ;
- return pend & ~((1 << lasts) - 1);
+ return pend & ~GENMASK(lasts - 1, 0);
}
static inline void c_can_rx_object_get(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
- c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
+ c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}
static inline void c_can_rx_finalize(struct net_device *dev,
@@ -803,8 +796,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
continue;
}
- /*
- * This really should not happen, but this covers some
+ /* This really should not happen, but this covers some
* odd HW behaviour. Do not remove that unless you
* want to brick your machine.
*/
@@ -825,19 +817,22 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
- u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
+ u32 pend;
+
+ if (priv->msg_obj_rx_last > 16)
+ pend = priv->read_reg32(priv, C_CAN_NEWDAT1_REG);
+ else
+ pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
return pend;
}
-/*
- * theory of operation:
+/* theory of operation:
*
* c_can core saves a received CAN message into the first free message
* object it finds free (starting with the lowest). Bits NEWDAT and
* INTPND are set for this message object indicating that a new message
- * has arrived. To work-around this issue, we keep two groups of message
- * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
+ * has arrived.
*
* We clear the newdat bit right away.
*
@@ -848,23 +843,16 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
struct c_can_priv *priv = netdev_priv(dev);
u32 pkts = 0, pend = 0, toread, n;
- /*
- * It is faster to read only one 16bit register. This is only possible
- * for a maximum number of 16 objects.
- */
- BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
- "Implementation does not support more message objects than 16");
-
while (quota > 0) {
if (!pend) {
pend = c_can_get_pending(priv);
if (!pend)
break;
- /*
- * If the pending field has a gap, handle the
+ /* If the pending field has a gap, handle the
* bits above the gap first.
*/
- toread = c_can_adjust_pending(pend);
+ toread = c_can_adjust_pending(pend,
+ priv->msg_obj_rx_mask);
} else {
toread = pend;
}
@@ -883,7 +871,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
}
static int c_can_handle_state_change(struct net_device *dev,
- enum c_can_bus_error_types error_type)
+ enum c_can_bus_error_types error_type)
{
unsigned int reg_err_counter;
unsigned int rx_err_passive;
@@ -979,8 +967,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
struct can_frame *cf;
struct sk_buff *skb;
- /*
- * early exit if no lec update or no error.
+ /* early exit if no lec update or no error.
* no lec update means that no CAN bus event has been detected
* since CPU wrote 0x7 value to status reg.
*/
@@ -999,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
if (unlikely(!skb))
return 0;
- /*
- * check for 'last error code' which tells us the
+ /* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -1049,7 +1035,8 @@ static int c_can_poll(struct napi_struct *napi, int quota)
/* Only read the status register if a status interrupt was pending */
if (atomic_xchg(&priv->sie_pending, 0)) {
- priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+ priv->last_status = priv->read_reg(priv, C_CAN_STS_REG);
+ curr = priv->last_status;
/* Ack status on C_CAN. D_CAN is self clearing */
if (priv->type != BOSCH_D_CAN)
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
@@ -1147,7 +1134,7 @@ static int c_can_open(struct net_device *dev)
/* register interrupt handler */
err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
- dev);
+ dev);
if (err < 0) {
netdev_err(dev, "failed to request interrupt\n");
goto exit_irq_fail;
@@ -1195,17 +1182,31 @@ static int c_can_close(struct net_device *dev)
return 0;
}
-struct net_device *alloc_c_can_dev(void)
+struct net_device *alloc_c_can_dev(int msg_obj_num)
{
struct net_device *dev;
struct c_can_priv *priv;
+ int msg_obj_tx_num = msg_obj_num / 2;
- dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+ dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num),
+ msg_obj_tx_num);
if (!dev)
return NULL;
priv = netdev_priv(dev);
- netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+ priv->msg_obj_num = msg_obj_num;
+ priv->msg_obj_rx_num = msg_obj_num - msg_obj_tx_num;
+ priv->msg_obj_rx_first = 1;
+ priv->msg_obj_rx_last =
+ priv->msg_obj_rx_first + priv->msg_obj_rx_num - 1;
+ priv->msg_obj_rx_mask = GENMASK(priv->msg_obj_rx_num - 1, 0);
+
+ priv->msg_obj_tx_num = msg_obj_tx_num;
+ priv->msg_obj_tx_first = priv->msg_obj_rx_last + 1;
+ priv->msg_obj_tx_last =
+ priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1;
+
+ netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
@@ -1239,7 +1240,7 @@ int c_can_power_down(struct net_device *dev)
/* Wait for the PDA bit to get set */
time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
- time_after(time_out, jiffies))
+ time_after(time_out, jiffies))
cpu_relax();
if (time_after(jiffies, time_out))
@@ -1280,7 +1281,7 @@ int c_can_power_up(struct net_device *dev)
/* Wait for the PDA bit to get clear */
time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
- time_after(time_out, jiffies))
+ time_after(time_out, jiffies))
cpu_relax();
if (time_after(jiffies, time_out)) {
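
Several hunks above swap open-coded shift arithmetic for the <linux/bits.h> helpers. The substitutions are value-for-value; for instance, the mask built in c_can_adjust_pending():

#include <linux/bits.h>
#include <linux/types.h>

/* BIT(n)        == 1UL << n                        */
/* GENMASK(h, l) == mask with bits l through h set  */

static u32 clear_bits_below_gap(u32 pend, u32 lasts)
{
	/* was: return pend & ~((1 << lasts) - 1); */
	return pend & ~GENMASK(lasts - 1, 0);
}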
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 92213d3d96eb..06045f610f0e 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,23 +22,6 @@
#ifndef C_CAN_H
#define C_CAN_H
-/* message object split */
-#define C_CAN_NO_OF_OBJECTS 32
-#define C_CAN_MSG_OBJ_RX_NUM 16
-#define C_CAN_MSG_OBJ_TX_NUM 16
-
-#define C_CAN_MSG_OBJ_RX_FIRST 1
-#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
- C_CAN_MSG_OBJ_RX_NUM - 1)
-
-#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
-#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
- C_CAN_MSG_OBJ_TX_NUM - 1)
-
-#define C_CAN_MSG_OBJ_RX_SPLIT 9
-#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-#define RECEIVE_OBJECT_BITS 0x0000ffff
-
enum reg {
C_CAN_CTRL_REG = 0,
C_CAN_CTRL_EX_REG,
@@ -76,6 +59,7 @@ enum reg {
C_CAN_NEWDAT2_REG,
C_CAN_INTPND1_REG,
C_CAN_INTPND2_REG,
+ C_CAN_INTPND3_REG,
C_CAN_MSGVAL1_REG,
C_CAN_MSGVAL2_REG,
C_CAN_FUNCTION_REG,
@@ -137,6 +121,7 @@ static const u16 __maybe_unused reg_map_d_can[] = {
[C_CAN_NEWDAT2_REG] = 0x9E,
[C_CAN_INTPND1_REG] = 0xB0,
[C_CAN_INTPND2_REG] = 0xB2,
+ [C_CAN_INTPND3_REG] = 0xB4,
[C_CAN_MSGVAL1_REG] = 0xC4,
[C_CAN_MSGVAL2_REG] = 0xC6,
[C_CAN_IF1_COMREQ_REG] = 0x100,
@@ -164,7 +149,6 @@ static const u16 __maybe_unused reg_map_d_can[] = {
};
enum c_can_dev_id {
- BOSCH_C_CAN_PLATFORM,
BOSCH_C_CAN,
BOSCH_D_CAN,
};
@@ -176,6 +160,7 @@ struct raminit_bits {
struct c_can_driver_data {
enum c_can_dev_id id;
+ unsigned int msg_obj_num;
/* RAMINIT register description. Optional. */
const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */
@@ -197,26 +182,34 @@ struct c_can_priv {
struct napi_struct napi;
struct net_device *dev;
struct device *device;
+ unsigned int msg_obj_num;
+ unsigned int msg_obj_rx_num;
+ unsigned int msg_obj_tx_num;
+ unsigned int msg_obj_rx_first;
+ unsigned int msg_obj_rx_last;
+ unsigned int msg_obj_tx_first;
+ unsigned int msg_obj_tx_last;
+ u32 msg_obj_rx_mask;
atomic_t tx_active;
atomic_t sie_pending;
unsigned long tx_dir;
int last_status;
- u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
- void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
- u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
- void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
+ u16 (*read_reg)(const struct c_can_priv *priv, enum reg index);
+ void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val);
+ u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index);
+ void (*write_reg32)(const struct c_can_priv *priv, enum reg index, u32 val);
void __iomem *base;
const u16 *regs;
void *priv; /* for board-specific data */
enum c_can_dev_id type;
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
- void (*raminit) (const struct c_can_priv *priv, bool enable);
+ void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
u32 rxmasked;
- u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
+ u32 dlc[];
};
-struct net_device *alloc_c_can_dev(void);
+struct net_device *alloc_c_can_dev(int msg_obj_num);
void free_c_can_dev(struct net_device *dev);
int register_c_can_dev(struct net_device *dev);
void unregister_c_can_dev(struct net_device *dev);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7efb60b50876..bf2f8c3da1c1 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -31,6 +31,8 @@ enum c_can_pci_reg_align {
struct c_can_pci_data {
/* Specify if is C_CAN or D_CAN */
enum c_can_dev_id type;
+ /* Number of message objects */
+ unsigned int msg_obj_num;
/* Set the register alignment in the memory */
enum c_can_pci_reg_align reg_align;
/* Set the frequency */
@@ -41,32 +43,31 @@ struct c_can_pci_data {
void (*init)(const struct c_can_priv *priv, bool enable);
};
-/*
- * 16-bit c_can registers can be arranged differently in the memory
+/* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
- enum reg index)
+ enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
- enum reg index)
+ enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
@@ -88,13 +89,13 @@ static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
u32 val;
val = priv->read_reg(priv, index);
- val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+ val |= ((u32)priv->read_reg(priv, index + 1)) << 16;
return val;
}
static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+ u32 val)
{
priv->write_reg(priv, index + 1, val >> 16);
priv->write_reg(priv, index, val);
@@ -142,14 +143,13 @@ static int c_can_pci_probe(struct pci_dev *pdev,
pci_resource_len(pdev, c_can_pci_data->bar));
if (!addr) {
dev_err(&pdev->dev,
- "device has no PCI memory resources, "
- "failing adapter\n");
+ "device has no PCI memory resources, failing adapter\n");
ret = -ENOMEM;
goto out_release_regions;
}
/* allocate the c_can device */
- dev = alloc_c_can_dev();
+ dev = alloc_c_can_dev(c_can_pci_data->msg_obj_num);
if (!dev) {
ret = -ENOMEM;
goto out_iounmap;
@@ -217,7 +217,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
}
dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
- KBUILD_MODNAME, priv->regs, dev->irq);
+ KBUILD_MODNAME, priv->regs, dev->irq);
return 0;
@@ -252,8 +252,9 @@ static void c_can_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static const struct c_can_pci_data c_can_sta2x11= {
+static const struct c_can_pci_data c_can_sta2x11 = {
.type = BOSCH_C_CAN,
+ .msg_obj_num = 32,
.reg_align = C_CAN_REG_ALIGN_32,
	.freq = 52000000, /* 52 MHz */
.bar = 0,
@@ -261,6 +262,7 @@ static const struct c_can_pci_data c_can_sta2x11= {
static const struct c_can_pci_data c_can_pch = {
.type = BOSCH_C_CAN,
+ .msg_obj_num = 32,
.reg_align = C_CAN_REG_32,
.freq = 50000000, /* 50 MHz */
.init = c_can_pci_reset_pch,
@@ -269,7 +271,7 @@ static const struct c_can_pci_data c_can_pch = {
#define C_CAN_ID(_vend, _dev, _driverdata) { \
PCI_DEVICE(_vend, _dev), \
- .driver_data = (unsigned long)&_driverdata, \
+ .driver_data = (unsigned long)&(_driverdata), \
}
static const struct pci_device_id c_can_pci_tbl[] = {
@@ -279,6 +281,7 @@ static const struct pci_device_id c_can_pci_tbl[] = {
c_can_pch),
{},
};
+
static struct pci_driver c_can_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = c_can_pci_tbl,
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 47b251b1607c..36950363682f 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -193,10 +193,12 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
static const struct c_can_driver_data c_can_drvdata = {
.id = BOSCH_C_CAN,
+ .msg_obj_num = 32,
};
static const struct c_can_driver_data d_can_drvdata = {
.id = BOSCH_D_CAN,
+ .msg_obj_num = 32,
};
static const struct raminit_bits dra7_raminit_bits[] = {
@@ -206,6 +208,7 @@ static const struct raminit_bits dra7_raminit_bits[] = {
static const struct c_can_driver_data dra7_dcan_drvdata = {
.id = BOSCH_D_CAN,
+ .msg_obj_num = 64,
.raminit_num = ARRAY_SIZE(dra7_raminit_bits),
.raminit_bits = dra7_raminit_bits,
.raminit_pulse = true,
@@ -218,6 +221,7 @@ static const struct raminit_bits am3352_raminit_bits[] = {
static const struct c_can_driver_data am3352_dcan_drvdata = {
.id = BOSCH_D_CAN,
+ .msg_obj_num = 64,
.raminit_num = ARRAY_SIZE(am3352_raminit_bits),
.raminit_bits = am3352_raminit_bits,
};
@@ -294,7 +298,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
}
/* allocate the c_can device */
- dev = alloc_c_can_dev();
+ dev = alloc_c_can_dev(drvdata->msg_obj_num);
if (!dev) {
ret = -ENOMEM;
goto exit;
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index f7fe226bb395..f49170eadd54 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -81,9 +81,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
if (bt->sample_point) {
sample_point_nominal = bt->sample_point;
} else {
- if (bt->bitrate > 800000)
+ if (bt->bitrate > 800 * CAN_KBPS)
sample_point_nominal = 750;
- else if (bt->bitrate > 500000)
+ else if (bt->bitrate > 500 * CAN_KBPS)
sample_point_nominal = 800;
else
sample_point_nominal = 875;
@@ -174,6 +174,30 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}
+
+void can_calc_tdco(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_bittiming *dbt = &priv->data_bittiming;
+ struct can_tdc *tdc = &priv->tdc;
+ const struct can_tdc_const *tdc_const = priv->tdc_const;
+
+ if (!tdc_const)
+ return;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC), TDC is only applicable if the data
+ * BRP is one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Reuse "normal" sample point and convert it to time quanta */
+ u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000;
+
+ tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max);
+ } else {
+ tdc->tdco = 0;
+ }
+}
#endif /* CONFIG_CAN_CALC_BITTIMING */
/* Checks the validity of the specified bit-timing parameters prop_seg,
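To make the arithmetic in can_calc_tdco() above concrete, here is a standalone sketch with made-up numbers (the bit time, sample point and tdco_max values are illustrative only, not from any real controller):

	#include <stdio.h>

	/* mirrors: tdco = min(can_bit_time(dbt) * sample_point / 1000, tdco_max) */
	static unsigned int calc_tdco(unsigned int bit_time_tq,
				      unsigned int sample_point_pm,
				      unsigned int tdco_max)
	{
		unsigned int tq = bit_time_tq * sample_point_pm / 1000;

		return tq < tdco_max ? tq : tdco_max;
	}

	int main(void)
	{
		/* 10 tq data bit time, 75.0% sample point, tdco_max of 8 */
		printf("tdco = %u\n", calc_tdco(10, 750, 8));	/* prints "tdco = 7" */
		return 0;
	}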
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index f5d79e6e5483..e38c2566aff4 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -8,20 +8,17 @@
#include <net/rtnetlink.h>
static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
- [IFLA_CAN_STATE] = { .type = NLA_U32 },
- [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
- [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
- [IFLA_CAN_RESTART] = { .type = NLA_U32 },
- [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
- [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
- [IFLA_CAN_DATA_BITTIMING]
- = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
};
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -189,6 +186,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+ can_calc_tdco(dev);
+
if (priv->do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
err = priv->do_set_data_bittiming(dev);
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 6a64fe410987..61660248c69e 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -45,7 +45,7 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
BUG_ON(idx >= priv->echo_skb_max);
/* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ if (!(dev->flags & IFF_ECHO) ||
(skb->protocol != htons(ETH_P_CAN) &&
skb->protocol != htons(ETH_P_CANFD))) {
kfree_skb(skb);
@@ -58,7 +58,6 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
return -ENOMEM;
/* make settings for echo to reduce code in irq context */
- skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->dev = dev;
@@ -111,6 +110,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
priv->echo_skb[idx] = NULL;
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb->pkt_type = PACKET_BROADCAST;
+ } else {
+ dev_consume_skb_any(skb);
+ return NULL;
+ }
+
return skb;
}
@@ -147,14 +153,25 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb);
*
* The function is typically called when TX failed.
*/
-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+void can_free_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
- BUG_ON(idx >= priv->echo_skb_max);
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return;
+ }
if (priv->echo_skb[idx]) {
- dev_kfree_skb_any(priv->echo_skb[idx]);
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+
+ if (frame_len_ptr)
+ *frame_len_ptr = can_skb_priv->frame_len;
+
+ dev_kfree_skb_any(skb);
priv->echo_skb[idx] = NULL;
}
}
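The new frame_len_ptr argument lets callers that do byte-queue-limit (BQL) accounting reclaim the queued length of a discarded echo skb. A hypothetical caller sketch (the pairing with netdev_completed_queue() mirrors the mcp251xfd changes further down this patch, not this hunk):

	unsigned int frame_len = 0;

	/* drop the echoed frame and recover its BQL-accounted length */
	can_free_echo_skb(dev, idx, &frame_len);
	netdev_completed_queue(dev, 1, frame_len);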
@@ -166,8 +183,11 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
sizeof(struct can_frame));
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ *cf = NULL;
+
return NULL;
+ }
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -194,8 +214,11 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
sizeof(struct canfd_frame));
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ *cfd = NULL;
+
return NULL;
+ }
skb->protocol = htons(ETH_P_CANFD);
skb->pkt_type = PACKET_BROADCAST;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 4a8453290530..78e27940b2af 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -520,7 +520,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
- can_free_echo_skb(dev, i);
+ can_free_echo_skb(dev, i, NULL);
}
priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 0c8d36bc668c..34073cd077e4 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -8,6 +8,7 @@
* https://github.com/linux-can/can-doc/tree/master/m_can
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -148,6 +149,16 @@ enum m_can_reg {
#define NBTP_NTSEG2_SHIFT 0
#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT)
+/* Timestamp Counter Configuration Register (TSCC) */
+#define TSCC_TCP_MASK GENMASK(19, 16)
+#define TSCC_TSS_MASK GENMASK(1, 0)
+#define TSCC_TSS_DISABLE 0x0
+#define TSCC_TSS_INTERNAL 0x1
+#define TSCC_TSS_EXTERNAL 0x2
+
+/* Timestamp Counter Value Register (TSCV) */
+#define TSCV_TSC_MASK GENMASK(15, 0)
+
/* Error Counter Register(ECR) */
#define ECR_RP BIT(15)
#define ECR_REC_SHIFT 8
@@ -302,6 +313,7 @@ enum m_can_reg {
#define RX_BUF_ANMF BIT(31)
#define RX_BUF_FDF BIT(21)
#define RX_BUF_BRS BIT(20)
+#define RX_BUF_RXTS_MASK GENMASK(15, 0)
/* Tx Buffer Element */
/* T0 */
@@ -319,6 +331,7 @@ enum m_can_reg {
/* E1 */
#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)
+#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
@@ -413,6 +426,20 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
m_can_write(cdev, M_CAN_ILE, 0x0);
}
+/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
+ * width.
+ */
+static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
+{
+ u32 tscv;
+ u32 tsc;
+
+ tscv = m_can_read(cdev, M_CAN_TSCV);
+ tsc = FIELD_GET(TSCV_TSC_MASK, tscv);
+
+ return (tsc << 16);
+}
+
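The GENMASK()/FIELD_GET() pair above simply extracts the low 16 bits; the left shift then widens the counter so it can be compared with other 32-bit timestamps by rx-offload. A standalone userspace illustration (register value made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t tscv = 0xabcd1234;	/* hypothetical TSCV register value */
		uint32_t tsc = tscv & 0xffff;	/* FIELD_GET(TSCV_TSC_MASK, tscv) */

		printf("timestamp = 0x%08x\n", tsc << 16);	/* 0x12340000 */
		return 0;
	}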
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
@@ -425,11 +452,33 @@ static void m_can_clean(struct net_device *net)
putidx = ((m_can_read(cdev, M_CAN_TXFQS) &
TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT);
- can_free_echo_skb(cdev->net, putidx);
+ can_free_echo_skb(cdev->net, putidx, NULL);
cdev->tx_skb = NULL;
}
}
+/* For peripherals, pass skb to rx-offload, which will push skb from
+ * napi. For non-peripherals, RX is done in napi already, so push
+ * directly. timestamp is used to ensure good skb ordering in
+ * rx-offload and is ignored for non-peripherals.
+ */
+static void m_can_receive_skb(struct m_can_classdev *cdev,
+ struct sk_buff *skb,
+ u32 timestamp)
+{
+ if (cdev->is_peripheral) {
+ struct net_device_stats *stats = &cdev->net->stats;
+ int err;
+
+ err = can_rx_offload_queue_sorted(&cdev->offload, skb,
+ timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+ } else {
+ netif_receive_skb(skb);
+ }
+}
+
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
struct net_device_stats *stats = &dev->stats;
@@ -437,6 +486,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
struct canfd_frame *cf;
struct sk_buff *skb;
u32 id, fgi, dlc;
+ u32 timestamp = 0;
int i;
/* calculate the fifo get index for where to read data */
@@ -485,7 +535,9 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
stats->rx_packets++;
stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, dlc);
+
+ m_can_receive_skb(cdev, skb, timestamp);
}
static int m_can_do_rx_poll(struct net_device *dev, int quota)
@@ -516,9 +568,11 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
static int m_can_handle_lost_msg(struct net_device *dev)
{
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *frame;
+ u32 timestamp = 0;
netdev_err(dev, "msg lost in rxf0\n");
@@ -532,7 +586,10 @@ static int m_can_handle_lost_msg(struct net_device *dev)
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- netif_receive_skb(skb);
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
@@ -544,6 +601,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
+ u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
stats->rx_errors++;
@@ -589,7 +647,11 @@ static int m_can_handle_lec_err(struct net_device *dev,
stats->rx_packets++;
stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
@@ -647,6 +709,7 @@ static int m_can_handle_state_change(struct net_device *dev,
struct sk_buff *skb;
struct can_berr_counter bec;
unsigned int ecr;
+ u32 timestamp = 0;
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
@@ -708,7 +771,11 @@ static int m_can_handle_state_change(struct net_device *dev,
stats->rx_packets++;
stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
@@ -773,6 +840,7 @@ static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
struct m_can_classdev *cdev = netdev_priv(dev);
struct can_frame *cf;
struct sk_buff *skb;
+ u32 timestamp = 0;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
@@ -794,7 +862,11 @@ static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
netdev_dbg(dev, "allocation of skb failed\n");
return 0;
}
- netif_receive_skb(skb);
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
@@ -895,6 +967,29 @@ static int m_can_poll(struct napi_struct *napi, int quota)
return work_done;
}
+/* Echo tx skb and update net stats. Peripherals use rx-offload for
+ * echo. timestamp is used for peripherals to ensure correct ordering
+ * by rx-offload, and is ignored for non-peripherals.
+ */
+static void m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark,
+ u32 timestamp)
+{
+ struct net_device *dev = cdev->net;
+ struct net_device_stats *stats = &dev->stats;
+
+ if (cdev->is_peripheral)
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&cdev->offload,
+ msg_mark,
+ timestamp,
+ NULL);
+ else
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+
+ stats->tx_packets++;
+}
+
static void m_can_echo_tx_event(struct net_device *dev)
{
u32 txe_count = 0;
@@ -904,7 +999,6 @@ static void m_can_echo_tx_event(struct net_device *dev)
unsigned int msg_mark;
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
/* read tx event fifo status */
m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
@@ -914,21 +1008,23 @@ static void m_can_echo_tx_event(struct net_device *dev)
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
+ u32 txe, timestamp = 0;
+
/* retrieve get index */
fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >>
TXEFS_EFGI_SHIFT;
- /* get message marker */
- msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) &
- TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+ /* get message marker, timestamp */
+ txe = m_can_txe_fifo_read(cdev, fgi, 4);
+ msg_mark = (txe & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
/* ack txe element */
m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
- stats->tx_packets++;
+ m_can_tx_update_stats(cdev, msg_mark, timestamp);
}
}
@@ -936,7 +1032,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
u32 ir;
if (pm_runtime_suspended(cdev->dev))
@@ -969,8 +1064,12 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
- stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
- stats->tx_packets++;
+ u32 timestamp = 0;
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+ m_can_tx_update_stats(cdev, 0, timestamp);
+
can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
}
@@ -1108,6 +1207,7 @@ static int m_can_set_bittiming(struct net_device *dev)
* - >= v3.1.x: TX FIFO is used
* - configure mode
* - setup bittiming
+ * - configure timestamp generation
*/
static void m_can_chip_config(struct net_device *dev)
{
@@ -1219,6 +1319,10 @@ static void m_can_chip_config(struct net_device *dev)
/* set bittiming params */
m_can_set_bittiming(dev);
+ /* enable internal timestamp generation, with a prescaler of 16. The
+ * prescaler is applied to the nominal bit timing */
+ m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+
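A quick sanity check on the resolution this configures (assuming TSCC.TCP = 0xf means the counter ticks once every 16 nominal bit times, and a hypothetical 500 kbit/s nominal rate):

	#include <stdio.h>

	int main(void)
	{
		double bit_s = 1.0 / 500000.0;	/* 500 kbit/s nominal, assumed */
		double tick_s = 16 * bit_s;	/* TCP = 0xf -> divide by 16 */

		/* prints: tick = 32 us, wrap = 2.10 s */
		printf("tick = %.0f us, wrap = %.2f s\n",
		       tick_s * 1e6, tick_s * 65536);
		return 0;
	}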
m_can_config_endisable(cdev, false);
if (cdev->ops->init)
@@ -1426,6 +1530,9 @@ static int m_can_close(struct net_device *dev)
cdev->tx_wq = NULL;
}
+ if (cdev->is_peripheral)
+ can_rx_offload_disable(&cdev->offload);
+
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1624,6 +1731,9 @@ static int m_can_open(struct net_device *dev)
goto exit_disable_clks;
}
+ if (cdev->is_peripheral)
+ can_rx_offload_enable(&cdev->offload);
+
/* register interrupt handler */
if (cdev->is_peripheral) {
cdev->tx_skb = NULL;
@@ -1665,6 +1775,8 @@ exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
+ if (cdev->is_peripheral)
+ can_rx_offload_disable(&cdev->offload);
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
@@ -1787,11 +1899,6 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
}
class_dev = netdev_priv(net_dev);
- if (!class_dev) {
- dev_err(dev, "Failed to init netdev cdevate");
- goto out;
- }
-
class_dev->net = net_dev;
class_dev->dev = dev;
SET_NETDEV_DEV(net_dev, dev);
@@ -1818,15 +1925,22 @@ int m_can_class_register(struct m_can_classdev *cdev)
return ret;
}
+ if (cdev->is_peripheral) {
+ ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
+ M_CAN_NAPI_WEIGHT);
+ if (ret)
+ goto clk_disable;
+ }
+
ret = m_can_dev_setup(cdev);
if (ret)
- goto clk_disable;
+ goto rx_offload_del;
ret = register_m_can_dev(cdev->net);
if (ret) {
dev_err(cdev->dev, "registering %s failed (err=%d)\n",
cdev->net->name, ret);
- goto clk_disable;
+ goto rx_offload_del;
}
devm_can_led_init(cdev->net);
@@ -1839,6 +1953,13 @@ int m_can_class_register(struct m_can_classdev *cdev)
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
+ m_can_clk_stop(cdev);
+
+ return 0;
+
+rx_offload_del:
+ if (cdev->is_peripheral)
+ can_rx_offload_del(&cdev->offload);
clk_disable:
m_can_clk_stop(cdev);
@@ -1848,6 +1969,8 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ if (cdev->is_peripheral)
+ can_rx_offload_del(&cdev->offload);
unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 3fda84cef351..ace071c3e58c 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -8,6 +8,7 @@
#include <linux/can/core.h>
#include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -71,6 +72,7 @@ struct m_can_ops {
struct m_can_classdev {
struct can_priv can;
+ struct can_rx_offload offload;
struct napi_struct napi;
struct net_device *net;
struct device *dev;
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index c66da829b795..e62c030d3e1e 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -11,7 +11,6 @@
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
-#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 4870c4ea190a..00e4533c8bdd 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -217,7 +217,7 @@ static void tx_failure_cleanup(struct net_device *ndev)
int i;
for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
- can_free_echo_skb(ndev, i);
+ can_free_echo_skb(ndev, i, NULL);
}
static void rcar_can_error(struct net_device *ndev)
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index d8d233e62990..311e6ca3bdc4 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -617,7 +617,7 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
u32 i;
for (i = 0; i < RCANFD_FIFO_DEPTH; i++)
- can_free_echo_skb(ndev, i);
+ can_free_echo_skb(ndev, i, NULL);
}
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 9e86488ba55f..3fad54646746 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -525,7 +525,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
!(status & SR_TCS)) {
stats->tx_errors++;
- can_free_echo_skb(dev, 0);
+ can_free_echo_skb(dev, 0, NULL);
} else {
/* transmission complete */
stats->tx_bytes +=
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index c3e020c90111..6f5d6d04a8b9 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -179,7 +179,7 @@ static void hi3110_clean(struct net_device *net)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
- can_free_echo_skb(priv->net, 0);
+ can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
priv->tx_len = 0;
}
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index a57da43680d8..492f1bcb0516 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -276,7 +276,7 @@ static void mcp251x_clean(struct net_device *net)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
- can_free_echo_skb(priv->net, 0);
+ can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
priv->tx_len = 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index f5a147a92cb2..dd0fc0a54be1 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -3,6 +3,7 @@
config CAN_MCP251XFD
tristate "Microchip MCP251xFD SPI CAN controllers"
select REGMAP
+ select WANT_DEV_COREDUMP
help
Driver for the Microchip MCP251XFD SPI FD-CAN controller
family.
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index cb71244cbe89..3cba3b9447ea 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -6,3 +6,6 @@ mcp251xfd-objs :=
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
mcp251xfd-objs += mcp251xfd-regmap.o
+mcp251xfd-objs += mcp251xfd-timestamp.o
+
+mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 799e9d5d3481..970dc570e7a5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2,8 +2,8 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020 Pengutronix,
-// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -330,11 +329,14 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring;
struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
struct mcp251xfd_tx_obj *tx_obj;
+ struct spi_transfer *xfer;
u32 val;
u16 addr;
u8 len;
int i, j;
+ netdev_reset_queue(priv->ndev);
+
/* TEF */
tef_ring = priv->tef;
tef_ring->head = 0;
@@ -347,8 +349,6 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
addr, val, val);
for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- struct spi_transfer *xfer;
-
xfer = &tef_ring->uinc_xfer[j];
xfer->tx_buf = &tef_ring->uinc_buf;
xfer->len = len;
@@ -357,6 +357,15 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
}
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
/* TX */
tx_ring = priv->tx;
tx_ring->head = 0;
@@ -397,8 +406,6 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
addr, val, val);
for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
- struct spi_transfer *xfer;
-
xfer = &rx_ring->uinc_xfer[j];
xfer->tx_buf = &rx_ring->uinc_buf;
xfer->len = len;
@@ -406,6 +413,15 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
xfer->cs_change_delay.value = 0;
xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
}
+
+ /* "cs_change == 1" on the last transfer results in an
+ * active chip select after the complete SPI
+ * message. This causes the controller to interpret
+ * the next register access as data. Set "cs_change"
+ * of the last transfer to "0" to properly deactivate
+ * the chip select at the end of the message.
+ */
+ xfer->cs_change = 0;
}
}
@@ -1097,6 +1113,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
return 0;
out_chip_stop:
+ mcp251xfd_dump(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
return err;
@@ -1247,10 +1264,12 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
- u32 seq, seq_masked, tef_tail_masked;
+ struct sk_buff *skb;
+ u32 seq, seq_masked, tef_tail_masked, tef_tail;
seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
hw_tef_obj->flags);
@@ -1266,10 +1285,14 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
if (seq_masked != tef_tail_masked)
return mcp251xfd_handle_tefif_recover(priv, seq);
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
- mcp251xfd_get_tef_tail(priv),
- hw_tef_obj->ts, NULL);
+ tef_tail, hw_tef_obj->ts,
+ frame_len_ptr);
stats->tx_packets++;
priv->tef->tail++;
@@ -1327,6 +1350,7 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
u8 tef_tail, len, l;
int err, i;
@@ -1348,7 +1372,9 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
}
for (i = 0; i < len; i++) {
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
+ unsigned int frame_len = 0;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
/* -EAGAIN means the Sequence Number in the TEF
* doesn't match our tef_tail. This can happen if we
* read the TEF objects too early. Leave the loop and let the
@@ -1358,6 +1384,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
goto out_netif_wake_queue;
if (err)
return err;
+
+ total_frame_len += frame_len;
}
out_netif_wake_queue:
@@ -1365,29 +1393,25 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (len) {
struct mcp251xfd_tef_ring *ring = priv->tef;
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- struct spi_transfer *last_xfer;
+ int offset;
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
*
* Note:
- *
- * "cs_change == 1" on the last transfer results in an
- * active chip select after the complete SPI
- * message. This causes the controller to interpret
- * the next register access as data. Temporary set
- * "cs_change" of the last transfer to "0" to properly
- * deactivate the chip select at the end of the
- * message.
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
*/
- last_xfer = &ring->uinc_xfer[len - 1];
- last_xfer->cs_change = 0;
- err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
- last_xfer->cs_change = 1;
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
if (err)
return err;
tx_ring->tail += len;
+ netdev_completed_queue(priv->ndev, len, total_frame_len);
err = mcp251xfd_check_tef_tail(priv);
if (err)
@@ -1432,7 +1456,7 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
}
static void
-mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
struct sk_buff *skb)
{
@@ -1475,6 +1499,8 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}
static int
@@ -1535,7 +1561,7 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
return err;
while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- struct spi_transfer *last_xfer;
+ int offset;
rx_tail = mcp251xfd_get_rx_tail(ring);
@@ -1556,19 +1582,14 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
* single SPI message.
*
* Note:
- *
- * "cs_change == 1" on the last transfer results in an
- * active chip select after the complete SPI
- * message. This causes the controller to interpret
- * the next register access as data. Temporary set
- * "cs_change" of the last transfer to "0" to properly
- * deactivate the chip select at the end of the
- * message.
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
*/
- last_xfer = &ring->uinc_xfer[len - 1];
- last_xfer->cs_change = 0;
- err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
- last_xfer->cs_change = 1;
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
if (err)
return err;
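Because ring init now clears cs_change only on the final uinc_xfer element (and all elements are identical writes of uinc_buf), the transfer merely has to end on that last element. A sketch of the offset arithmetic with hypothetical sizes:

	struct spi_transfer uinc_xfer[8];	/* assume a ring of 8 elements */
	unsigned int len = 3;			/* FIFO entries to increment */
	unsigned int offset = ARRAY_SIZE(uinc_xfer) - len;

	/* runs elements [5..7]; only element 7 has cs_change == 0, so the
	 * chip select deactivates exactly at the end of the message
	 */
	err = spi_sync_transfer(spi, uinc_xfer + offset, len);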
@@ -1592,23 +1613,22 @@ static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
return 0;
}
-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
- u32 *timestamp)
-{
- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
-}
-
static struct sk_buff *
-mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
+mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
struct can_frame **cf, u32 *timestamp)
{
+ struct sk_buff *skb;
int err;
err = mcp251xfd_get_timestamp(priv, timestamp);
if (err)
return NULL;
- return alloc_can_err_skb(priv->ndev, cf);
+ skb = alloc_can_err_skb(priv->ndev, cf);
+ if (skb)
+ mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
+
+ return skb;
}
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
@@ -1760,6 +1780,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
if (!cf)
return 0;
+ mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -2277,6 +2298,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
out_fail:
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
err, priv->regs_status.intf);
+ mcp251xfd_dump(priv);
mcp251xfd_chip_interrupts_disable(priv);
return handled;
@@ -2433,6 +2455,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct mcp251xfd_priv *priv = netdev_priv(ndev);
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_tx_obj *tx_obj;
+ unsigned int frame_len;
u8 tx_head;
int err;
@@ -2451,7 +2474,10 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (mcp251xfd_get_tx_free(tx_ring) == 0)
netif_stop_queue(ndev);
- can_put_echo_skb(skb, ndev, tx_head, 0);
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
if (err)
@@ -2493,6 +2519,7 @@ static int mcp251xfd_open(struct net_device *ndev)
if (err)
goto out_transceiver_disable;
+ mcp251xfd_timestamp_init(priv);
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
@@ -2513,6 +2540,7 @@ static int mcp251xfd_open(struct net_device *ndev)
free_irq(spi->irq, priv);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
+ mcp251xfd_timestamp_stop(priv);
out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
out_mcp251xfd_ring_free:
@@ -2534,6 +2562,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
+ mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
mcp251xfd_transceiver_disable(priv);
mcp251xfd_ring_free(priv);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
new file mode 100644
index 000000000000..ffae8fdd3af0
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (C) 2015-2018 Etnaviv Project
+//
+
+#include <linux/devcoredump.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-dump.h"
+
+struct mcp251xfd_dump_iter {
+ void *start;
+ struct mcp251xfd_dump_object_header *hdr;
+ void *data;
+};
+
+struct mcp251xfd_dump_reg_space {
+ u16 base;
+ u16 size;
+};
+
+struct mcp251xfd_dump_ring {
+ enum mcp251xfd_dump_object_ring_key key;
+ u32 val;
+};
+
+static const struct mcp251xfd_dump_reg_space mcp251xfd_dump_reg_space[] = {
+ {
+ .base = MCP251XFD_REG_CON,
+ .size = MCP251XFD_REG_FLTOBJ(32) - MCP251XFD_REG_CON,
+ }, {
+ .base = MCP251XFD_RAM_START,
+ .size = MCP251XFD_RAM_SIZE,
+ }, {
+ .base = MCP251XFD_REG_OSC,
+ .size = MCP251XFD_REG_DEVID - MCP251XFD_REG_OSC,
+ },
+};
+
+static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter,
+ enum mcp251xfd_dump_object_type object_type,
+ const void *data_end)
+{
+ struct mcp251xfd_dump_object_header *hdr = iter->hdr;
+ unsigned int len;
+
+ len = data_end - iter->data;
+ if (!len)
+ return;
+
+ hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC);
+ hdr->type = cpu_to_le32(object_type);
+ hdr->offset = cpu_to_le32(iter->data - iter->start);
+ hdr->len = cpu_to_le32(len);
+
+ iter->hdr++;
+ iter->data += len;
+}
+
+static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_dump_iter *iter)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+ struct mcp251xfd_dump_object_reg *reg = iter->data;
+ unsigned int i, j;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) {
+ const struct mcp251xfd_dump_reg_space *reg_space;
+ void *buf;
+
+ reg_space = &mcp251xfd_dump_reg_space[i];
+
+ buf = kmalloc(reg_space->size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err = regmap_bulk_read(priv->map_reg, reg_space->base,
+ buf, reg_space->size / val_bytes);
+ if (err) {
+ kfree(buf);
+ continue;
+ }
+
+ for (j = 0; j < reg_space->size; j += sizeof(u32), reg++) {
+ reg->reg = cpu_to_le32(reg_space->base + j);
+ reg->val = cpu_to_le32p(buf + j);
+ }
+
+ kfree(buf);
+ }
+
+ out:
+ mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
+}
+
+static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter,
+ enum mcp251xfd_dump_object_type object_type,
+ const struct mcp251xfd_dump_ring *dump_ring,
+ unsigned int len)
+{
+ struct mcp251xfd_dump_object_reg *reg = iter->data;
+ unsigned int i;
+
+ for (i = 0; i < len; i++, reg++) {
+ reg->reg = cpu_to_le32(dump_ring[i].key);
+ reg->val = cpu_to_le32(dump_ring[i].val);
+ }
+
+ mcp251xfd_dump_header(iter, object_type, reg);
+}
+
+static void mcp251xfd_dump_tef_ring(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_dump_iter *iter)
+{
+ const struct mcp251xfd_tef_ring *tef = priv->tef;
+ const struct mcp251xfd_tx_ring *tx = priv->tx;
+ const struct mcp251xfd_dump_ring dump_ring[] = {
+ {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD,
+ .val = tef->head,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL,
+ .val = tef->tail,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE,
+ .val = 0,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
+ .val = 0,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
+ .val = 0,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
+ .val = tx->obj_num,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE,
+ .val = sizeof(struct mcp251xfd_hw_tef_obj),
+ },
+ };
+
+ mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TEF,
+ dump_ring, ARRAY_SIZE(dump_ring));
+}
+
+static void mcp251xfd_dump_rx_ring_one(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_dump_iter *iter,
+ const struct mcp251xfd_rx_ring *rx)
+{
+ const struct mcp251xfd_dump_ring dump_ring[] = {
+ {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD,
+ .val = rx->head,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL,
+ .val = rx->tail,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE,
+ .val = rx->base,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
+ .val = rx->nr,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
+ .val = rx->fifo_nr,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
+ .val = rx->obj_num,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE,
+ .val = rx->obj_size,
+ },
+ };
+
+ mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_RX,
+ dump_ring, ARRAY_SIZE(dump_ring));
+}
+
+static void mcp251xfd_dump_rx_ring(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_dump_iter *iter)
+{
+ struct mcp251xfd_rx_ring *rx_ring;
+ unsigned int i;
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i)
+ mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring);
+}
+
+static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_dump_iter *iter)
+{
+ const struct mcp251xfd_tx_ring *tx = priv->tx;
+ const struct mcp251xfd_dump_ring dump_ring[] = {
+ {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD,
+ .val = tx->head,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL,
+ .val = tx->tail,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE,
+ .val = tx->base,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
+ .val = 0,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
+ .val = MCP251XFD_TX_FIFO,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
+ .val = tx->obj_num,
+ }, {
+ .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE,
+ .val = tx->obj_size,
+ },
+ };
+
+ mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TX,
+ dump_ring, ARRAY_SIZE(dump_ring));
+}
+
+static void mcp251xfd_dump_end(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_dump_iter *iter)
+{
+ struct mcp251xfd_dump_object_header *hdr = iter->hdr;
+
+ hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC);
+ hdr->type = cpu_to_le32(MCP251XFD_DUMP_OBJECT_TYPE_END);
+ hdr->offset = cpu_to_le32(0);
+ hdr->len = cpu_to_le32(0);
+
+ /* provoke a NULL pointer access if used after the END object */
+ iter->hdr = NULL;
+}
+
+void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_dump_iter iter;
+ unsigned int rings_num, obj_num;
+ unsigned int file_size = 0;
+ unsigned int i;
+
+ /* register space + end marker */
+ obj_num = 2;
+
+ /* register space */
+ for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++)
+ file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) *
+ sizeof(struct mcp251xfd_dump_object_reg);
+
+ /* TEF ring, RX ring, TX rings */
+ rings_num = 1 + priv->rx_ring_num + 1;
+ obj_num += rings_num;
+ file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX *
+ sizeof(struct mcp251xfd_dump_object_reg);
+
+ /* size of the headers */
+ file_size += sizeof(*iter.hdr) * obj_num;
+
+ /* allocate the file in vmalloc memory, it's likely to be big */
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_NORETRY);
+ if (!iter.start) {
+ netdev_warn(priv->ndev, "Failed to allocate devcoredump file.\n");
+ return;
+ }
+
+ /* point the data member after the headers */
+ iter.hdr = iter.start;
+ iter.data = &iter.hdr[obj_num];
+
+ mcp251xfd_dump_registers(priv, &iter);
+ mcp251xfd_dump_tef_ring(priv, &iter);
+ mcp251xfd_dump_rx_ring(priv, &iter);
+ mcp251xfd_dump_tx_ring(priv, &iter);
+ mcp251xfd_dump_end(priv, &iter);
+
+ dev_coredumpv(&priv->spi->dev, iter.start,
+ iter.data - iter.start, GFP_KERNEL);
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h
new file mode 100644
index 000000000000..e7560b0712eb
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_DUMP_H
+#define _MCP251XFD_DUMP_H
+
+#define MCP251XFD_DUMP_MAGIC 0x1825434d
+
+enum mcp251xfd_dump_object_type {
+ MCP251XFD_DUMP_OBJECT_TYPE_REG,
+ MCP251XFD_DUMP_OBJECT_TYPE_TEF,
+ MCP251XFD_DUMP_OBJECT_TYPE_RX,
+ MCP251XFD_DUMP_OBJECT_TYPE_TX,
+ MCP251XFD_DUMP_OBJECT_TYPE_END = -1,
+};
+
+enum mcp251xfd_dump_object_ring_key {
+ MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD,
+ MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL,
+ MCP251XFD_DUMP_OBJECT_RING_KEY_BASE,
+ MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
+ MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
+ MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
+ MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE,
+ __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX,
+};
+
+struct mcp251xfd_dump_object_header {
+ __le32 magic;
+ __le32 type;
+ __le32 offset;
+ __le32 len;
+};
+
+struct mcp251xfd_dump_object_reg {
+ __le32 reg;
+ __le32 val;
+};
+
+#endif
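The resulting devcoredump is straightforward to consume from userspace: an array of object headers terminated by the END object, each header pointing into the data area via offset/len. A hypothetical parser sketch (assumes a little-endian host, so the __le32 fields can be read directly):

	#include <stdint.h>
	#include <stdio.h>

	struct hdr {
		uint32_t magic, type, offset, len;	/* __le32 on the wire */
	};

	#define DUMP_MAGIC	0x1825434d
	#define TYPE_END	0xffffffffu	/* OBJECT_TYPE_END == -1 */

	static void parse(const void *buf)
	{
		const struct hdr *h = buf;

		for (; h->magic == DUMP_MAGIC && h->type != TYPE_END; h++)
			printf("object type %u: %u bytes at offset %u\n",
			       h->type, h->len, h->offset);
	}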
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 314f868b3465..297491516a26 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -233,12 +233,30 @@ mcp251xfd_regmap_crc_write(void *context,
}
static int
+mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const buf_rx,
+ const struct mcp251xfd_map_buf_crc * const buf_tx,
+ unsigned int data_len)
+{
+ u16 crc_received, crc_calculated;
+
+ crc_received = get_unaligned_be16(buf_rx->data + data_len);
+ crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd,
+ sizeof(buf_tx->cmd),
+ buf_rx->data,
+ data_len);
+ if (crc_received != crc_calculated)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int
mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
struct spi_message *msg, unsigned int data_len)
{
const struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
const struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
- u16 crc_received, crc_calculated;
int err;
BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
@@ -248,15 +266,7 @@ mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
if (err)
return err;
- crc_received = get_unaligned_be16(buf_rx->data + data_len);
- crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd,
- sizeof(buf_tx->cmd),
- buf_rx->data,
- data_len);
- if (crc_received != crc_calculated)
- return -EBADMSG;
-
- return 0;
+ return mcp251xfd_regmap_crc_read_check_crc(buf_rx, buf_tx, data_len);
}
static int
@@ -311,6 +321,40 @@ mcp251xfd_regmap_crc_read(void *context,
if (err != -EBADMSG)
return err;
+ /* MCP251XFD_REG_TBC is the time base counter
+ * register. It increments once per SYS clock tick,
+ * which is 20 or 40 MHz.
+ *
+ * Observation shows that if the lowest byte (which is
+ * transferred first on the SPI bus) of that register
+ * is 0x00 or 0x80 the calculated CRC doesn't always
+ * match the transferred one.
+ *
+ * If the highest bit in the lowest byte is flipped
+ * the transferred CRC matches the calculated one. We
+ * assume for now the CRC calculation in the chip
+ * works on wrong data and the transferred data is
+ * correct.
+ */
+ if (reg == MCP251XFD_REG_TBC &&
+ (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+ /* Flip highest bit in lowest byte of le32 */
+ buf_rx->data[0] ^= 0x80;
+
+ /* re-check CRC */
+ err = mcp251xfd_regmap_crc_read_check_crc(buf_rx,
+ buf_tx,
+ val_len);
+ if (!err) {
+ /* If CRC is now correct, assume
+ * transferred data was OK, flip bit
+ * back to original value.
+ */
+ buf_rx->data[0] ^= 0x80;
+ goto out;
+ }
+ }
+
/* MCP251XFD_REG_OSC is the first ever reg we read from.
*
* The chip may be in deep sleep and this SPI transfer
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
new file mode 100644
index 000000000000..ed3169274d24
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/clocksource.h>
+#include <linux/workqueue.h>
+
+#include "mcp251xfd.h"
+
+static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
+{
+ struct mcp251xfd_priv *priv;
+ u32 timestamp = 0;
+ int err;
+
+ priv = container_of(cc, struct mcp251xfd_priv, cc);
+ err = mcp251xfd_get_timestamp(priv, &timestamp);
+ if (err)
+ netdev_err(priv->ndev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
+
+static void mcp251xfd_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct mcp251xfd_priv *priv;
+
+ priv = container_of(delayed_work, struct mcp251xfd_priv, timestamp);
+ timecounter_read(&priv->tc);
+
+ schedule_delayed_work(&priv->timestamp,
+ MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, timestamp);
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
+{
+ struct cyclecounter *cc = &priv->cc;
+
+ cc->read = mcp251xfd_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 1;
+ cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
+
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+
+ INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
+ schedule_delayed_work(&priv->timestamp,
+ MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->timestamp);
+}
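The 45 second refresh above must stay well under the counter wrap period; the static_assert added to mcp251xfd.h just below checks exactly this. A back-of-envelope check at the fastest supported SYS clock:

	#include <stdio.h>

	int main(void)
	{
		/* 32-bit counter at MCP251XFD_SYSCLOCK_HZ_MAX (40 MHz) */
		double wrap_s = 4294967296.0 / 40000000.0;

		/* prints: wrap = 107.4 s, half = 53.7 s (> 45 s, so OK) */
		printf("wrap = %.1f s, half = %.1f s\n", wrap_s, wrap_s / 2.0);
		return 0;
	}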
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 480bd4480bdf..1002f3902ad2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -15,9 +15,12 @@
#include <linux/can/rx-offload.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/timecounter.h>
+#include <linux/workqueue.h>
/* MPC251x registers */
@@ -394,6 +397,9 @@
#define MCP251XFD_SYSCLOCK_HZ_MAX 40000000
#define MCP251XFD_SYSCLOCK_HZ_MIN 1000000
#define MCP251XFD_SPICLOCK_HZ_MAX 20000000
+#define MCP251XFD_TIMESTAMP_WORK_DELAY_SEC 45
+static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / MCP251XFD_SYSCLOCK_HZ_MAX / 2);
#define MCP251XFD_OSC_PLL_MULTIPLIER 10
#define MCP251XFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC)
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
@@ -595,6 +601,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_ecc ecc;
struct mcp251xfd_regs_status regs_status;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work timestamp;
+
struct gpio_desc *rx_int;
struct clk *clk;
struct regulator *reg_vdd;
@@ -727,6 +737,12 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
return data;
}
+static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
+ u32 *timestamp)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+}
+
static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
{
return MCP251XFD_RAM_START +
@@ -837,5 +853,17 @@ int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 timestamp);
+void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+
+#if IS_ENABLED(CONFIG_DEV_COREDUMP)
+void mcp251xfd_dump(const struct mcp251xfd_priv *priv);
+#else
+static inline void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
+{
+}
+#endif
#endif
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 538f4d9adb91..3deb9f1cd292 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -20,6 +20,16 @@ config CAN_ESD_USB2
This driver supports the CAN-USB/2 interface
from esd electronic system design gmbh (http://www.esd.eu).
+config CAN_ETAS_ES58X
+ tristate "ETAS ES58X CAN/USB interfaces"
+ select CRC16
+ help
+ This driver supports the ES581.4, ES582.1 and ES584.1 interfaces
+ from ETAS GmbH (https://www.etas.com/en/products/es58x.php).
+
+ To compile this driver as a module, choose M here: the module
+ will be called etas_es58x.
+
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
help
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index aa0f17c0b2ed..748cf31a0d53 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 18f40eb20360..5af69787d9d5 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -807,7 +807,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err)) {
- can_free_echo_skb(netdev, context->echo_index);
+ can_free_echo_skb(netdev, context->echo_index, NULL);
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 562acbf454fd..65b58f8fc328 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -360,7 +360,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
can_get_echo_skb(netdev, context->echo_index, NULL);
} else {
stats->tx_errors++;
- can_free_echo_skb(netdev, context->echo_index);
+ can_free_echo_skb(netdev, context->echo_index, NULL);
}
/* Release context */
@@ -793,7 +793,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- can_free_echo_skb(netdev, context->echo_index);
+ can_free_echo_skb(netdev, context->echo_index, NULL);
atomic_dec(&priv->active_tx_jobs);
usb_unanchor_urb(urb);
diff --git a/drivers/net/can/usb/etas_es58x/Makefile b/drivers/net/can/usb/etas_es58x/Makefile
new file mode 100644
index 000000000000..a129b4aa0215
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x.o
+etas_es58x-y = es58x_core.o es581_4.o es58x_fd.o
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
new file mode 100644
index 000000000000..1985f772fc3c
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es581_4.c: Adds support for the ETAS ES581.4.
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+
+#include "es58x_core.h"
+#include "es581_4.h"
+
+/**
+ * es581_4_sizeof_rx_tx_msg() - Calculate the actual length of the
+ * structure of a rx or tx message.
+ * @msg: message of variable length, must have a dlc field.
+ *
+ * Even if RTR frames have actually no payload, the ES58X devices
+ * still expect it. Must be a macro in order to accept several types
+ * (struct es581_4_tx_can_msg and struct es581_4_rx_can_msg) as an
+ * input.
+ *
+ * Return: length of the message.
+ */
+#define es581_4_sizeof_rx_tx_msg(msg) \
+ offsetof(typeof(msg), data[can_cc_dlc2len((msg).dlc)])
+
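Usage sketch for the macro above (only the dlc and data fields documented in the kernel-doc are relied on; the initializer is illustrative): for a classical CAN frame with dlc == 3, the result is the header size up to data[] plus three payload bytes.

	struct es581_4_tx_can_msg msg = { .dlc = 3 };

	/* offsetof(typeof(msg), data[can_cc_dlc2len(3)]): header + 3 bytes */
	u16 wire_len = es581_4_sizeof_rx_tx_msg(msg);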
+static u16 es581_4_get_msg_len(const union es58x_urb_cmd *urb_cmd)
+{
+ return get_unaligned_le16(&urb_cmd->es581_4_urb_cmd.msg_len);
+}
+
+static int es581_4_echo_msg(struct es58x_device *es58x_dev,
+ const struct es581_4_urb_cmd *es581_4_urb_cmd)
+{
+ struct net_device *netdev;
+ const struct es581_4_bulk_echo_msg *bulk_echo_msg;
+ const struct es581_4_echo_msg *echo_msg;
+ u64 *tstamps = es58x_dev->timestamps;
+ u16 msg_len;
+ u32 first_packet_idx, packet_idx;
+ unsigned int dropped = 0;
+ int i, num_element, ret;
+
+ bulk_echo_msg = &es581_4_urb_cmd->bulk_echo_msg;
+ msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len) -
+ sizeof(bulk_echo_msg->channel_no);
+ num_element = es58x_msg_num_element(es58x_dev->dev,
+ bulk_echo_msg->echo_msg, msg_len);
+ if (num_element <= 0)
+ return num_element;
+
+ ret = es58x_get_netdev(es58x_dev, bulk_echo_msg->channel_no,
+ ES581_4_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ echo_msg = &bulk_echo_msg->echo_msg[0];
+ first_packet_idx = get_unaligned_le32(&echo_msg->packet_idx);
+ packet_idx = first_packet_idx;
+ for (i = 0; i < num_element; i++) {
+ u32 tmp_idx;
+
+ echo_msg = &bulk_echo_msg->echo_msg[i];
+ tmp_idx = get_unaligned_le32(&echo_msg->packet_idx);
+ if (tmp_idx == packet_idx - 1) {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Received echo packet idx %u twice\n",
+ packet_idx - 1);
+ dropped++;
+ continue;
+ }
+ if (tmp_idx != packet_idx) {
+ netdev_err(netdev, "Echo packet idx jumped from %u to %u\n",
+ packet_idx - 1, tmp_idx);
+ return -EBADMSG;
+ }
+
+ tstamps[i] = get_unaligned_le64(&echo_msg->timestamp);
+ packet_idx++;
+ }
+
+ netdev->stats.tx_dropped += dropped;
+ return es58x_can_get_echo_skb(netdev, first_packet_idx,
+ tstamps, num_element - dropped);
+}
+
+static int es581_4_rx_can_msg(struct es58x_device *es58x_dev,
+ const struct es581_4_urb_cmd *es581_4_urb_cmd,
+ u16 msg_len)
+{
+ const struct device *dev = es58x_dev->dev;
+ struct net_device *netdev;
+ int pkts, num_element, channel_no, ret;
+
+ num_element = es58x_msg_num_element(dev, es581_4_urb_cmd->rx_can_msg,
+ msg_len);
+ if (num_element <= 0)
+ return num_element;
+
+ channel_no = es581_4_urb_cmd->rx_can_msg[0].channel_no;
+ ret = es58x_get_netdev(es58x_dev, channel_no,
+ ES581_4_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ if (!netif_running(netdev)) {
+ if (net_ratelimit())
+ netdev_info(netdev,
+ "%s: %s is down, dropping %d rx packets\n",
+ __func__, netdev->name, num_element);
+ netdev->stats.rx_dropped += num_element;
+ return 0;
+ }
+
+ for (pkts = 0; pkts < num_element; pkts++) {
+ const struct es581_4_rx_can_msg *rx_can_msg =
+ &es581_4_urb_cmd->rx_can_msg[pkts];
+ u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp);
+ canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id);
+
+ if (channel_no != rx_can_msg->channel_no)
+ return -EBADMSG;
+
+ ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data,
+ can_id, rx_can_msg->flags,
+ rx_can_msg->dlc);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int es581_4_rx_err_msg(struct es58x_device *es58x_dev,
+ const struct es581_4_rx_err_msg *rx_err_msg)
+{
+ struct net_device *netdev;
+ enum es58x_err error = get_unaligned_le32(&rx_err_msg->error);
+ int ret;
+
+ ret = es58x_get_netdev(es58x_dev, rx_err_msg->channel_no,
+ ES581_4_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ return es58x_rx_err_msg(netdev, error, 0,
+ get_unaligned_le64(&rx_err_msg->timestamp));
+}
+
+static int es581_4_rx_event_msg(struct es58x_device *es58x_dev,
+ const struct es581_4_rx_event_msg *rx_event_msg)
+{
+ struct net_device *netdev;
+ enum es58x_event event = get_unaligned_le32(&rx_event_msg->event);
+ int ret;
+
+ ret = es58x_get_netdev(es58x_dev, rx_event_msg->channel_no,
+ ES581_4_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ return es58x_rx_err_msg(netdev, 0, event,
+ get_unaligned_le64(&rx_event_msg->timestamp));
+}
+
+static int es581_4_rx_cmd_ret_u32(struct es58x_device *es58x_dev,
+ const struct es581_4_urb_cmd *es581_4_urb_cmd,
+ enum es58x_ret_type ret_type)
+{
+ struct net_device *netdev;
+ const struct es581_4_rx_cmd_ret *rx_cmd_ret;
+ u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len);
+ int ret;
+
+ ret = es58x_check_msg_len(es58x_dev->dev,
+ es581_4_urb_cmd->rx_cmd_ret, msg_len);
+ if (ret)
+ return ret;
+
+ rx_cmd_ret = &es581_4_urb_cmd->rx_cmd_ret;
+
+ ret = es58x_get_netdev(es58x_dev, rx_cmd_ret->channel_no,
+ ES581_4_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ return es58x_rx_cmd_ret_u32(netdev, ret_type,
+ get_unaligned_le32(&rx_cmd_ret->rx_cmd_ret_le32));
+}
+
+static int es581_4_tx_ack_msg(struct es58x_device *es58x_dev,
+ const struct es581_4_urb_cmd *es581_4_urb_cmd)
+{
+ struct net_device *netdev;
+ const struct es581_4_tx_ack_msg *tx_ack_msg;
+ u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len);
+ int ret;
+
+ tx_ack_msg = &es581_4_urb_cmd->tx_ack_msg;
+ ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len);
+ if (ret)
+ return ret;
+
+ if (tx_ack_msg->rx_cmd_ret_u8 != ES58X_RET_U8_OK)
+ return es58x_rx_cmd_ret_u8(es58x_dev->dev,
+ ES58X_RET_TYPE_TX_MSG,
+ tx_ack_msg->rx_cmd_ret_u8);
+
+ ret = es58x_get_netdev(es58x_dev, tx_ack_msg->channel_no,
+ ES581_4_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ return es58x_tx_ack_msg(netdev,
+ get_unaligned_le16(&tx_ack_msg->tx_free_entries),
+ ES58X_RET_U32_OK);
+}
+
+static int es581_4_dispatch_rx_cmd(struct es58x_device *es58x_dev,
+ const struct es581_4_urb_cmd *es581_4_urb_cmd)
+{
+ const struct device *dev = es58x_dev->dev;
+ u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len);
+ enum es581_4_rx_type rx_type = es581_4_urb_cmd->rx_can_msg[0].rx_type;
+ int ret = 0;
+
+ switch (rx_type) {
+ case ES581_4_RX_TYPE_MESSAGE:
+ return es581_4_rx_can_msg(es58x_dev, es581_4_urb_cmd, msg_len);
+
+ case ES581_4_RX_TYPE_ERROR:
+ ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_err_msg,
+ msg_len);
+ if (ret < 0)
+ return ret;
+ return es581_4_rx_err_msg(es58x_dev,
+ &es581_4_urb_cmd->rx_err_msg);
+
+ case ES581_4_RX_TYPE_EVENT:
+ ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_event_msg,
+ msg_len);
+ if (ret < 0)
+ return ret;
+ return es581_4_rx_event_msg(es58x_dev,
+ &es581_4_urb_cmd->rx_event_msg);
+
+ default:
+ dev_err(dev, "%s: Unknown rx_type 0x%02X\n", __func__, rx_type);
+ return -EBADRQC;
+ }
+}
+
+static int es581_4_handle_urb_cmd(struct es58x_device *es58x_dev,
+ const union es58x_urb_cmd *urb_cmd)
+{
+ const struct es581_4_urb_cmd *es581_4_urb_cmd;
+ struct device *dev = es58x_dev->dev;
+ u16 msg_len = es581_4_get_msg_len(urb_cmd);
+ int ret;
+
+ es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd;
+
+ if (es581_4_urb_cmd->cmd_type != ES581_4_CAN_COMMAND_TYPE) {
+ dev_err(dev, "%s: Unknown command type (0x%02X)\n",
+ __func__, es581_4_urb_cmd->cmd_type);
+ return -EBADRQC;
+ }
+
+ switch ((enum es581_4_cmd_id)es581_4_urb_cmd->cmd_id) {
+ case ES581_4_CMD_ID_SET_BITTIMING:
+ return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd,
+ ES58X_RET_TYPE_SET_BITTIMING);
+
+ case ES581_4_CMD_ID_ENABLE_CHANNEL:
+ return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd,
+ ES58X_RET_TYPE_ENABLE_CHANNEL);
+
+ case ES581_4_CMD_ID_TX_MSG:
+ return es581_4_tx_ack_msg(es58x_dev, es581_4_urb_cmd);
+
+ case ES581_4_CMD_ID_RX_MSG:
+ return es581_4_dispatch_rx_cmd(es58x_dev, es581_4_urb_cmd);
+
+ case ES581_4_CMD_ID_RESET_RX:
+ ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd,
+ ES58X_RET_TYPE_RESET_RX);
+ return ret;
+
+ case ES581_4_CMD_ID_RESET_TX:
+ ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd,
+ ES58X_RET_TYPE_RESET_TX);
+ return ret;
+
+ case ES581_4_CMD_ID_DISABLE_CHANNEL:
+ return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd,
+ ES58X_RET_TYPE_DISABLE_CHANNEL);
+
+ case ES581_4_CMD_ID_TIMESTAMP:
+ ret = es58x_check_msg_len(dev, es581_4_urb_cmd->timestamp,
+ msg_len);
+ if (ret < 0)
+ return ret;
+ es58x_rx_timestamp(es58x_dev,
+ get_unaligned_le64(&es581_4_urb_cmd->timestamp));
+ return 0;
+
+ case ES581_4_CMD_ID_ECHO:
+ return es581_4_echo_msg(es58x_dev, es581_4_urb_cmd);
+
+ case ES581_4_CMD_ID_DEVICE_ERR:
+ ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_cmd_ret_u8,
+ msg_len);
+ if (ret)
+ return ret;
+ return es58x_rx_cmd_ret_u8(dev, ES58X_RET_TYPE_DEVICE_ERR,
+ es581_4_urb_cmd->rx_cmd_ret_u8);
+
+ default:
+ dev_warn(dev, "%s: Unexpected command ID: 0x%02X\n",
+ __func__, es581_4_urb_cmd->cmd_id);
+ return -EBADRQC;
+ }
+}
+
+static void es581_4_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type,
+ u8 cmd_id, u8 channel_idx, u16 msg_len)
+{
+ struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd;
+
+ es581_4_urb_cmd->SOF = cpu_to_le16(es581_4_param.tx_start_of_frame);
+ es581_4_urb_cmd->cmd_type = cmd_type;
+ es581_4_urb_cmd->cmd_id = cmd_id;
+ es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len);
+}
+
+static int es581_4_tx_can_msg(struct es58x_priv *priv,
+ const struct sk_buff *skb)
+{
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+ union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer;
+ struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct es581_4_tx_can_msg *tx_can_msg;
+ u16 msg_len;
+ int ret;
+
+ if (can_is_canfd_skb(skb))
+ return -EMSGSIZE;
+
+ if (priv->tx_can_msg_cnt == 0) {
+ msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */
+ es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE,
+ ES581_4_CMD_ID_TX_MSG,
+ priv->channel_idx, msg_len);
+ es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg = 0;
+ } else {
+ msg_len = es581_4_get_msg_len(urb_cmd);
+ }
+
+ ret = es58x_check_msg_max_len(es58x_dev->dev,
+ es581_4_urb_cmd->bulk_tx_can_msg,
+ msg_len + sizeof(*tx_can_msg));
+ if (ret)
+ return ret;
+
+ /* Fill message contents. */
+ tx_can_msg = (struct es581_4_tx_can_msg *)
+ &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1];
+ put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id);
+ put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx);
+ put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags);
+ tx_can_msg->channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET;
+ tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+
+ memcpy(tx_can_msg->data, cf->data, cf->len);
+
+ /* Calculate new sizes. */
+ es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg++;
+ msg_len += es581_4_sizeof_rx_tx_msg(*tx_can_msg);
+ priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev,
+ msg_len);
+ es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len);
+
+ return 0;
+}
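+
+/* Illustration of the msg_len accounting above, assuming the __packed
+ * layout of struct es581_4_tx_can_msg in es581_4.h (12 bytes before
+ * the data field): bulking two classic frames carrying 8 and 0 data
+ * bytes makes msg_len grow as 1 (num_can_msg) -> 1 + 20 -> 21 + 12,
+ * i.e. 33 bytes for the union part of the command.
+ */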
+
+static int es581_4_set_bittiming(struct es58x_priv *priv)
+{
+ struct es581_4_tx_conf_msg tx_conf_msg = { 0 };
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ tx_conf_msg.bitrate = cpu_to_le32(bt->bitrate);
+ /* bt->sample_point is in tenths of a percent. Convert it to percent. */
+ tx_conf_msg.sample_point = cpu_to_le32(bt->sample_point / 10U);
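+ /* e.g. a sample point of 87.5% is stored as 875 and sent as 87 */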
+ tx_conf_msg.samples_per_bit = cpu_to_le32(ES58X_SAMPLES_PER_BIT_ONE);
+ tx_conf_msg.bit_time = cpu_to_le32(can_bit_time(bt));
+ tx_conf_msg.sjw = cpu_to_le32(bt->sjw);
+ tx_conf_msg.sync_edge = cpu_to_le32(ES58X_SYNC_EDGE_SINGLE);
+ tx_conf_msg.physical_layer =
+ cpu_to_le32(ES58X_PHYSICAL_LAYER_HIGH_SPEED);
+ tx_conf_msg.echo_mode = cpu_to_le32(ES58X_ECHO_ON);
+ tx_conf_msg.channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET;
+
+ return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE,
+ ES581_4_CMD_ID_SET_BITTIMING, &tx_conf_msg,
+ sizeof(tx_conf_msg), priv->channel_idx);
+}
+
+static int es581_4_enable_channel(struct es58x_priv *priv)
+{
+ int ret;
+ u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET;
+
+ ret = es581_4_set_bittiming(priv);
+ if (ret)
+ return ret;
+
+ return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE,
+ ES581_4_CMD_ID_ENABLE_CHANNEL, &msg, sizeof(msg),
+ priv->channel_idx);
+}
+
+static int es581_4_disable_channel(struct es58x_priv *priv)
+{
+ u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET;
+
+ return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE,
+ ES581_4_CMD_ID_DISABLE_CHANNEL, &msg, sizeof(msg),
+ priv->channel_idx);
+}
+
+static int es581_4_reset_device(struct es58x_device *es58x_dev)
+{
+ return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE,
+ ES581_4_CMD_ID_RESET_DEVICE,
+ ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA);
+}
+
+static int es581_4_get_timestamp(struct es58x_device *es58x_dev)
+{
+ return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE,
+ ES581_4_CMD_ID_TIMESTAMP,
+ ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA);
+}
+
+/* Nominal bittiming constants for ES581.4 as specified in the
+ * microcontroller datasheet: "Stellaris(R) LM3S5B91 Microcontroller"
+ * table 17-4 "CAN Protocol Ranges" from Texas Instruments.
+ */
+static const struct can_bittiming_const es581_4_bittiming_const = {
+ .name = "ES581.4",
+ .tseg1_min = 1,
+ .tseg1_max = 8,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 128,
+ .brp_inc = 1
+};
+
+const struct es58x_parameters es581_4_param = {
+ .bittiming_const = &es581_4_bittiming_const,
+ .data_bittiming_const = NULL,
+ .tdc_const = NULL,
+ .bitrate_max = 1 * CAN_MBPS,
+ .clock = {.freq = 50 * CAN_MHZ},
+ .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC,
+ .tx_start_of_frame = 0xAFAF,
+ .rx_start_of_frame = 0xFAFA,
+ .tx_urb_cmd_max_len = ES581_4_TX_URB_CMD_MAX_LEN,
+ .rx_urb_cmd_max_len = ES581_4_RX_URB_CMD_MAX_LEN,
+ /* Size of internal device TX queue is 330.
+ *
+ * However, we witnessed some ES58X_ERR_PROT_CRC errors from
+ * the device and thus, echo_skb_max was lowered to the
+ * empirical value of 75 which seems stable and then rounded
+ * down to become a power of two.
+ *
+ * Root cause of those ES58X_ERR_PROT_CRC errors is still
+ * unclear.
+ */
+ .fifo_mask = 63, /* echo_skb_max = 64 */
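+ /* Because echo_skb_max is a power of two, "idx & fifo_mask" is
+ * equivalent to "idx % echo_skb_max", e.g. a tx_head of 130 maps to
+ * echo skb slot 130 & 63 = 2.
+ */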
+ .dql_min_limit = CAN_FRAME_LEN_MAX * 50, /* Empirical value. */
+ .tx_bulk_max = ES581_4_TX_BULK_MAX,
+ .urb_cmd_header_len = ES581_4_URB_CMD_HEADER_LEN,
+ .rx_urb_max = ES58X_RX_URBS_MAX,
+ .tx_urb_max = ES58X_TX_URBS_MAX
+};
+
+const struct es58x_operators es581_4_ops = {
+ .get_msg_len = es581_4_get_msg_len,
+ .handle_urb_cmd = es581_4_handle_urb_cmd,
+ .fill_urb_header = es581_4_fill_urb_header,
+ .tx_can_msg = es581_4_tx_can_msg,
+ .enable_channel = es581_4_enable_channel,
+ .disable_channel = es581_4_disable_channel,
+ .reset_device = es581_4_reset_device,
+ .get_timestamp = es581_4_get_timestamp
+};
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h
new file mode 100644
index 000000000000..4bc60a6df697
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es581_4.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es581_4.h: Definitions and declarations specific to ETAS
+ * ES581.4.
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef __ES581_4_H__
+#define __ES581_4_H__
+
+#include <linux/types.h>
+
+#define ES581_4_NUM_CAN_CH 2
+#define ES581_4_CHANNEL_IDX_OFFSET 1
+
+#define ES581_4_TX_BULK_MAX 25
+#define ES581_4_RX_BULK_MAX 30
+#define ES581_4_ECHO_BULK_MAX 30
+
+enum es581_4_cmd_type {
+ ES581_4_CAN_COMMAND_TYPE = 0x45
+};
+
+enum es581_4_cmd_id {
+ ES581_4_CMD_ID_OPEN_CHANNEL = 0x01,
+ ES581_4_CMD_ID_CLOSE_CHANNEL = 0x02,
+ ES581_4_CMD_ID_SET_BITTIMING = 0x03,
+ ES581_4_CMD_ID_ENABLE_CHANNEL = 0x04,
+ ES581_4_CMD_ID_TX_MSG = 0x05,
+ ES581_4_CMD_ID_RX_MSG = 0x06,
+ ES581_4_CMD_ID_RESET_RX = 0x0A,
+ ES581_4_CMD_ID_RESET_TX = 0x0B,
+ ES581_4_CMD_ID_DISABLE_CHANNEL = 0x0C,
+ ES581_4_CMD_ID_TIMESTAMP = 0x0E,
+ ES581_4_CMD_ID_RESET_DEVICE = 0x28,
+ ES581_4_CMD_ID_ECHO = 0x71,
+ ES581_4_CMD_ID_DEVICE_ERR = 0x72
+};
+
+enum es581_4_rx_type {
+ ES581_4_RX_TYPE_MESSAGE = 1,
+ ES581_4_RX_TYPE_ERROR = 3,
+ ES581_4_RX_TYPE_EVENT = 4
+};
+
+/**
+ * struct es581_4_tx_conf_msg - Channel configuration.
+ * @bitrate: Bitrate.
+ * @sample_point: Sample point is in percent [0..100].
+ * @samples_per_bit: type enum es58x_samples_per_bit.
+ * @bit_time: Number of time quanta in one bit.
+ * @sjw: Synchronization Jump Width.
+ * @sync_edge: type enum es58x_sync_edge.
+ * @physical_layer: type enum es58x_physical_layer.
+ * @echo_mode: type enum es58x_echo_mode.
+ * @channel_no: Channel number, starting from 1. Not to be confused
+ * with the channel_idx of the ES58X FD, which starts from 0.
+ */
+struct es581_4_tx_conf_msg {
+ __le32 bitrate;
+ __le32 sample_point;
+ __le32 samples_per_bit;
+ __le32 bit_time;
+ __le32 sjw;
+ __le32 sync_edge;
+ __le32 physical_layer;
+ __le32 echo_mode;
+ u8 channel_no;
+} __packed;
+
+struct es581_4_tx_can_msg {
+ __le32 can_id;
+ __le32 packet_idx;
+ __le16 flags;
+ u8 channel_no;
+ u8 dlc;
+ u8 data[CAN_MAX_DLEN];
+} __packed;
+
+/* The ES581.4 allows bulk transfer. */
+struct es581_4_bulk_tx_can_msg {
+ u8 num_can_msg;
+ /* Using type "u8[]" instead of "struct es581_4_tx_can_msg[]"
+ * for tx_can_msg_buf because each member has a flexible size.
+ */
+ u8 tx_can_msg_buf[ES581_4_TX_BULK_MAX *
+ sizeof(struct es581_4_tx_can_msg)];
+} __packed;
+
+struct es581_4_echo_msg {
+ __le64 timestamp;
+ __le32 packet_idx;
+} __packed;
+
+struct es581_4_bulk_echo_msg {
+ u8 channel_no;
+ struct es581_4_echo_msg echo_msg[ES581_4_ECHO_BULK_MAX];
+} __packed;
+
+/* Normal Rx CAN Message */
+struct es581_4_rx_can_msg {
+ __le64 timestamp;
+ u8 rx_type; /* type enum es581_4_rx_type */
+ u8 flags; /* type enum es58x_flag */
+ u8 channel_no;
+ u8 dlc;
+ __le32 can_id;
+ u8 data[CAN_MAX_DLEN];
+} __packed;
+
+struct es581_4_rx_err_msg {
+ __le64 timestamp;
+ __le16 rx_type; /* type enum es581_4_rx_type */
+ __le16 flags; /* type enum es58x_flag */
+ u8 channel_no;
+ u8 __padding[2];
+ u8 dlc;
+ __le32 tag; /* Related to the CAN filtering. Unused in this module */
+ __le32 can_id;
+ __le32 error; /* type enum es58x_error */
+ __le32 destination; /* Unused in this module */
+} __packed;
+
+struct es581_4_rx_event_msg {
+ __le64 timestamp;
+ __le16 rx_type; /* type enum es581_4_rx_type */
+ u8 channel_no;
+ u8 __padding;
+ __le32 tag; /* Related to the CAN filtering. Unused in this module */
+ __le32 event; /* type enum es58x_event */
+ __le32 destination; /* Unused in this module */
+} __packed;
+
+struct es581_4_tx_ack_msg {
+ __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */
+ u8 channel_no;
+ u8 rx_cmd_ret_u8; /* type enum es58x_cmd_ret_code_u8 */
+} __packed;
+
+struct es581_4_rx_cmd_ret {
+ __le32 rx_cmd_ret_le32;
+ u8 channel_no;
+ u8 __padding[3];
+} __packed;
+
+/**
+ * struct es581_4_urb_cmd - Commands received from or sent to the
+ * ES581.4 device.
+ * @SOF: Start of Frame.
+ * @cmd_type: Command Type (type: enum es581_4_cmd_type). The CRC
+ * calculation starts at this position.
+ * @cmd_id: Command ID (type: enum es581_4_cmd_id).
+ * @msg_len: Length of the message, excluding CRC (i.e. length of the
+ * union).
+ * @tx_conf_msg: Channel configuration.
+ * @bulk_tx_can_msg: Tx messages.
+ * @rx_can_msg: Array of Rx messages.
+ * @bulk_echo_msg: Tx message being looped back.
+ * @rx_err_msg: Error message.
+ * @rx_event_msg: Event message.
+ * @tx_ack_msg: Tx acknowledgment message.
+ * @rx_cmd_ret: Command return code.
+ * @timestamp: Timestamp reply.
+ * @rx_cmd_ret_u8: Rx 8 bit return code (type: enum
+ * es58x_cmd_ret_code_u8).
+ * @raw_msg: Message raw payload.
+ * @reserved_for_crc16_do_not_use: The structure ends with a
+ * CRC16. Because the structures in the above union are of variable
+ * lengths, we cannot predict the offset of the CRC in
+ * advance. Use functions es58x_get_crc() and es58x_set_crc() to
+ * manipulate it.
+ */
+struct es581_4_urb_cmd {
+ __le16 SOF;
+ u8 cmd_type;
+ u8 cmd_id;
+ __le16 msg_len;
+
+ union {
+ struct es581_4_tx_conf_msg tx_conf_msg;
+ struct es581_4_bulk_tx_can_msg bulk_tx_can_msg;
+ struct es581_4_rx_can_msg rx_can_msg[ES581_4_RX_BULK_MAX];
+ struct es581_4_bulk_echo_msg bulk_echo_msg;
+ struct es581_4_rx_err_msg rx_err_msg;
+ struct es581_4_rx_event_msg rx_event_msg;
+ struct es581_4_tx_ack_msg tx_ack_msg;
+ struct es581_4_rx_cmd_ret rx_cmd_ret;
+ __le64 timestamp;
+ u8 rx_cmd_ret_u8;
+ u8 raw_msg[0];
+ } __packed;
+
+ __le16 reserved_for_crc16_do_not_use;
+} __packed;
+
+#define ES581_4_URB_CMD_HEADER_LEN (offsetof(struct es581_4_urb_cmd, raw_msg))
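+/* With the __packed layout above, ES581_4_URB_CMD_HEADER_LEN is
+ * 6 bytes: SOF (2) + cmd_type (1) + cmd_id (1) + msg_len (2).
+ */
+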
+#define ES581_4_TX_URB_CMD_MAX_LEN \
+ ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, bulk_tx_can_msg)
+#define ES581_4_RX_URB_CMD_MAX_LEN \
+ ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, rx_can_msg)
+
+#endif /* __ES581_4_H__ */
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
new file mode 100644
index 000000000000..8e9102482c52
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -0,0 +1,2301 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_core.c: Core logic to manage the network devices and the
+ * USB interface.
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/crc16.h>
+#include <asm/unaligned.h>
+
+#include "es58x_core.h"
+
+#define DRV_VERSION "1.00"
+MODULE_AUTHOR("Mailhol Vincent <mailhol.vincent@wanadoo.fr>");
+MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
+MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");
+
+#define ES58X_MODULE_NAME "etas_es58x"
+#define ES58X_VENDOR_ID 0x108C
+#define ES581_4_PRODUCT_ID 0x0159
+#define ES582_1_PRODUCT_ID 0x0168
+#define ES584_1_PRODUCT_ID 0x0169
+
+/* ES58X FD has some interface protocols unsupported by this driver. */
+#define ES58X_FD_INTERFACE_PROTOCOL 0
+
+/* Table of devices which work with this driver. */
+static const struct usb_device_id es58x_id_table[] = {
+ {
+ /* ETAS GmbH ES581.4 USB dual-channel CAN Bus Interface module. */
+ USB_DEVICE(ES58X_VENDOR_ID, ES581_4_PRODUCT_ID),
+ .driver_info = ES58X_DUAL_CHANNEL
+ }, {
+ /* ETAS GmbH ES582.1 USB dual-channel CAN FD Bus Interface module. */
+ USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES582_1_PRODUCT_ID,
+ ES58X_FD_INTERFACE_PROTOCOL),
+ .driver_info = ES58X_DUAL_CHANNEL | ES58X_FD_FAMILY
+ }, {
+ /* ETAS GmbH ES584.1 USB single-channel CAN FD Bus Interface module. */
+ USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES584_1_PRODUCT_ID,
+ ES58X_FD_INTERFACE_PROTOCOL),
+ .driver_info = ES58X_FD_FAMILY
+ }, {
+ /* Terminating entry */
+ }
+};
+
+MODULE_DEVICE_TABLE(usb, es58x_id_table);
+
+#define es58x_print_hex_dump(buf, len) \
+ print_hex_dump(KERN_DEBUG, \
+ ES58X_MODULE_NAME " " __stringify(buf) ": ", \
+ DUMP_PREFIX_NONE, 16, 1, buf, len, false)
+
+#define es58x_print_hex_dump_debug(buf, len) \
+ print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\
+ DUMP_PREFIX_NONE, 16, 1, buf, len, false)
+
+/* The last two bytes of an ES58X command are a CRC16. The first two
+ * bytes (the start of frame) are skipped and the CRC calculation
+ * starts on the third byte.
+ */
+#define ES58X_CRC_CALC_OFFSET 2
+
+/**
+ * es58x_calculate_crc() - Compute the crc16 of a given URB.
+ * @urb_cmd: The URB command for which we want to calculate the CRC.
+ * @urb_len: Length of @urb_cmd. Must be bigger than 4
+ * (ES58X_CRC_CALC_OFFSET + sizeof(crc))
+ *
+ * Return: crc16 value.
+ */
+static u16 es58x_calculate_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len)
+{
+ u16 crc;
+ ssize_t len = urb_len - ES58X_CRC_CALC_OFFSET - sizeof(crc);
+
+ crc = crc16(0, &urb_cmd->raw_cmd[ES58X_CRC_CALC_OFFSET], len);
+ return crc;
+}
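+
+/* Illustration of the command layout for a command of urb_len bytes:
+ *
+ *   offset 0        2                       urb_len - 2
+ *          +--------+-----------------------+----------+
+ *          |  SOF   |  command payload ...  | CRC16 LE |
+ *          +--------+-----------------------+----------+
+ *
+ * i.e. the CRC above is computed over urb_len - 4 bytes, starting at
+ * offset ES58X_CRC_CALC_OFFSET.
+ */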
+
+/**
+ * es58x_get_crc() - Get the CRC value of a given URB.
+ * @urb_cmd: The URB command for which we want to get the CRC.
+ * @urb_len: Length of @urb_cmd. Must be bigger than 4
+ * (ES58X_CRC_CALC_OFFSET + sizeof(crc))
+ *
+ * Return: crc16 value.
+ */
+static u16 es58x_get_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len)
+{
+ u16 crc;
+ const __le16 *crc_addr;
+
+ crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)];
+ crc = get_unaligned_le16(crc_addr);
+ return crc;
+}
+
+/**
+ * es58x_set_crc() - Set the CRC value of a given URB.
+ * @urb_cmd: The URB command for which we want to set the CRC.
+ * @urb_len: Length of @urb_cmd. Must be bigger than 4
+ * (ES58X_CRC_CALC_OFFSET + sizeof(crc))
+ */
+static void es58x_set_crc(union es58x_urb_cmd *urb_cmd, u16 urb_len)
+{
+ u16 crc;
+ __le16 *crc_addr;
+
+ crc = es58x_calculate_crc(urb_cmd, urb_len);
+ crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)];
+ put_unaligned_le16(crc, crc_addr);
+}
+
+/**
+ * es58x_check_crc() - Validate the CRC value of a given URB.
+ * @es58x_dev: ES58X device.
+ * @urb_cmd: The URB command for which we want to check the CRC.
+ * @urb_len: Length of @urb_cmd. Must be bigger than 4
+ * (ES58X_CRC_CALC_OFFSET + sizeof(crc))
+ *
+ * Return: zero on success, -EBADMSG if the CRC check fails.
+ */
+static int es58x_check_crc(struct es58x_device *es58x_dev,
+ const union es58x_urb_cmd *urb_cmd, u16 urb_len)
+{
+ u16 calculated_crc = es58x_calculate_crc(urb_cmd, urb_len);
+ u16 expected_crc = es58x_get_crc(urb_cmd, urb_len);
+
+ if (expected_crc != calculated_crc) {
+ dev_err_ratelimited(es58x_dev->dev,
+ "%s: Bad CRC, urb_len: %d\n",
+ __func__, urb_len);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+/**
+ * es58x_timestamp_to_ns() - Convert a timestamp value received from an
+ * ES58X device to nanoseconds.
+ * @timestamp: Timestamp received from an ES58X device.
+ *
+ * The timestamp received from ES58X is expressed in multiples of 0.5
+ * microseconds. This function converts it into nanoseconds.
+ *
+ * Return: Timestamp value in nanoseconds.
+ */
+static u64 es58x_timestamp_to_ns(u64 timestamp)
+{
+ const u64 es58x_timestamp_ns_mult_coef = 500ULL;
+
+ return es58x_timestamp_ns_mult_coef * timestamp;
+}
+
+/**
+ * es58x_set_skb_timestamp() - Set the hardware timestamp of an skb.
+ * @netdev: CAN network device.
+ * @skb: socket buffer of a CAN message.
+ * @timestamp: Timestamp received from an ES58X device.
+ *
+ * Used for both received and echo messages.
+ */
+static void es58x_set_skb_timestamp(struct net_device *netdev,
+ struct sk_buff *skb, u64 timestamp)
+{
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+ struct skb_shared_hwtstamps *hwts;
+
+ hwts = skb_hwtstamps(skb);
+ /* Ignoring overflow (an overflow of the 64 bit timestamp with
+ * nanosecond precision would occur after more than 500 years).
+ */
+ hwts->hwtstamp = ns_to_ktime(es58x_timestamp_to_ns(timestamp) +
+ es58x_dev->realtime_diff_ns);
+}
+
+/**
+ * es58x_rx_timestamp() - Handle a received timestamp.
+ * @es58x_dev: ES58X device.
+ * @timestamp: Timestamp received from an ES58X device.
+ *
+ * Calculate the difference between the ES58X device and the kernel
+ * internal clocks. This difference will be later used as an offset to
+ * convert the timestamps of RX and echo messages to match the kernel
+ * system time (e.g. convert to UNIX time).
+ */
+void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp)
+{
+ u64 ktime_real_ns = ktime_get_real_ns();
+ u64 device_timestamp = es58x_timestamp_to_ns(timestamp);
+
+ dev_dbg(es58x_dev->dev, "%s: request round-trip time: %llu ns\n",
+ __func__, ktime_real_ns - es58x_dev->ktime_req_ns);
+
+ es58x_dev->realtime_diff_ns =
+ (es58x_dev->ktime_req_ns + ktime_real_ns) / 2 - device_timestamp;
+ es58x_dev->ktime_req_ns = 0;
+
+ dev_dbg(es58x_dev->dev,
+ "%s: Device timestamp: %llu, diff with kernel: %llu\n",
+ __func__, device_timestamp, es58x_dev->realtime_diff_ns);
+}
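+
+/* Worked example with made-up numbers: if the timestamp request was
+ * sent at ktime_req_ns = 1000000 ns and the answer arrives at
+ * ktime_real_ns = 1000600 ns (round-trip: 600 ns) carrying a device
+ * timestamp of 400 ns, the device clock is assumed to have been
+ * sampled at the round-trip midpoint 1000300 ns, giving
+ * realtime_diff_ns = 1000300 - 400 = 999900 ns.
+ */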
+
+/**
+ * es58x_set_realtime_diff_ns() - Calculate difference between the
+ * clocks of the ES58X device and the kernel
+ * @es58x_dev: ES58X device.
+ *
+ * Request a timestamp from the ES58X device. Once the answer is
+ * received, the timestamp difference will be set by the callback
+ * function es58x_rx_timestamp().
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_set_realtime_diff_ns(struct es58x_device *es58x_dev)
+{
+ if (es58x_dev->ktime_req_ns) {
+ dev_warn(es58x_dev->dev,
+ "%s: Previous request to set timestamp has not completed yet\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ es58x_dev->ktime_req_ns = ktime_get_real_ns();
+ return es58x_dev->ops->get_timestamp(es58x_dev);
+}
+
+/**
+ * es58x_is_can_state_active() - Is the network device in an active
+ * CAN state?
+ * @netdev: CAN network device.
+ *
+ * The device is considered active if it is able to send or receive
+ * CAN frames, that is to say if it is in any of
+ * CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING or
+ * CAN_STATE_ERROR_PASSIVE states.
+ *
+ * Caution: when recovering from a bus-off,
+ * drivers/net/can/dev.c:can_restart() will call
+ * drivers/net/can/dev.c:can_flush_echo_skb() without using any kind of
+ * locks. For this reason, it is critical to guarantee that no TX or
+ * echo operations (i.e. any access to priv->echo_skb[]) can be done
+ * while this function is returning false.
+ *
+ * Return: true if the device is active, else returns false.
+ */
+static bool es58x_is_can_state_active(struct net_device *netdev)
+{
+ return es58x_priv(netdev)->can.state < CAN_STATE_BUS_OFF;
+}
+
+/**
+ * es58x_is_echo_skb_threshold_reached() - Check whether the number of
+ * skb slots already taken in the echo FIFO reached the threshold at
+ * which we should stop the network queue.
+ * @priv: ES58X private parameters related to the network device.
+ *
+ * We need to save enough free skb slots in order to be able to do
+ * bulk send. This function can be used to determine when to wake or
+ * stop the network queue with regard to the number of skb slots
+ * already taken in the echo FIFO.
+ *
+ * Return: true if the threshold is reached, false otherwise.
+ */
+static bool es58x_is_echo_skb_threshold_reached(struct es58x_priv *priv)
+{
+ u32 num_echo_skb = priv->tx_head - priv->tx_tail;
+ u32 threshold = priv->can.echo_skb_max -
+ priv->es58x_dev->param->tx_bulk_max + 1;
+
+ return num_echo_skb >= threshold;
+}
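+
+/* Example with the ES581.4 parameters (echo_skb_max = 64,
+ * tx_bulk_max = 25): the threshold is 64 - 25 + 1 = 40, so the queue
+ * is stopped as soon as 40 slots are taken. Below the threshold (39
+ * or fewer), one more full bulk of 25 messages still fits:
+ * 39 + 25 = 64 = echo_skb_max.
+ */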
+
+/**
+ * es58x_can_free_echo_skb_tail() - Remove the oldest echo skb of the
+ * echo FIFO.
+ * @netdev: CAN network device.
+ *
+ * Naming convention: the tail is the beginning of the FIFO, i.e. the
+ * first skb to have entered the FIFO.
+ */
+static void es58x_can_free_echo_skb_tail(struct net_device *netdev)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ u16 fifo_mask = priv->es58x_dev->param->fifo_mask;
+ unsigned int frame_len = 0;
+
+ can_free_echo_skb(netdev, priv->tx_tail & fifo_mask, &frame_len);
+ netdev_completed_queue(netdev, 1, frame_len);
+
+ priv->tx_tail++;
+
+ netdev->stats.tx_dropped++;
+}
+
+/**
+ * es58x_can_get_echo_skb_recovery() - Try to re-sync the echo FIFO.
+ * @netdev: CAN network device.
+ * @rcv_packet_idx: Index of the first packet received from the device.
+ *
+ * This function should not be called under normal circumstances. In
+ * the unlikely case that one or several URB packets get dropped by
+ * the device, the index will get out of sync. Try to recover by
+ * dropping the echo skb packets with older indexes.
+ *
+ * Return: zero if recovery was successful, -EINVAL otherwise.
+ */
+static int es58x_can_get_echo_skb_recovery(struct net_device *netdev,
+ u32 rcv_packet_idx)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ int ret = 0;
+
+ netdev->stats.tx_errors++;
+
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Bad echo packet index: %u. First index: %u, end index %u, num_echo_skb: %02u/%02u\n",
+ rcv_packet_idx, priv->tx_tail, priv->tx_head,
+ priv->tx_head - priv->tx_tail,
+ priv->can.echo_skb_max);
+
+ if ((s32)(rcv_packet_idx - priv->tx_tail) < 0) {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Received echo index is from the past. Ignoring it\n");
+ ret = -EINVAL;
+ } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) {
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "Received echo index is from the future. Ignoring it\n");
+ ret = -EINVAL;
+ } else {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Recovery: dropping %u echo skb from index %u to %u\n",
+ rcv_packet_idx - priv->tx_tail,
+ priv->tx_tail, rcv_packet_idx - 1);
+ while (priv->tx_tail != rcv_packet_idx) {
+ if (priv->tx_tail == priv->tx_head)
+ return -EINVAL;
+ es58x_can_free_echo_skb_tail(netdev);
+ }
+ }
+ return ret;
+}
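+
+/* The (s32)(a - b) casts above implement a wrap-around safe
+ * comparison of the u32 packet indexes, e.g.
+ * (s32)(0x00000001 - 0xFFFFFFFF) evaluates to 2 > 0, so index 1 is
+ * correctly treated as coming two packets after index 0xFFFFFFFF.
+ */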
+
+/**
+ * es58x_can_get_echo_skb() - Get the skb from the echo FIFO and loop
+ * it back locally.
+ * @netdev: CAN network device.
+ * @rcv_packet_idx: Index of the first packet received from the device.
+ * @tstamps: Array of hardware timestamps received from an ES58X device.
+ * @pkts: Number of packets (and so, length of @tstamps).
+ *
+ * Callback function for when we receive a self reception
+ * acknowledgment. Retrieves the skb from the echo FIFO, sets its
+ * hardware timestamp (the actual time it was sent) and loops it back
+ * locally.
+ *
+ * The device has to be active (i.e. network interface UP and not in
+ * bus off state or restarting).
+ *
+ * Packet indexes must be consecutive (i.e. index of first packet is
+ * @rcv_packet_idx, index of second packet is @rcv_packet_idx + 1 and
+ * index of last packet is @rcv_packet_idx + @pkts - 1).
+ *
+ * Return: zero on success.
+ */
+int es58x_can_get_echo_skb(struct net_device *netdev, u32 rcv_packet_idx,
+ u64 *tstamps, unsigned int pkts)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ unsigned int rx_total_frame_len = 0;
+ unsigned int num_echo_skb = priv->tx_head - priv->tx_tail;
+ int i;
+ u16 fifo_mask = priv->es58x_dev->param->fifo_mask;
+
+ if (!netif_running(netdev)) {
+ if (net_ratelimit())
+ netdev_info(netdev,
+ "%s: %s is down, dropping %d echo packets\n",
+ __func__, netdev->name, pkts);
+ netdev->stats.tx_dropped += pkts;
+ return 0;
+ } else if (!es58x_is_can_state_active(netdev)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev,
+ "Bus is off or device is restarting. Ignoring %u echo packets from index %u\n",
+ pkts, rcv_packet_idx);
+ /* stats.tx_dropped will be (or was already)
+ * incremented by
+ * drivers/net/can/dev.c:can_flush_echo_skb().
+ */
+ return 0;
+ } else if (num_echo_skb == 0) {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Received %u echo packets from index: %u but echo skb queue is empty.\n",
+ pkts, rcv_packet_idx);
+ netdev->stats.tx_dropped += pkts;
+ return 0;
+ }
+
+ if (priv->tx_tail != rcv_packet_idx) {
+ if (es58x_can_get_echo_skb_recovery(netdev, rcv_packet_idx) < 0) {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Could not find echo skb for echo packet index: %u\n",
+ rcv_packet_idx);
+ return 0;
+ }
+ }
+ if (num_echo_skb < pkts) {
+ int pkts_drop = pkts - num_echo_skb;
+
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "Received %u echo packets but have only %d echo skb. Dropping %d echo skb\n",
+ pkts, num_echo_skb, pkts_drop);
+ netdev->stats.tx_dropped += pkts_drop;
+ pkts -= pkts_drop;
+ }
+
+ for (i = 0; i < pkts; i++) {
+ unsigned int skb_idx = priv->tx_tail & fifo_mask;
+ struct sk_buff *skb = priv->can.echo_skb[skb_idx];
+ unsigned int frame_len = 0;
+
+ if (skb)
+ es58x_set_skb_timestamp(netdev, skb, tstamps[i]);
+
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, skb_idx,
+ &frame_len);
+ rx_total_frame_len += frame_len;
+
+ priv->tx_tail++;
+ }
+
+ netdev_completed_queue(netdev, pkts, rx_total_frame_len);
+ netdev->stats.tx_packets += pkts;
+
+ priv->err_passive_before_rtx_success = 0;
+ if (!es58x_is_echo_skb_threshold_reached(priv))
+ netif_wake_queue(netdev);
+
+ return 0;
+}
+
+/**
+ * es58x_can_reset_echo_fifo() - Reset the echo FIFO.
+ * @netdev: CAN network device.
+ *
+ * The echo_skb array of struct can_priv will be flushed by
+ * drivers/net/can/dev.c:can_flush_echo_skb(). This function resets
+ * the parameters of the struct es58x_priv of our device and resets
+ * the queue (c.f. BQL).
+ */
+static void es58x_can_reset_echo_fifo(struct net_device *netdev)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ priv->tx_urb = NULL;
+ priv->err_passive_before_rtx_success = 0;
+ netdev_reset_queue(netdev);
+}
+
+/**
+ * es58x_flush_pending_tx_msg() - Reset the buffer for transmission messages.
+ * @netdev: CAN network device.
+ *
+ * es58x_start_xmit() will queue up to tx_bulk_max messages in
+ * &tx_urb buffer and do a bulk send of all messages in one single URB
+ * (c.f. xmit_more flag). When the device recovers from a bus off
+ * state or when the device stops, the tx_urb buffer might still have
+ * pending messages in it and thus needs to be flushed.
+ */
+static void es58x_flush_pending_tx_msg(struct net_device *netdev)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+
+ if (priv->tx_urb) {
+ netdev_warn(netdev, "%s: dropping %d TX messages\n",
+ __func__, priv->tx_can_msg_cnt);
+ netdev->stats.tx_dropped += priv->tx_can_msg_cnt;
+ while (priv->tx_can_msg_cnt > 0) {
+ unsigned int frame_len = 0;
+ u16 fifo_mask = priv->es58x_dev->param->fifo_mask;
+
+ priv->tx_head--;
+ priv->tx_can_msg_cnt--;
+ can_free_echo_skb(netdev, priv->tx_head & fifo_mask,
+ &frame_len);
+ netdev_completed_queue(netdev, 1, frame_len);
+ }
+ usb_anchor_urb(priv->tx_urb, &priv->es58x_dev->tx_urbs_idle);
+ atomic_inc(&es58x_dev->tx_urbs_idle_cnt);
+ usb_free_urb(priv->tx_urb);
+ }
+ priv->tx_urb = NULL;
+}
+
+/**
+ * es58x_tx_ack_msg() - Handle acknowledgment messages.
+ * @netdev: CAN network device.
+ * @tx_free_entries: Number of free entries in the device transmit FIFO.
+ * @rx_cmd_ret_u32: error code as returned by the ES58X device.
+ *
+ * ES58X sends an acknowledgment message after a transmission request
+ * is done. This is mandatory for the ES581.4 but is optional (and
+ * deactivated in this driver) for the ES58X_FD family.
+ *
+ * Under normal circumstances, this function should never throw an
+ * error message.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries,
+ enum es58x_ret_u32 rx_cmd_ret_u32)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+
+ if (tx_free_entries <= priv->es58x_dev->param->tx_bulk_max) {
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "Only %d entries left in device queue, num_echo_skb: %d/%d\n",
+ tx_free_entries,
+ priv->tx_head - priv->tx_tail,
+ priv->can.echo_skb_max);
+ netif_stop_queue(netdev);
+ }
+
+ return es58x_rx_cmd_ret_u32(netdev, ES58X_RET_TYPE_TX_MSG,
+ rx_cmd_ret_u32);
+}
+
+/**
+ * es58x_rx_can_msg() - Handle a received CAN message.
+ * @netdev: CAN network device.
+ * @timestamp: Hardware timestamp (only relevant in rx branches).
+ * @data: CAN payload.
+ * @can_id: CAN ID.
+ * @es58x_flags: Please refer to enum es58x_flag.
+ * @dlc: Data Length Code (raw value).
+ *
+ * Fill up a CAN skb and post it.
+ *
+ * This function handles the case where the DLC of a classical CAN
+ * frame is greater than CAN_MAX_DLEN (c.f. the len8_dlc field of
+ * struct can_frame).
+ *
+ * Return: zero on success.
+ */
+int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data,
+ canid_t can_id, enum es58x_flag es58x_flags, u8 dlc)
+{
+ struct canfd_frame *cfd;
+ struct can_frame *ccf;
+ struct sk_buff *skb;
+ u8 len;
+ bool is_can_fd = !!(es58x_flags & ES58X_FLAG_FD_DATA);
+
+ if (dlc > CAN_MAX_RAW_DLC) {
+ netdev_err(netdev,
+ "%s: DLC is %d but maximum should be %d\n",
+ __func__, dlc, CAN_MAX_RAW_DLC);
+ return -EMSGSIZE;
+ }
+
+ if (is_can_fd) {
+ len = can_fd_dlc2len(dlc);
+ skb = alloc_canfd_skb(netdev, &cfd);
+ } else {
+ len = can_cc_dlc2len(dlc);
+ skb = alloc_can_skb(netdev, &ccf);
+ cfd = (struct canfd_frame *)ccf;
+ }
+ if (!skb) {
+ netdev->stats.rx_dropped++;
+ return 0;
+ }
+
+ cfd->can_id = can_id;
+ if (es58x_flags & ES58X_FLAG_EFF)
+ cfd->can_id |= CAN_EFF_FLAG;
+ if (is_can_fd) {
+ cfd->len = len;
+ if (es58x_flags & ES58X_FLAG_FD_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (es58x_flags & ES58X_FLAG_FD_ESI)
+ cfd->flags |= CANFD_ESI;
+ } else {
+ can_frame_set_cc_len(ccf, dlc, es58x_priv(netdev)->can.ctrlmode);
+ if (es58x_flags & ES58X_FLAG_RTR) {
+ ccf->can_id |= CAN_RTR_FLAG;
+ len = 0;
+ }
+ }
+ memcpy(cfd->data, data, len);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += len;
+
+ es58x_set_skb_timestamp(netdev, skb, timestamp);
+ netif_rx(skb);
+
+ es58x_priv(netdev)->err_passive_before_rtx_success = 0;
+
+ return 0;
+}
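+
+/* Note on the len8_dlc corner case handled above: a classic CAN frame
+ * received with a raw dlc of 0xF gets len = 8 from can_cc_dlc2len();
+ * if CAN_CTRLMODE_CC_LEN8_DLC is active, can_frame_set_cc_len()
+ * additionally saves the raw value in ccf->len8_dlc so that user
+ * space can retrieve the original DLC.
+ */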
+
+/**
+ * es58x_rx_err_msg() - Handle a received CAN event or error message.
+ * @netdev: CAN network device.
+ * @error: Error code.
+ * @event: Event code.
+ * @timestamp: Timestamp received from an ES58X device.
+ *
+ * Handle the errors and events received by the ES58X device, create
+ * a CAN error skb and post it.
+ *
+ * In some rare cases the devices might get stuck alternating between
+ * CAN_STATE_ERROR_PASSIVE and CAN_STATE_ERROR_WARNING. To prevent
+ * this behavior, we force a bus off state if the device goes in
+ * CAN_STATE_ERROR_PASSIVE for ES58X_CONSECUTIVE_ERR_PASSIVE_MAX
+ * consecutive times with no successful transmission or reception in
+ * between.
+ *
+ * Once the device is in bus off state, the only way to restart it is
+ * through the drivers/net/can/dev.c:can_restart() function. The
+ * device is technically capable of recovering by itself under certain
+ * circumstances, however, allowing self recovery would create
+ * complex race conditions with drivers/net/can/dev.c:can_restart()
+ * and thus was not implemented. To activate automatic restart, please
+ * set the restart-ms parameter (e.g. ip link set can0 type can
+ * restart-ms 100).
+ *
+ * If the bus is really unstable, this function would try to send a
+ * lot of log messages. Those are rate limited (i.e. you will see
+ * messages such as "net_ratelimit: XXX callbacks suppressed" in
+ * dmesg).
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
+ enum es58x_event event, u64 timestamp)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ struct can_priv *can = netdev_priv(netdev);
+ struct can_device_stats *can_stats = &can->can_stats;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ int ret;
+
+ if (!netif_running(netdev)) {
+ if (net_ratelimit())
+ netdev_info(netdev, "%s: %s is down, dropping packet\n",
+ __func__, netdev->name);
+ netdev->stats.rx_dropped++;
+ return 0;
+ }
+
+ if (error == ES58X_ERR_OK && event == ES58X_EVENT_OK) {
+ netdev_err(netdev, "%s: Both error and event are zero\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ skb = alloc_can_err_skb(netdev, &cf);
+
+ switch (error) {
+ case ES58X_ERR_OK: /* 0: No error */
+ break;
+
+ case ES58X_ERR_PROT_STUFF:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Error BITSTUFF\n");
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case ES58X_ERR_PROT_FORM:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Error FORMAT\n");
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case ES58X_ERR_ACK:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Error ACK\n");
+ if (cf)
+ cf->can_id |= CAN_ERR_ACK;
+ break;
+
+ case ES58X_ERR_PROT_BIT:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Error BIT\n");
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+
+ case ES58X_ERR_PROT_CRC:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Error CRC\n");
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+
+ case ES58X_ERR_PROT_BIT1:
+ if (net_ratelimit())
+ netdev_dbg(netdev,
+ "Error: expected a recessive bit but monitored a dominant one\n");
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+
+ case ES58X_ERR_PROT_BIT0:
+ if (net_ratelimit())
+ netdev_dbg(netdev,
+ "Error expected a dominant bit but monitored a recessive one\n");
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+
+ case ES58X_ERR_PROT_OVERLOAD:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Error OVERLOAD\n");
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ break;
+
+ case ES58X_ERR_PROT_UNSPEC:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Unspecified error\n");
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT;
+ break;
+
+ default:
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "%s: Unspecified error code 0x%04X\n",
+ __func__, (int)error);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT;
+ break;
+ }
+
+ switch (event) {
+ case ES58X_EVENT_OK: /* 0: No event */
+ break;
+
+ case ES58X_EVENT_CRTL_ACTIVE:
+ if (can->state == CAN_STATE_BUS_OFF) {
+ netdev_err(netdev,
+ "%s: state transition: BUS OFF -> ACTIVE\n",
+ __func__);
+ }
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Event CAN BUS ACTIVE\n");
+ if (cf)
+ cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+ can->state = CAN_STATE_ERROR_ACTIVE;
+ break;
+
+ case ES58X_EVENT_CRTL_PASSIVE:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Event CAN BUS PASSIVE\n");
+ /* Either TX or RX error count reached passive state
+ * but we do not know which. Setting both flags by
+ * default.
+ */
+ if (cf) {
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ }
+ if (can->state < CAN_STATE_BUS_OFF)
+ can->state = CAN_STATE_ERROR_PASSIVE;
+ can_stats->error_passive++;
+ if (priv->err_passive_before_rtx_success < U8_MAX)
+ priv->err_passive_before_rtx_success++;
+ break;
+
+ case ES58X_EVENT_CRTL_WARNING:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Event CAN BUS WARNING\n");
+ /* Either TX or RX error count reached warning state
+ * but we do not know which. Setting both flags by
+ * default.
+ */
+ if (cf) {
+ cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ }
+ if (can->state < CAN_STATE_BUS_OFF)
+ can->state = CAN_STATE_ERROR_WARNING;
+ can_stats->error_warning++;
+ break;
+
+ case ES58X_EVENT_BUSOFF:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "Event CAN BUS OFF\n");
+ if (cf)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_stats->bus_off++;
+ netif_stop_queue(netdev);
+ if (can->state != CAN_STATE_BUS_OFF) {
+ can->state = CAN_STATE_BUS_OFF;
+ can_bus_off(netdev);
+ ret = can->do_set_mode(netdev, CAN_MODE_STOP);
+ if (ret)
+ return ret;
+ }
+ break;
+
+ case ES58X_EVENT_SINGLE_WIRE:
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "Lost connection on either CAN high or CAN low\n");
+ /* Lost connection on either CAN high or CAN
+ * low. Setting both flags by default.
+ */
+ if (cf) {
+ cf->data[4] |= CAN_ERR_TRX_CANH_NO_WIRE;
+ cf->data[4] |= CAN_ERR_TRX_CANL_NO_WIRE;
+ }
+ break;
+
+ default:
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "%s: Unspecified event code 0x%04X\n",
+ __func__, (int)event);
+ if (cf)
+ cf->can_id |= CAN_ERR_CRTL;
+ break;
+ }
+
+ /* drivers/net/can/dev.c:can_restart() takes into account error
+ * messages in the RX stats. Doing the same here for
+ * consistency.
+ */
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += CAN_ERR_DLC;
+
+ if (cf) {
+ if (cf->data[1])
+ cf->can_id |= CAN_ERR_CRTL;
+ if (cf->data[2] || cf->data[3]) {
+ cf->can_id |= CAN_ERR_PROT;
+ can_stats->bus_error++;
+ }
+ if (cf->data[4])
+ cf->can_id |= CAN_ERR_TRX;
+
+ es58x_set_skb_timestamp(netdev, skb, timestamp);
+ netif_rx(skb);
+ }
+
+ if ((event & ES58X_EVENT_CRTL_PASSIVE) &&
+ priv->err_passive_before_rtx_success == ES58X_CONSECUTIVE_ERR_PASSIVE_MAX) {
+ netdev_info(netdev,
+ "Got %d consecutive warning events with no successful RX or TX. Forcing bus-off\n",
+ priv->err_passive_before_rtx_success);
+ return es58x_rx_err_msg(netdev, ES58X_ERR_OK,
+ ES58X_EVENT_BUSOFF, timestamp);
+ }
+
+ return 0;
+}
+
+/**
+ * es58x_cmd_ret_desc() - Convert a command type to a string.
+ * @cmd_ret_type: Type of the command which triggered the return code.
+ *
+ * The final line (return "<unknown>") should not be reached. If this
+ * is the case, there is an implementation bug.
+ *
+ * Return: a readable description of the @cmd_ret_type.
+ */
+static const char *es58x_cmd_ret_desc(enum es58x_ret_type cmd_ret_type)
+{
+ switch (cmd_ret_type) {
+ case ES58X_RET_TYPE_SET_BITTIMING:
+ return "Set bittiming";
+ case ES58X_RET_TYPE_ENABLE_CHANNEL:
+ return "Enable channel";
+ case ES58X_RET_TYPE_DISABLE_CHANNEL:
+ return "Disable channel";
+ case ES58X_RET_TYPE_TX_MSG:
+ return "Transmit message";
+ case ES58X_RET_TYPE_RESET_RX:
+ return "Reset RX";
+ case ES58X_RET_TYPE_RESET_TX:
+ return "Reset TX";
+ case ES58X_RET_TYPE_DEVICE_ERR:
+ return "Device error";
+ }
+
+ return "<unknown>";
+}
+
+/**
+ * es58x_rx_cmd_ret_u8() - Handle the command's return code received
+ * from the ES58X device.
+ * @dev: Device, only used for the dev_XXX() print functions.
+ * @cmd_ret_type: Type of the command which triggered the return code.
+ * @rx_cmd_ret_u8: Command error code as returned by the ES58X device.
+ *
+ * Handles the 8 bit command return code. Those are specific to the
+ * ES581.4 device. The return value will eventually be used by the
+ * es58x_handle_urb_cmd() function, which will take proper action in
+ * case of critical issues such as memory errors or bad CRC values.
+ *
+ * In contrast with es58x_rx_cmd_ret_u32(), the network device is
+ * unknown.
+ *
+ * Return: zero on success, return errno when any error occurs.
+ */
+int es58x_rx_cmd_ret_u8(struct device *dev,
+ enum es58x_ret_type cmd_ret_type,
+ enum es58x_ret_u8 rx_cmd_ret_u8)
+{
+ const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type);
+
+ switch (rx_cmd_ret_u8) {
+ case ES58X_RET_U8_OK:
+ dev_dbg_ratelimited(dev, "%s: OK\n", ret_desc);
+ return 0;
+
+ case ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE:
+ dev_err(dev, "%s: unspecified failure\n", ret_desc);
+ return -EBADMSG;
+
+ case ES58X_RET_U8_ERR_NO_MEM:
+ dev_err(dev, "%s: device ran out of memory\n", ret_desc);
+ return -ENOMEM;
+
+ case ES58X_RET_U8_ERR_BAD_CRC:
+ dev_err(dev, "%s: CRC of previous command is incorrect\n",
+ ret_desc);
+ return -EIO;
+
+ default:
+ dev_err(dev, "%s: returned unknown value: 0x%02X\n",
+ ret_desc, rx_cmd_ret_u8);
+ return -EBADMSG;
+ }
+}
+
+/**
+ * es58x_rx_cmd_ret_u32() - Handle the command return code received
+ * from the ES58X device.
+ * @netdev: CAN network device.
+ * @cmd_ret_type: Type of the command which triggered the return code.
+ * @rx_cmd_ret_u32: error code as returned by the ES58X device.
+ *
+ * Handles the 32 bit command return code. The return value will
+ * eventually be used by the es58x_handle_urb_cmd() function, which
+ * will take proper action in case of critical issues such as memory
+ * errors or bad CRC values.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+int es58x_rx_cmd_ret_u32(struct net_device *netdev,
+ enum es58x_ret_type cmd_ret_type,
+ enum es58x_ret_u32 rx_cmd_ret_u32)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ const struct es58x_operators *ops = priv->es58x_dev->ops;
+ const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type);
+
+ switch (rx_cmd_ret_u32) {
+ case ES58X_RET_U32_OK:
+ switch (cmd_ret_type) {
+ case ES58X_RET_TYPE_ENABLE_CHANNEL:
+ es58x_can_reset_echo_fifo(netdev);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_wake_queue(netdev);
+ netdev_info(netdev,
+ "%s: %s (Serial Number %s): CAN%d channel becomes ready\n",
+ ret_desc, priv->es58x_dev->udev->product,
+ priv->es58x_dev->udev->serial,
+ priv->channel_idx + 1);
+ break;
+
+ case ES58X_RET_TYPE_TX_MSG:
+ if (IS_ENABLED(CONFIG_VERBOSE_DEBUG) && net_ratelimit())
+ netdev_vdbg(netdev, "%s: OK\n", ret_desc);
+ break;
+
+ default:
+ netdev_dbg(netdev, "%s: OK\n", ret_desc);
+ break;
+ }
+ return 0;
+
+ case ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE:
+ if (cmd_ret_type == ES58X_RET_TYPE_ENABLE_CHANNEL) {
+ int ret;
+
+ netdev_warn(netdev,
+ "%s: channel is already opened, closing and re-opening it to reflect new configuration\n",
+ ret_desc);
+ ret = ops->disable_channel(es58x_priv(netdev));
+ if (ret)
+ return ret;
+ return ops->enable_channel(es58x_priv(netdev));
+ }
+ if (cmd_ret_type == ES58X_RET_TYPE_DISABLE_CHANNEL) {
+ netdev_info(netdev,
+ "%s: channel is already closed\n", ret_desc);
+ return 0;
+ }
+ netdev_err(netdev,
+ "%s: unspecified failure\n", ret_desc);
+ return -EBADMSG;
+
+ case ES58X_RET_U32_ERR_NO_MEM:
+ netdev_err(netdev, "%s: device ran out of memory\n", ret_desc);
+ return -ENOMEM;
+
+ case ES58X_RET_U32_WARN_PARAM_ADJUSTED:
+ netdev_warn(netdev,
+ "%s: some incompatible parameters have been adjusted\n",
+ ret_desc);
+ return 0;
+
+ case ES58X_RET_U32_WARN_TX_MAYBE_REORDER:
+ netdev_warn(netdev,
+ "%s: TX messages might have been reordered\n",
+ ret_desc);
+ return 0;
+
+ case ES58X_RET_U32_ERR_TIMEDOUT:
+ netdev_err(netdev, "%s: command timed out\n", ret_desc);
+ return -ETIMEDOUT;
+
+ case ES58X_RET_U32_ERR_FIFO_FULL:
+ netdev_warn(netdev, "%s: fifo is full\n", ret_desc);
+ return 0;
+
+ case ES58X_RET_U32_ERR_BAD_CONFIG:
+ netdev_err(netdev, "%s: bad configuration\n", ret_desc);
+ return -EINVAL;
+
+ case ES58X_RET_U32_ERR_NO_RESOURCE:
+ netdev_err(netdev, "%s: no resource available\n", ret_desc);
+ return -EBUSY;
+
+ default:
+ netdev_err(netdev, "%s returned unknown value: 0x%08X\n",
+ ret_desc, rx_cmd_ret_u32);
+ return -EBADMSG;
+ }
+}
+
+/**
+ * es58x_increment_rx_errors() - Increment the network devices' error
+ * count.
+ * @es58x_dev: ES58X device.
+ *
+ * If an error occurs in the early stages of receiving an URB command,
+ * we might not be able to figure out on which network device the
+ * error occurred. In such case, we arbitrarily increment the error
+ * count of all the network devices attached to our ES58X device.
+ */
+static void es58x_increment_rx_errors(struct es58x_device *es58x_dev)
+{
+ int i;
+
+ for (i = 0; i < es58x_dev->num_can_ch; i++)
+ if (es58x_dev->netdev[i])
+ es58x_dev->netdev[i]->stats.rx_errors++;
+}
+
+/**
+ * es58x_handle_urb_cmd() - Handle the URB command
+ * @es58x_dev: ES58X device.
+ * @urb_cmd: The URB command received from the ES58X device, might not
+ * be aligned.
+ *
+ * Sends the URB command to the device specific function. Manages the
+ * errors thrown back by those functions.
+ */
+static void es58x_handle_urb_cmd(struct es58x_device *es58x_dev,
+ const union es58x_urb_cmd *urb_cmd)
+{
+ const struct es58x_operators *ops = es58x_dev->ops;
+ size_t cmd_len;
+ int i, ret;
+
+ ret = ops->handle_urb_cmd(es58x_dev, urb_cmd);
+ switch (ret) {
+ case 0: /* OK */
+ return;
+
+ case -ENODEV:
+ dev_err_ratelimited(es58x_dev->dev, "Device is not ready\n");
+ break;
+
+ case -EINVAL:
+ case -EMSGSIZE:
+ case -EBADRQC:
+ case -EBADMSG:
+ case -ECHRNG:
+ case -ETIMEDOUT:
+ cmd_len = es58x_get_urb_cmd_len(es58x_dev,
+ ops->get_msg_len(urb_cmd));
+ dev_err(es58x_dev->dev,
+ "ops->handle_urb_cmd() returned error %pe",
+ ERR_PTR(ret));
+ es58x_print_hex_dump(urb_cmd, cmd_len);
+ break;
+
+ case -EFAULT:
+ case -ENOMEM:
+ case -EIO:
+ default:
+ dev_crit(es58x_dev->dev,
+ "ops->handle_urb_cmd() returned error %pe, detaching all network devices\n",
+ ERR_PTR(ret));
+ for (i = 0; i < es58x_dev->num_can_ch; i++)
+ if (es58x_dev->netdev[i])
+ netif_device_detach(es58x_dev->netdev[i]);
+ if (es58x_dev->ops->reset_device)
+ es58x_dev->ops->reset_device(es58x_dev);
+ break;
+ }
+
+ /* Because the urb command could not fully be parsed,
+ * channel_id is not confirmed. Incrementing rx_errors count
+ * of all channels.
+ */
+ es58x_increment_rx_errors(es58x_dev);
+}
+
+/**
+ * es58x_check_rx_urb() - Check the length and format of the URB command.
+ * @es58x_dev: ES58X device.
+ * @urb_cmd: The URB command received from the ES58X device, might not
+ * be aligned.
+ * @urb_actual_len: The actual length of the URB command.
+ *
+ * Check if the first message of the received urb is valid, that is to
+ * say that both the header and the length are coherent.
+ *
+ * Return:
+ * the length of the first message of the URB on success.
+ *
+ * -ENODATA if the URB command is incomplete (in which case, the URB
+ * command should be buffered and combined with the next URB to try to
+ * reconstitute the URB command).
+ *
+ * -EOVERFLOW if the length is bigger than the maximum expected one.
+ *
+ * -EBADRQC if the start of frame does not match the expected value.
+ */
+static signed int es58x_check_rx_urb(struct es58x_device *es58x_dev,
+ const union es58x_urb_cmd *urb_cmd,
+ u32 urb_actual_len)
+{
+ const struct device *dev = es58x_dev->dev;
+ const struct es58x_parameters *param = es58x_dev->param;
+ u16 sof, msg_len;
+ signed int urb_cmd_len, ret;
+
+ if (urb_actual_len < param->urb_cmd_header_len) {
+ dev_vdbg(dev,
+ "%s: Received %d bytes [%*ph]: header incomplete\n",
+ __func__, urb_actual_len, urb_actual_len,
+ urb_cmd->raw_cmd);
+ return -ENODATA;
+ }
+
+ sof = get_unaligned_le16(&urb_cmd->sof);
+ if (sof != param->rx_start_of_frame) {
+ dev_err_ratelimited(es58x_dev->dev,
+ "%s: Expected sequence 0x%04X for start of frame but got 0x%04X.\n",
+ __func__, param->rx_start_of_frame, sof);
+ return -EBADRQC;
+ }
+
+ msg_len = es58x_dev->ops->get_msg_len(urb_cmd);
+ urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len);
+ if (urb_cmd_len > param->rx_urb_cmd_max_len) {
+ dev_err_ratelimited(es58x_dev->dev,
+ "%s: Biggest expected size for rx urb_cmd is %u but receive a command of size %d\n",
+ __func__,
+ param->rx_urb_cmd_max_len, urb_cmd_len);
+ return -EOVERFLOW;
+ } else if (urb_actual_len < urb_cmd_len) {
+ dev_vdbg(dev, "%s: Received %02d/%02d bytes\n",
+ __func__, urb_actual_len, urb_cmd_len);
+ return -ENODATA;
+ }
+
+ ret = es58x_check_crc(es58x_dev, urb_cmd, urb_cmd_len);
+ if (ret)
+ return ret;
+
+ return urb_cmd_len;
+}
+
+/**
+ * es58x_copy_to_cmd_buf() - Copy an array to the URB command buffer.
+ * @es58x_dev: ES58X device.
+ * @raw_cmd: the buffer we want to copy.
+ * @raw_cmd_len: length of @raw_cmd.
+ *
+ * Concatenates @raw_cmd_len bytes of @raw_cmd to the end of the URB
+ * command buffer.
+ *
+ * Return: zero on success, -EMSGSIZE if not enough space is available
+ * to do the copy.
+ */
+static int es58x_copy_to_cmd_buf(struct es58x_device *es58x_dev,
+ u8 *raw_cmd, int raw_cmd_len)
+{
+ if (es58x_dev->rx_cmd_buf_len + raw_cmd_len >
+ es58x_dev->param->rx_urb_cmd_max_len)
+ return -EMSGSIZE;
+
+ memcpy(&es58x_dev->rx_cmd_buf.raw_cmd[es58x_dev->rx_cmd_buf_len],
+ raw_cmd, raw_cmd_len);
+ es58x_dev->rx_cmd_buf_len += raw_cmd_len;
+
+ return 0;
+}
+
+/**
+ * es58x_split_urb_try_recovery() - Try to recover bad URB sequences.
+ * @es58x_dev: ES58X device.
+ * @raw_cmd: pointer to the buffer we want to copy.
+ * @raw_cmd_len: length of @raw_cmd.
+ *
+ * Under some rare conditions, we might get incorrect URBs from the
+ * device. From our observations, one of the valid URBs gets replaced
+ * by one from the past. The full root cause is not identified.
+ *
+ * This function looks for the next start of frame in the urb buffer
+ * in order to try to recover.
+ *
+ * Such behavior was not observed on the devices of the ES58X FD
+ * family and only seems to impact the ES581.4.
+ *
+ * Return: the number of bytes dropped on success, -EBADMSG if recovery failed.
+ */
+static int es58x_split_urb_try_recovery(struct es58x_device *es58x_dev,
+ u8 *raw_cmd, size_t raw_cmd_len)
+{
+ union es58x_urb_cmd *urb_cmd;
+ signed int urb_cmd_len;
+ u16 sof;
+ int dropped_bytes = 0;
+
+ es58x_increment_rx_errors(es58x_dev);
+
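+	/* Scan the buffer one byte at a time until a coherent URB
+	 * command is found again.
+	 */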
+ while (raw_cmd_len > sizeof(sof)) {
+ urb_cmd = (union es58x_urb_cmd *)raw_cmd;
+ sof = get_unaligned_le16(&urb_cmd->sof);
+
+ if (sof == es58x_dev->param->rx_start_of_frame) {
+ urb_cmd_len = es58x_check_rx_urb(es58x_dev,
+ urb_cmd, raw_cmd_len);
+ if ((urb_cmd_len == -ENODATA) || urb_cmd_len > 0) {
+ dev_info_ratelimited(es58x_dev->dev,
+ "Recovery successful! Dropped %d bytes (urb_cmd_len: %d)\n",
+ dropped_bytes,
+ urb_cmd_len);
+ return dropped_bytes;
+ }
+ }
+ raw_cmd++;
+ raw_cmd_len--;
+ dropped_bytes++;
+ }
+
+ dev_warn_ratelimited(es58x_dev->dev, "%s: Recovery failed\n", __func__);
+ return -EBADMSG;
+}
+
+/**
+ * es58x_handle_incomplete_cmd() - Reconstitute an URB command from
+ * different URB pieces.
+ * @es58x_dev: ES58X device.
+ * @urb: last urb buffer received.
+ *
+ * The device might split an URB command into an arbitrary number of
+ * pieces. This function concatenates those pieces in an URB buffer
+ * until a full URB command is reconstituted and then consumes it.
+ *
+ * Return:
+ * number of bytes consumed from @urb if successful.
+ *
+ * -ENODATA if the URB command is still incomplete.
+ *
+ * -EBADMSG if the URB command is incorrect.
+ */
+static signed int es58x_handle_incomplete_cmd(struct es58x_device *es58x_dev,
+ struct urb *urb)
+{
+ size_t cpy_len;
+ signed int urb_cmd_len, tmp_cmd_buf_len, ret;
+
+ tmp_cmd_buf_len = es58x_dev->rx_cmd_buf_len;
+ cpy_len = min_t(int, es58x_dev->param->rx_urb_cmd_max_len -
+ es58x_dev->rx_cmd_buf_len, urb->actual_length);
+ ret = es58x_copy_to_cmd_buf(es58x_dev, urb->transfer_buffer, cpy_len);
+ if (ret < 0)
+ return ret;
+
+ urb_cmd_len = es58x_check_rx_urb(es58x_dev, &es58x_dev->rx_cmd_buf,
+ es58x_dev->rx_cmd_buf_len);
+ if (urb_cmd_len == -ENODATA) {
+ return -ENODATA;
+ } else if (urb_cmd_len < 0) {
+ dev_err_ratelimited(es58x_dev->dev,
+ "Could not reconstitute incomplete command from previous URB, dropping %d bytes\n",
+ tmp_cmd_buf_len + urb->actual_length);
+ dev_err_ratelimited(es58x_dev->dev,
+ "Error code: %pe, es58x_dev->rx_cmd_buf_len: %d, urb->actual_length: %u\n",
+ ERR_PTR(urb_cmd_len),
+ tmp_cmd_buf_len, urb->actual_length);
+ es58x_print_hex_dump(&es58x_dev->rx_cmd_buf, tmp_cmd_buf_len);
+ es58x_print_hex_dump(urb->transfer_buffer, urb->actual_length);
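+		/* Nothing to salvage: report the full URB as consumed
+		 * so that its content gets dropped.
+		 */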
+ return urb->actual_length;
+ }
+
+ es58x_handle_urb_cmd(es58x_dev, &es58x_dev->rx_cmd_buf);
+ return urb_cmd_len - tmp_cmd_buf_len; /* consumed length */
+}
+
+/**
+ * es58x_split_urb() - Cut the received URB in individual URB commands.
+ * @es58x_dev: ES58X device.
+ * @urb: last urb buffer received.
+ *
+ * The device might send URBs in bulk format (i.e. several URB
+ * commands concatenated together). This function splits all the
+ * commands contained in the urb.
+ *
+ * Return:
+ * number of bytes consumed from @urb if successful.
+ *
+ * -ENODATA if the URB command is incomplete.
+ *
+ * -EBADMSG if the URB command is incorrect.
+ */
+static signed int es58x_split_urb(struct es58x_device *es58x_dev,
+ struct urb *urb)
+{
+ union es58x_urb_cmd *urb_cmd;
+ u8 *raw_cmd = urb->transfer_buffer;
+ s32 raw_cmd_len = urb->actual_length;
+ int ret;
+
+ if (es58x_dev->rx_cmd_buf_len != 0) {
+ ret = es58x_handle_incomplete_cmd(es58x_dev, urb);
+ if (ret != -ENODATA)
+ es58x_dev->rx_cmd_buf_len = 0;
+ if (ret < 0)
+ return ret;
+
+ raw_cmd += ret;
+ raw_cmd_len -= ret;
+ }
+
+ while (raw_cmd_len > 0) {
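+		/* Single heartbeat bytes may be interleaved between
+		 * the URB commands and carry no payload: silently
+		 * skip them.
+		 */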
+ if (raw_cmd[0] == ES58X_HEARTBEAT) {
+ raw_cmd++;
+ raw_cmd_len--;
+ continue;
+ }
+ urb_cmd = (union es58x_urb_cmd *)raw_cmd;
+ ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len);
+ if (ret > 0) {
+ es58x_handle_urb_cmd(es58x_dev, urb_cmd);
+ } else if (ret == -ENODATA) {
+ es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len);
+ return -ENODATA;
+ } else if (ret < 0) {
+ ret = es58x_split_urb_try_recovery(es58x_dev, raw_cmd,
+ raw_cmd_len);
+ if (ret < 0)
+ return ret;
+ }
+ raw_cmd += ret;
+ raw_cmd_len -= ret;
+ }
+
+ return 0;
+}
+
+/**
+ * es58x_read_bulk_callback() - Callback for reading data from device.
+ * @urb: last urb buffer received.
+ *
+ * This function gets called each time an URB is received from the
+ * ES58X device.
+ *
+ * Checks urb status, calls read function and resubmits urb read
+ * operation.
+ */
+static void es58x_read_bulk_callback(struct urb *urb)
+{
+ struct es58x_device *es58x_dev = urb->context;
+ const struct device *dev = es58x_dev->dev;
+ int i, ret;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -EOVERFLOW:
+ dev_err_ratelimited(dev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ es58x_print_hex_dump_debug(urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ goto resubmit_urb;
+
+ case -EPROTO:
+ dev_warn_ratelimited(dev, "%s: error %pe. Device unplugged?\n",
+ __func__, ERR_PTR(urb->status));
+ goto free_urb;
+
+ case -ENOENT:
+ case -EPIPE:
+ dev_err_ratelimited(dev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ goto free_urb;
+
+ case -ESHUTDOWN:
+ dev_dbg_ratelimited(dev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ goto free_urb;
+
+ default:
+ dev_err_ratelimited(dev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ goto resubmit_urb;
+ }
+
+ ret = es58x_split_urb(es58x_dev, urb);
+ if ((ret != -ENODATA) && ret < 0) {
+ dev_err(es58x_dev->dev, "es58x_split_urb() returned error %pe",
+ ERR_PTR(ret));
+ es58x_print_hex_dump_debug(urb->transfer_buffer,
+ urb->actual_length);
+
+ /* Because the urb command could not be parsed,
+ * channel_id is not confirmed. Incrementing rx_errors
+ * count of all channels.
+ */
+ es58x_increment_rx_errors(es58x_dev);
+ }
+
+ resubmit_urb:
+ usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
+ urb->transfer_buffer, urb->transfer_buffer_length,
+ es58x_read_bulk_callback, es58x_dev);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret == -ENODEV) {
+ for (i = 0; i < es58x_dev->num_can_ch; i++)
+ if (es58x_dev->netdev[i])
+ netif_device_detach(es58x_dev->netdev[i]);
+ } else if (ret)
+ dev_err_ratelimited(dev,
+ "Failed resubmitting read bulk urb: %pe\n",
+ ERR_PTR(ret));
+ return;
+
+ free_urb:
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+}
+
+/**
+ * es58x_write_bulk_callback() - Callback after writing data to the device.
+ * @urb: urb buffer which was previously submitted.
+ *
+ * This function gets called each time an URB has been sent to the
+ * ES58X device.
+ *
+ * Puts the @urb back to the urbs idle anchor and tries to restart the
+ * network queue.
+ */
+static void es58x_write_bulk_callback(struct urb *urb)
+{
+ struct net_device *netdev = urb->context;
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -EOVERFLOW:
+ if (net_ratelimit())
+ netdev_err(netdev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ es58x_print_hex_dump(urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ break;
+
+ case -ENOENT:
+ if (net_ratelimit())
+ netdev_dbg(netdev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ usb_free_coherent(urb->dev,
+ es58x_dev->param->tx_urb_cmd_max_len,
+ urb->transfer_buffer, urb->transfer_dma);
+ return;
+
+ default:
+ if (net_ratelimit())
+ netdev_info(netdev, "%s: error %pe\n",
+ __func__, ERR_PTR(urb->status));
+ break;
+ }
+
+ usb_anchor_urb(urb, &es58x_dev->tx_urbs_idle);
+ atomic_inc(&es58x_dev->tx_urbs_idle_cnt);
+}
+
+/**
+ * es58x_alloc_urb() - Allocate memory for an URB and its transfer
+ * buffer.
+ * @es58x_dev: ES58X device.
+ * @urb: URB to be allocated.
+ * @buf: used to return the CPU address of the transfer buffer.
+ * @buf_len: requested buffer size.
+ * @mem_flags: affect whether allocation may block.
+ *
+ * Allocates an URB and its @transfer_buffer and sets its @transfer_dma
+ * address.
+ *
+ * This function is used at start-up to allocate all RX URBs at once
+ * and during run time for TX URBs.
+ *
+ * Return: zero on success, -ENOMEM if no memory is available.
+ */
+static int es58x_alloc_urb(struct es58x_device *es58x_dev, struct urb **urb,
+ u8 **buf, size_t buf_len, gfp_t mem_flags)
+{
+ *urb = usb_alloc_urb(0, mem_flags);
+ if (!*urb) {
+ dev_err(es58x_dev->dev, "No memory left for URBs\n");
+ return -ENOMEM;
+ }
+
+ *buf = usb_alloc_coherent(es58x_dev->udev, buf_len,
+ mem_flags, &(*urb)->transfer_dma);
+ if (!*buf) {
+ dev_err(es58x_dev->dev, "No memory left for USB buffer\n");
+ usb_free_urb(*urb);
+ return -ENOMEM;
+ }
+
+ (*urb)->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return 0;
+}
+
+/**
+ * es58x_get_tx_urb() - Get an URB for transmission.
+ * @es58x_dev: ES58X device.
+ *
+ * Gets an URB from the idle urbs anchor or allocates a new one if the
+ * anchor is empty.
+ *
+ * If there are more than ES58X_TX_URBS_MAX in the idle anchor, do
+ * some garbage collection. The garbage collection is done here
+ * instead of within es58x_write_bulk_callback() because
+ * usb_free_coherent() should not be used in IRQ context:
+ * c.f. WARN_ON(irqs_disabled()) in dma_free_attrs().
+ *
+ * Return: a pointer to an URB on success, NULL if no memory is
+ * available.
+ */
+static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev)
+{
+ atomic_t *idle_cnt = &es58x_dev->tx_urbs_idle_cnt;
+ struct urb *urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle);
+
+ if (!urb) {
+ size_t tx_buf_len;
+ u8 *buf;
+
+ tx_buf_len = es58x_dev->param->tx_urb_cmd_max_len;
+ if (es58x_alloc_urb(es58x_dev, &urb, &buf, tx_buf_len,
+ GFP_ATOMIC))
+ return NULL;
+
+ usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
+ buf, tx_buf_len, NULL, NULL);
+ return urb;
+ }
+
+ while (atomic_dec_return(idle_cnt) > ES58X_TX_URBS_MAX) {
+ /* Garbage collector */
+ struct urb *tmp = usb_get_from_anchor(&es58x_dev->tx_urbs_idle);
+
+ if (!tmp)
+ break;
+ usb_free_coherent(tmp->dev,
+ es58x_dev->param->tx_urb_cmd_max_len,
+ tmp->transfer_buffer, tmp->transfer_dma);
+ usb_free_urb(tmp);
+ }
+
+ return urb;
+}
+
+/**
+ * es58x_submit_urb() - Send data to the device.
+ * @es58x_dev: ES58X device.
+ * @urb: URB to be sent.
+ * @netdev: CAN network device.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb,
+ struct net_device *netdev)
+{
+ int ret;
+
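+	/* The 16 bit CRC is stored in the last two bytes of the URB
+	 * command (c.f. es58x_get_urb_cmd_len()).
+	 */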
+ es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length);
+ usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
+ urb->transfer_buffer, urb->transfer_buffer_length,
+ es58x_write_bulk_callback, netdev);
+ usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ netdev_err(netdev, "%s: USB send urb failure: %pe\n",
+ __func__, ERR_PTR(ret));
+ usb_unanchor_urb(urb);
+ usb_free_coherent(urb->dev,
+ es58x_dev->param->tx_urb_cmd_max_len,
+ urb->transfer_buffer, urb->transfer_dma);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+/**
+ * es58x_send_msg() - Prepare an URB and submit it.
+ * @es58x_dev: ES58X device.
+ * @cmd_type: Command type.
+ * @cmd_id: Command ID.
+ * @msg: ES58X message to be sent.
+ * @msg_len: Length of @msg.
+ * @channel_idx: Index of the network device.
+ *
+ * Creates an URB command from a given message, sets the header and the
+ * CRC and then submits it.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id,
+ const void *msg, u16 msg_len, int channel_idx)
+{
+ struct net_device *netdev;
+ union es58x_urb_cmd *urb_cmd;
+ struct urb *urb;
+ int urb_cmd_len;
+
+ if (channel_idx == ES58X_CHANNEL_IDX_NA)
+ netdev = es58x_dev->netdev[0]; /* Default to first channel */
+ else
+ netdev = es58x_dev->netdev[channel_idx];
+
+ urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len);
+ if (urb_cmd_len > es58x_dev->param->tx_urb_cmd_max_len)
+ return -EOVERFLOW;
+
+ urb = es58x_get_tx_urb(es58x_dev);
+ if (!urb)
+ return -ENOMEM;
+
+ urb_cmd = urb->transfer_buffer;
+ es58x_dev->ops->fill_urb_header(urb_cmd, cmd_type, cmd_id,
+ channel_idx, msg_len);
+ memcpy(&urb_cmd->raw_cmd[es58x_dev->param->urb_cmd_header_len],
+ msg, msg_len);
+ urb->transfer_buffer_length = urb_cmd_len;
+
+ return es58x_submit_urb(es58x_dev, urb, netdev);
+}
+
+/**
+ * es58x_alloc_rx_urbs() - Allocate RX URBs.
+ * @es58x_dev: ES58X device.
+ *
+ * Allocate URBs for reception and anchor them.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
+{
+ const struct device *dev = es58x_dev->dev;
+ const struct es58x_parameters *param = es58x_dev->param;
+ size_t rx_buf_len = es58x_dev->rx_max_packet_size;
+ struct urb *urb;
+ u8 *buf;
+ int i;
+ int ret = -EINVAL;
+
+ for (i = 0; i < param->rx_urb_max; i++) {
+ ret = es58x_alloc_urb(es58x_dev, &urb, &buf, rx_buf_len,
+ GFP_KERNEL);
+ if (ret)
+ break;
+
+ usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
+ buf, rx_buf_len, es58x_read_bulk_callback,
+ es58x_dev);
+ usb_anchor_urb(urb, &es58x_dev->rx_urbs);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(es58x_dev->udev, rx_buf_len,
+ buf, urb->transfer_dma);
+ usb_free_urb(urb);
+ break;
+ }
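+		/* Drop our local reference: the rx_urbs anchor and the
+		 * USB core still hold references on the submitted URB.
+		 */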
+ usb_free_urb(urb);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "%s: Could not setup any rx URBs\n", __func__);
+ return ret;
+ }
+ dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n",
+ __func__, i, rx_buf_len);
+
+ return ret;
+}
+
+/**
+ * es58x_free_urbs() - Free all the TX and RX URBs.
+ * @es58x_dev: ES58X device.
+ */
+static void es58x_free_urbs(struct es58x_device *es58x_dev)
+{
+ struct urb *urb;
+
+ if (!usb_wait_anchor_empty_timeout(&es58x_dev->tx_urbs_busy, 1000)) {
+ dev_err(es58x_dev->dev, "%s: Timeout, some TX urbs still remain\n",
+ __func__);
+ usb_kill_anchored_urbs(&es58x_dev->tx_urbs_busy);
+ }
+
+ while ((urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle)) != NULL) {
+ usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ atomic_dec(&es58x_dev->tx_urbs_idle_cnt);
+ }
+ if (atomic_read(&es58x_dev->tx_urbs_idle_cnt))
+ dev_err(es58x_dev->dev,
+ "All idle urbs were freed but tx_urb_idle_cnt is %d\n",
+ atomic_read(&es58x_dev->tx_urbs_idle_cnt));
+
+ usb_kill_anchored_urbs(&es58x_dev->rx_urbs);
+}
+
+/**
+ * es58x_open() - Enable the network device.
+ * @netdev: CAN network device.
+ *
+ * Called when the network device transitions to the up state.
+ * Allocate the URB resources if needed and open the channel.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_open(struct net_device *netdev)
+{
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+ int ret;
+
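+	/* The URB resources are shared by all the channels: only
+	 * allocate them when the first channel is opened.
+	 */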
+ if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ ret = es58x_alloc_rx_urbs(es58x_dev);
+ if (ret)
+ return ret;
+
+ ret = es58x_set_realtime_diff_ns(es58x_dev);
+ if (ret)
+ goto free_urbs;
+ }
+
+ ret = open_candev(netdev);
+ if (ret)
+ goto free_urbs;
+
+ ret = es58x_dev->ops->enable_channel(es58x_priv(netdev));
+ if (ret)
+ goto free_urbs;
+
+ netif_start_queue(netdev);
+
+ return ret;
+
+ free_urbs:
+ if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_free_urbs(es58x_dev);
+ netdev_err(netdev, "%s: Could not open the network device: %pe\n",
+ __func__, ERR_PTR(ret));
+
+ return ret;
+}
+
+/**
+ * es58x_stop() - Disable the network device.
+ * @netdev: CAN network device.
+ *
+ * Called when the network device transitions to the down state. If all
+ * channels of the device are closed, free the URB resources which are
+ * not needed anymore.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_stop(struct net_device *netdev)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+ int ret;
+
+ netif_stop_queue(netdev);
+ ret = es58x_dev->ops->disable_channel(priv);
+ if (ret)
+ return ret;
+
+ priv->can.state = CAN_STATE_STOPPED;
+ es58x_can_reset_echo_fifo(netdev);
+ close_candev(netdev);
+
+ es58x_flush_pending_tx_msg(netdev);
+
+ if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_free_urbs(es58x_dev);
+
+ return 0;
+}
+
+/**
+ * es58x_xmit_commit() - Send the bulk urb.
+ * @netdev: CAN network device.
+ *
+ * Do the bulk send. This function should be called only once per bulk
+ * transmission.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_xmit_commit(struct net_device *netdev)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ int ret;
+
+ if (!es58x_is_can_state_active(netdev))
+ return -ENETDOWN;
+
+ if (es58x_is_echo_skb_threshold_reached(priv))
+ netif_stop_queue(netdev);
+
+ ret = es58x_submit_urb(priv->es58x_dev, priv->tx_urb, netdev);
+ if (ret == 0)
+ priv->tx_urb = NULL;
+
+ return ret;
+}
+
+/**
+ * es58x_xmit_more() - Can we queue more packets?
+ * @priv: ES58X private parameters related to the network device.
+ *
+ * Return: true if more packets can be queued, false if it is time to
+ * send.
+ */
+static bool es58x_xmit_more(struct es58x_priv *priv)
+{
+ unsigned int free_slots =
+ priv->can.echo_skb_max - (priv->tx_head - priv->tx_tail);
+
+ return netdev_xmit_more() && free_slots > 0 &&
+ priv->tx_can_msg_cnt < priv->es58x_dev->param->tx_bulk_max;
+}
+
+/**
+ * es58x_start_xmit() - Transmit an skb.
+ * @skb: socket buffer of a CAN message.
+ * @netdev: CAN network device.
+ *
+ * Called when a packet needs to be transmitted.
+ *
+ * This function relies on Byte Queue Limits (BQL). The main benefit
+ * is to increase the throughput by allowing bulk transfers
+ * (c.f. xmit_more flag).
+ *
+ * Queues up to tx_bulk_max messages in &tx_urb buffer and does
+ * a bulk send of all messages in one single URB.
+ *
+ * Return: NETDEV_TX_OK regardless of whether we could transmit the @skb or
+ * had to drop it.
+ */
+static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+ unsigned int frame_len;
+ int ret;
+
+ if (can_dropped_invalid_skb(netdev, skb)) {
+ if (priv->tx_urb)
+ goto xmit_commit;
+ return NETDEV_TX_OK;
+ }
+
+ if (priv->tx_urb && priv->tx_can_msg_is_fd != can_is_canfd_skb(skb)) {
+ /* Can not do bulk send with mixed CAN and CAN FD frames. */
+ ret = es58x_xmit_commit(netdev);
+ if (ret)
+ goto drop_skb;
+ }
+
+ if (!priv->tx_urb) {
+ priv->tx_urb = es58x_get_tx_urb(es58x_dev);
+ if (!priv->tx_urb) {
+ ret = -ENOMEM;
+ goto drop_skb;
+ }
+ priv->tx_can_msg_cnt = 0;
+ priv->tx_can_msg_is_fd = can_is_canfd_skb(skb);
+ }
+
+ ret = es58x_dev->ops->tx_can_msg(priv, skb);
+ if (ret)
+ goto drop_skb;
+
+ frame_len = can_skb_get_frame_len(skb);
+ ret = can_put_echo_skb(skb, netdev,
+ priv->tx_head & es58x_dev->param->fifo_mask,
+ frame_len);
+ if (ret)
+ goto xmit_failure;
+ netdev_sent_queue(netdev, frame_len);
+
+ priv->tx_head++;
+ priv->tx_can_msg_cnt++;
+
+ xmit_commit:
+ if (!es58x_xmit_more(priv)) {
+ ret = es58x_xmit_commit(netdev);
+ if (ret)
+ goto xmit_failure;
+ }
+
+ return NETDEV_TX_OK;
+
+ drop_skb:
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ xmit_failure:
+ netdev_warn(netdev, "%s: send message failure: %pe\n",
+ __func__, ERR_PTR(ret));
+ netdev->stats.tx_errors++;
+ es58x_flush_pending_tx_msg(netdev);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops es58x_netdev_ops = {
+ .ndo_open = es58x_open,
+ .ndo_stop = es58x_stop,
+ .ndo_start_xmit = es58x_start_xmit
+};
+
+/**
+ * es58x_set_mode() - Change network device mode.
+ * @netdev: CAN network device.
+ * @mode: either %CAN_MODE_START, %CAN_MODE_STOP or %CAN_MODE_SLEEP
+ *
+ * Currently, this function is only used to stop and restart the
+ * channel during a bus off event (c.f. es58x_rx_err_msg() and
+ * drivers/net/can/dev.c:can_restart() which are the only two
+ * callers).
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ switch (priv->can.state) {
+ case CAN_STATE_BUS_OFF:
+ return priv->es58x_dev->ops->enable_channel(priv);
+
+ case CAN_STATE_STOPPED:
+ return es58x_open(netdev);
+
+ case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
+ case CAN_STATE_ERROR_PASSIVE:
+ default:
+ return 0;
+ }
+
+ case CAN_MODE_STOP:
+ switch (priv->can.state) {
+ case CAN_STATE_STOPPED:
+ return 0;
+
+ case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
+ case CAN_STATE_ERROR_PASSIVE:
+ case CAN_STATE_BUS_OFF:
+ default:
+ return priv->es58x_dev->ops->disable_channel(priv);
+ }
+
+ case CAN_MODE_SLEEP:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * es58x_init_priv() - Initialize private parameters.
+ * @es58x_dev: ES58X device.
+ * @priv: ES58X private parameters related to the network device.
+ * @channel_idx: Index of the network device.
+ */
+static void es58x_init_priv(struct es58x_device *es58x_dev,
+ struct es58x_priv *priv, int channel_idx)
+{
+ const struct es58x_parameters *param = es58x_dev->param;
+ struct can_priv *can = &priv->can;
+
+ priv->es58x_dev = es58x_dev;
+ priv->channel_idx = channel_idx;
+ priv->tx_urb = NULL;
+ priv->tx_can_msg_cnt = 0;
+
+ can->bittiming_const = param->bittiming_const;
+ if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
+ can->data_bittiming_const = param->data_bittiming_const;
+ can->tdc_const = param->tdc_const;
+ }
+ can->bitrate_max = param->bitrate_max;
+ can->clock = param->clock;
+ can->state = CAN_STATE_STOPPED;
+ can->ctrlmode_supported = param->ctrlmode_supported;
+ can->do_set_mode = es58x_set_mode;
+}
+
+/**
+ * es58x_init_netdev() - Initialize the network device.
+ * @es58x_dev: ES58X device.
+ * @channel_idx: Index of the network device.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
+{
+ struct net_device *netdev;
+ struct device *dev = es58x_dev->dev;
+ int ret;
+
+ netdev = alloc_candev(sizeof(struct es58x_priv),
+ es58x_dev->param->fifo_mask + 1);
+ if (!netdev) {
+ dev_err(dev, "Could not allocate candev\n");
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ es58x_dev->netdev[channel_idx] = netdev;
+ es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
+
+ netdev->netdev_ops = &es58x_netdev_ops;
+ netdev->flags |= IFF_ECHO; /* We support local echo */
+
+ ret = register_candev(netdev);
+ if (ret)
+ return ret;
+
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0),
+ es58x_dev->param->dql_min_limit);
+
+ return ret;
+}
+
+/**
+ * es58x_get_product_info() - Get the product information and print it.
+ * @es58x_dev: ES58X device.
+ *
+ * Do a synchronous call to get the product information.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_get_product_info(struct es58x_device *es58x_dev)
+{
+ struct usb_device *udev = es58x_dev->udev;
+ const int es58x_prod_info_idx = 6;
+	/* Empirical tests show a maximum prod_info length of 83; the
+	 * value below should be more than enough.
+ */
+ const size_t prod_info_len = 127;
+ char *prod_info;
+ int ret;
+
+ prod_info = kmalloc(prod_info_len, GFP_KERNEL);
+ if (!prod_info)
+ return -ENOMEM;
+
+ ret = usb_string(udev, es58x_prod_info_idx, prod_info, prod_info_len);
+ if (ret < 0) {
+ dev_err(es58x_dev->dev,
+ "%s: Could not read the product info: %pe\n",
+ __func__, ERR_PTR(ret));
+ goto out_free;
+ }
+ if (ret >= prod_info_len - 1) {
+ dev_warn(es58x_dev->dev,
+ "%s: Buffer is too small, result might be truncated\n",
+ __func__);
+ }
+ dev_info(es58x_dev->dev, "Product info: %s\n", prod_info);
+
+ out_free:
+ kfree(prod_info);
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * es58x_init_es58x_dev() - Initialize the ES58X device.
+ * @intf: USB interface.
+ * @p_es58x_dev: pointer to the address of the ES58X device.
+ * @driver_info: Quirks of the device.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_init_es58x_dev(struct usb_interface *intf,
+ struct es58x_device **p_es58x_dev,
+ kernel_ulong_t driver_info)
+{
+ struct device *dev = &intf->dev;
+ struct es58x_device *es58x_dev;
+ const struct es58x_parameters *param;
+ const struct es58x_operators *ops;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ int ret;
+
+ dev_info(dev,
+ "Starting %s %s (Serial Number %s) driver version %s\n",
+ udev->manufacturer, udev->product, udev->serial, DRV_VERSION);
+
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ if (driver_info & ES58X_FD_FAMILY) {
+ param = &es58x_fd_param;
+ ops = &es58x_fd_ops;
+ } else {
+ param = &es581_4_param;
+ ops = &es581_4_ops;
+ }
+
+ es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), GFP_KERNEL);
+ if (!es58x_dev)
+ return -ENOMEM;
+
+ es58x_dev->param = param;
+ es58x_dev->ops = ops;
+ es58x_dev->dev = dev;
+ es58x_dev->udev = udev;
+
+ if (driver_info & ES58X_DUAL_CHANNEL)
+ es58x_dev->num_can_ch = 2;
+ else
+ es58x_dev->num_can_ch = 1;
+
+ init_usb_anchor(&es58x_dev->rx_urbs);
+ init_usb_anchor(&es58x_dev->tx_urbs_idle);
+ init_usb_anchor(&es58x_dev->tx_urbs_busy);
+ atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
+ atomic_set(&es58x_dev->opened_channel_cnt, 0);
+ usb_set_intfdata(intf, es58x_dev);
+
+ es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
+ ep_in->bEndpointAddress);
+ es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev,
+ ep_out->bEndpointAddress);
+ es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
+
+ *p_es58x_dev = es58x_dev;
+
+ return 0;
+}
+
+/**
+ * es58x_probe() - Initialize the USB device.
+ * @intf: USB interface.
+ * @id: USB device ID.
+ *
+ * Return: zero on success, -ENODEV if the interface is not supported
+ * or errno when any other error occurs.
+ */
+static int es58x_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct es58x_device *es58x_dev;
+ int ch_idx, ret;
+
+ ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info);
+ if (ret)
+ return ret;
+
+ ret = es58x_get_product_info(es58x_dev);
+ if (ret)
+ goto cleanup_es58x_dev;
+
+ for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
+ ret = es58x_init_netdev(es58x_dev, ch_idx);
+ if (ret)
+ goto cleanup_candev;
+ }
+
+ return ret;
+
+ cleanup_candev:
+ for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++)
+ if (es58x_dev->netdev[ch_idx]) {
+ unregister_candev(es58x_dev->netdev[ch_idx]);
+ free_candev(es58x_dev->netdev[ch_idx]);
+ }
+ cleanup_es58x_dev:
+ kfree(es58x_dev);
+
+ return ret;
+}
+
+/**
+ * es58x_disconnect() - Disconnect the USB device.
+ * @intf: USB interface
+ *
+ * Called by the USB core when the driver is unloaded or the device is
+ * removed.
+ */
+static void es58x_disconnect(struct usb_interface *intf)
+{
+ struct es58x_device *es58x_dev = usb_get_intfdata(intf);
+ struct net_device *netdev;
+ int i;
+
+ dev_info(&intf->dev, "Disconnecting %s %s\n",
+ es58x_dev->udev->manufacturer, es58x_dev->udev->product);
+
+ for (i = 0; i < es58x_dev->num_can_ch; i++) {
+ netdev = es58x_dev->netdev[i];
+ if (!netdev)
+ continue;
+ unregister_candev(netdev);
+ es58x_dev->netdev[i] = NULL;
+ free_candev(netdev);
+ }
+
+ es58x_free_urbs(es58x_dev);
+
+ kfree(es58x_dev);
+ usb_set_intfdata(intf, NULL);
+}
+
+static struct usb_driver es58x_driver = {
+ .name = ES58X_MODULE_NAME,
+ .probe = es58x_probe,
+ .disconnect = es58x_disconnect,
+ .id_table = es58x_id_table
+};
+
+module_usb_driver(es58x_driver);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
new file mode 100644
index 000000000000..fcf219e727bf
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_core.h: All common definitions and declarations.
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef __ES58X_COMMON_H__
+#define __ES58X_COMMON_H__
+
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "es581_4.h"
+#include "es58x_fd.h"
+
+/* Driver constants */
+#define ES58X_RX_URBS_MAX 5 /* Empirical value */
+#define ES58X_TX_URBS_MAX 6 /* Empirical value */
+
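+/* Pick, at compile time, the biggest value of a given parameter
+ * between the ES581.4 and the ES58X FD variants.
+ */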
+#define ES58X_MAX(param) \
+ (ES581_4_##param > ES58X_FD_##param ? \
+ ES581_4_##param : ES58X_FD_##param)
+#define ES58X_TX_BULK_MAX ES58X_MAX(TX_BULK_MAX)
+#define ES58X_RX_BULK_MAX ES58X_MAX(RX_BULK_MAX)
+#define ES58X_ECHO_BULK_MAX ES58X_MAX(ECHO_BULK_MAX)
+#define ES58X_NUM_CAN_CH_MAX ES58X_MAX(NUM_CAN_CH)
+
+/* Use this when channel index is irrelevant (e.g. device
+ * timestamp).
+ */
+#define ES58X_CHANNEL_IDX_NA 0xFF
+#define ES58X_EMPTY_MSG NULL
+
+/* Threshold on consecutive CAN_STATE_ERROR_PASSIVE. If we receive the
+ * event ES58X_EVENT_CRTL_PASSIVE ES58X_CONSECUTIVE_ERR_PASSIVE_MAX
+ * times in a row without any successful RX or TX, we force the device
+ * to switch to the CAN_STATE_BUS_OFF state.
+ */
+#define ES58X_CONSECUTIVE_ERR_PASSIVE_MAX 254
+
+/* A magic number sent by the ES581.4 to signal that it is alive. */
+#define ES58X_HEARTBEAT 0x11
+
+/**
+ * enum es58x_driver_info - Quirks of the device.
+ * @ES58X_DUAL_CHANNEL: Device has two CAN channels. If this flag is
+ * not set, it is implied that the device has only one CAN
+ * channel.
+ * @ES58X_FD_FAMILY: Device is CAN-FD capable. If this flag is not
+ * set, the device only supports classical CAN.
+ */
+enum es58x_driver_info {
+ ES58X_DUAL_CHANNEL = BIT(0),
+ ES58X_FD_FAMILY = BIT(1)
+};
+
+enum es58x_echo {
+ ES58X_ECHO_OFF = 0,
+ ES58X_ECHO_ON = 1
+};
+
+/**
+ * enum es58x_physical_layer - Type of the physical layer.
+ * @ES58X_PHYSICAL_LAYER_HIGH_SPEED: High-speed CAN (c.f. ISO
+ * 11898-2).
+ *
+ * Some products of the ETAS portfolio also support low-speed CAN
+ * (c.f. ISO 11898-3). However, none of the devices in the scope of
+ * this driver support that option, thus the enum has only one
+ * member.
+ */
+enum es58x_physical_layer {
+ ES58X_PHYSICAL_LAYER_HIGH_SPEED = 1
+};
+
+enum es58x_samples_per_bit {
+ ES58X_SAMPLES_PER_BIT_ONE = 1,
+ ES58X_SAMPLES_PER_BIT_THREE = 2
+};
+
+/**
+ * enum es58x_sync_edge - Synchronization method.
+ * @ES58X_SYNC_EDGE_SINGLE: ISO CAN specification defines the use of a
+ * single edge synchronization. The synchronization should be
+ * done on recessive to dominant level change.
+ *
+ * For information, ES582.1 and ES584.1 also support a double
+ * synchronization, requiring both a recessive to dominant and then a
+ * dominant to recessive level change. However, this is not supported
+ * by the SocketCAN framework, thus the enum has only one member.
+ */
+enum es58x_sync_edge {
+ ES58X_SYNC_EDGE_SINGLE = 1
+};
+
+/**
+ * enum es58x_flag - CAN flags for RX/TX messages.
+ * @ES58X_FLAG_EFF: Extended Frame Format (EFF).
+ * @ES58X_FLAG_RTR: Remote Transmission Request (RTR).
+ * @ES58X_FLAG_FD_BRS: Bit rate switch (BRS): second bitrate for
+ * payload data.
+ * @ES58X_FLAG_FD_ESI: Error State Indicator (ESI): tell if the
+ * transmitting node is in error passive mode.
+ * @ES58X_FLAG_FD_DATA: CAN FD frame.
+ */
+enum es58x_flag {
+ ES58X_FLAG_EFF = BIT(0),
+ ES58X_FLAG_RTR = BIT(1),
+ ES58X_FLAG_FD_BRS = BIT(3),
+ ES58X_FLAG_FD_ESI = BIT(5),
+ ES58X_FLAG_FD_DATA = BIT(6)
+};
+
+/**
+ * enum es58x_err - CAN error detection.
+ * @ES58X_ERR_OK: No errors.
+ * @ES58X_ERR_PROT_STUFF: Bit stuffing error: more than 5 consecutive
+ * equal bits.
+ * @ES58X_ERR_PROT_FORM: Frame format error.
+ * @ES58X_ERR_ACK: Received no ACK on transmission.
+ * @ES58X_ERR_PROT_BIT: Single bit error.
+ * @ES58X_ERR_PROT_CRC: Incorrect 15, 17 or 21 bits CRC.
+ * @ES58X_ERR_PROT_BIT1: Unable to send recessive bit: tried to send
+ * recessive bit 1 but monitored dominant bit 0.
+ * @ES58X_ERR_PROT_BIT0: Unable to send dominant bit: tried to send
+ * dominant bit 0 but monitored recessive bit 1.
+ * @ES58X_ERR_PROT_OVERLOAD: Bus overload.
+ * @ES58X_ERR_PROT_UNSPEC: Unspecified.
+ *
+ * Please refer to ISO 11898-1:2015, section 10.11 "Error detection"
+ * and section 10.13 "Overload signaling" for additional details.
+ */
+enum es58x_err {
+ ES58X_ERR_OK = 0,
+ ES58X_ERR_PROT_STUFF = BIT(0),
+ ES58X_ERR_PROT_FORM = BIT(1),
+ ES58X_ERR_ACK = BIT(2),
+ ES58X_ERR_PROT_BIT = BIT(3),
+ ES58X_ERR_PROT_CRC = BIT(4),
+ ES58X_ERR_PROT_BIT1 = BIT(5),
+ ES58X_ERR_PROT_BIT0 = BIT(6),
+ ES58X_ERR_PROT_OVERLOAD = BIT(7),
+ ES58X_ERR_PROT_UNSPEC = BIT(31)
+};
+
+/**
+ * enum es58x_event - CAN error codes returned by the device.
+ * @ES58X_EVENT_OK: No errors.
+ * @ES58X_EVENT_CRTL_ACTIVE: Active state: both TX and RX error counts
+ *	are less than 128.
+ * @ES58X_EVENT_CRTL_PASSIVE: Passive state: either TX or RX error
+ * count is greater than 127.
+ * @ES58X_EVENT_CRTL_WARNING: Warning state: either TX or RX error
+ * count is greater than 96.
+ * @ES58X_EVENT_BUSOFF: Bus off.
+ * @ES58X_EVENT_SINGLE_WIRE: Lost connection on either CAN high or CAN
+ * low.
+ *
+ * Please refer to ISO 11898-1:2015, section 12.1.4 "Rules of fault
+ * confinement" for additional details.
+ */
+enum es58x_event {
+ ES58X_EVENT_OK = 0,
+ ES58X_EVENT_CRTL_ACTIVE = BIT(0),
+ ES58X_EVENT_CRTL_PASSIVE = BIT(1),
+ ES58X_EVENT_CRTL_WARNING = BIT(2),
+ ES58X_EVENT_BUSOFF = BIT(3),
+ ES58X_EVENT_SINGLE_WIRE = BIT(4)
+};
+
+/* enum es58x_ret_u8 - Device return error codes, 8 bit format.
+ *
+ * Specific to ES581.4.
+ */
+enum es58x_ret_u8 {
+ ES58X_RET_U8_OK = 0x00,
+ ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE = 0x80,
+ ES58X_RET_U8_ERR_NO_MEM = 0x81,
+ ES58X_RET_U8_ERR_BAD_CRC = 0x99
+};
+
+/* enum es58x_ret_u32 - Device return error codes, 32 bit format.
+ */
+enum es58x_ret_u32 {
+ ES58X_RET_U32_OK = 0x00000000UL,
+ ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE = 0x80000000UL,
+ ES58X_RET_U32_ERR_NO_MEM = 0x80004001UL,
+ ES58X_RET_U32_WARN_PARAM_ADJUSTED = 0x40004000UL,
+ ES58X_RET_U32_WARN_TX_MAYBE_REORDER = 0x40004001UL,
+ ES58X_RET_U32_ERR_TIMEDOUT = 0x80000008UL,
+ ES58X_RET_U32_ERR_FIFO_FULL = 0x80003002UL,
+ ES58X_RET_U32_ERR_BAD_CONFIG = 0x80004000UL,
+ ES58X_RET_U32_ERR_NO_RESOURCE = 0x80004002UL
+};
+
+/* enum es58x_ret_type - Type of the command returned by the ES58X
+ * device.
+ */
+enum es58x_ret_type {
+ ES58X_RET_TYPE_SET_BITTIMING,
+ ES58X_RET_TYPE_ENABLE_CHANNEL,
+ ES58X_RET_TYPE_DISABLE_CHANNEL,
+ ES58X_RET_TYPE_TX_MSG,
+ ES58X_RET_TYPE_RESET_RX,
+ ES58X_RET_TYPE_RESET_TX,
+ ES58X_RET_TYPE_DEVICE_ERR
+};
+
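+/* Wrapper union so that the core functions can access the common
+ * header fields (sof, cmd_type and cmd_id) and the raw bytes of an
+ * URB command without knowing which device variant produced it.
+ */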
+union es58x_urb_cmd {
+ struct es581_4_urb_cmd es581_4_urb_cmd;
+ struct es58x_fd_urb_cmd es58x_fd_urb_cmd;
+ struct { /* Common header parts of all variants */
+ __le16 sof;
+ u8 cmd_type;
+ u8 cmd_id;
+ } __packed;
+ u8 raw_cmd[0];
+};
+
+/**
+ * struct es58x_priv - All information specific to a CAN channel.
+ * @can: struct can_priv must be the first member (Socket CAN relies
+ * on the fact that function netdev_priv() returns a pointer to
+ * a struct can_priv).
+ * @es58x_dev: pointer to the corresponding ES58X device.
+ * @tx_urb: Used as a buffer to concatenate the TX messages and to do
+ * a bulk send. Please refer to es58x_start_xmit() for more
+ * details.
+ * @tx_tail: Index of the oldest packet still pending for
+ * completion. @tx_tail & echo_skb_mask represents the beginning
+ * of the echo skb FIFO, i.e. index of the first element.
+ * @tx_head: Index of the next packet to be sent to the
+ * device. @tx_head & echo_skb_mask represents the end of the
+ * echo skb FIFO plus one, i.e. the first free index.
+ * @tx_can_msg_cnt: Number of messages in @tx_urb.
+ * @tx_can_msg_is_fd: false: all messages in @tx_urb are Classical
+ * CAN, true: all messages in @tx_urb are CAN FD. Rationale:
+ *	ES58X FD devices do not allow mixing Classical CAN and CAN FD
+ *	frames in one single bulk transmission.
+ * @err_passive_before_rtx_success: The ES58X device might enter a
+ *	state in which it keeps alternating between the error passive
+ *	and the error active states. This counter keeps track of the
+ *	number of consecutive error passive events and, if it gets
+ *	bigger than ES58X_CONSECUTIVE_ERR_PASSIVE_MAX,
+ *	es58x_rx_err_msg() will force the status to bus-off.
+ * @channel_idx: Channel index, starts at zero.
+ */
+struct es58x_priv {
+ struct can_priv can;
+ struct es58x_device *es58x_dev;
+ struct urb *tx_urb;
+
+ u32 tx_tail;
+ u32 tx_head;
+
+ u8 tx_can_msg_cnt;
+ bool tx_can_msg_is_fd;
+
+ u8 err_passive_before_rtx_success;
+
+ u8 channel_idx;
+};
+
+/**
+ * struct es58x_parameters - Constant parameters of a given hardware
+ * variant.
+ * @bittiming_const: Nominal bittiming constant parameters.
+ * @data_bittiming_const: Data bittiming constant parameters.
+ * @tdc_const: Transmission Delay Compensation constant parameters.
+ * @bitrate_max: Maximum bitrate supported by the device.
+ * @clock: CAN clock parameters.
+ * @ctrlmode_supported: List of supported modes. Please refer to
+ * can/netlink.h file for additional details.
+ * @tx_start_of_frame: Magic number at the beginning of each TX URB
+ * command.
+ * @rx_start_of_frame: Magic number at the beginning of each RX URB
+ * command.
+ * @tx_urb_cmd_max_len: Maximum length of a TX URB command.
+ * @rx_urb_cmd_max_len: Maximum length of a RX URB command.
+ * @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head
+ * field of the struct es58x_priv into echo_skb
+ *	indexes. Properties: @fifo_mask = echo_skb_max - 1 where
+ * echo_skb_max must be a power of two. Also, echo_skb_max must
+ * not exceed the maximum size of the device internal TX FIFO
+ * length. This parameter is used to control the network queue
+ * wake/stop logic.
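+ *	For example, if echo_skb_max is 32, then @fifo_mask is 31 and
+ *	the echo skb index of a given packet is simply tx_head & 31.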
+ * @dql_min_limit: Dynamic Queue Limits (DQL) absolute minimum limit
+ * of bytes allowed to be queued on this network device transmit
+ * queue. Used by the Byte Queue Limits (BQL) to determine how
+ * frequently the xmit_more flag will be set to true in
+ * es58x_start_xmit(). Set this value higher to optimize for
+ * throughput but be aware that it might have a negative impact
+ * on the latency! This value can also be set dynamically. Please
+ * refer to Documentation/ABI/testing/sysfs-class-net-queues for
+ * more details.
+ * @tx_bulk_max: Maximum number of TX messages that can be sent in one
+ * single URB packet.
+ * @urb_cmd_header_len: Length of the URB command header.
+ * @rx_urb_max: Number of RX URBs to be allocated during device probe.
+ * @tx_urb_max: Number of TX URBs to be allocated during device probe.
+ */
+struct es58x_parameters {
+ const struct can_bittiming_const *bittiming_const;
+ const struct can_bittiming_const *data_bittiming_const;
+ const struct can_tdc_const *tdc_const;
+ u32 bitrate_max;
+ struct can_clock clock;
+ u32 ctrlmode_supported;
+ u16 tx_start_of_frame;
+ u16 rx_start_of_frame;
+ u16 tx_urb_cmd_max_len;
+ u16 rx_urb_cmd_max_len;
+ u16 fifo_mask;
+ u16 dql_min_limit;
+ u8 tx_bulk_max;
+ u8 urb_cmd_header_len;
+ u8 rx_urb_max;
+ u8 tx_urb_max;
+};
+
+/**
+ * struct es58x_operators - Function pointers used to encode/decode
+ * the TX/RX messages.
+ * @get_msg_len: Get field msg_len of the urb_cmd. The offset of
+ *	msg_len inside urb_cmd depends on the device model.
+ * @handle_urb_cmd: Decode the URB command received from the device
+ * and dispatch it to the relevant sub function.
+ * @fill_urb_header: Fill the header of urb_cmd.
+ * @tx_can_msg: Encode a TX CAN message and add it to the bulk buffer
+ * cmd_buf of es58x_dev.
+ * @enable_channel: Start the CAN channel.
+ * @disable_channel: Stop the CAN channel.
+ * @reset_device: Full reset of the device. N.B: this feature is only
+ * present on the ES581.4. For ES58X FD devices, this field is
+ * set to NULL.
+ * @get_timestamp: Request a timestamp from the ES58X device.
+ */
+struct es58x_operators {
+ u16 (*get_msg_len)(const union es58x_urb_cmd *urb_cmd);
+ int (*handle_urb_cmd)(struct es58x_device *es58x_dev,
+ const union es58x_urb_cmd *urb_cmd);
+ void (*fill_urb_header)(union es58x_urb_cmd *urb_cmd, u8 cmd_type,
+ u8 cmd_id, u8 channel_idx, u16 cmd_len);
+ int (*tx_can_msg)(struct es58x_priv *priv, const struct sk_buff *skb);
+ int (*enable_channel)(struct es58x_priv *priv);
+ int (*disable_channel)(struct es58x_priv *priv);
+ int (*reset_device)(struct es58x_device *es58x_dev);
+ int (*get_timestamp)(struct es58x_device *es58x_dev);
+};
+
+/**
+ * struct es58x_device - All information specific to an ES58X device.
+ * @dev: Device information.
+ * @udev: USB device information.
+ * @netdev: Array of our CAN channels.
+ * @param: The constant parameters.
+ * @ops: Operators.
+ * @rx_pipe: USB reception pipe.
+ * @tx_pipe: USB transmission pipe.
+ * @rx_urbs: Anchor for received URBs.
+ * @tx_urbs_busy: Anchor for TX URBs which were sent to the device.
+ * @tx_urbs_idle: Anchor for TX URBs which are idle. This driver
+ *	allocates the memory for the URBs during the probe. When a TX
+ *	URB is needed, it can be taken from this anchor. The network
+ *	queue wake/stop logic should prevent this anchor from getting
+ *	empty. Please refer to es58x_get_tx_urb() for more details.
+ * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
+ * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
+ * and es58x_stop()).
+ * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
+ * was called.
+ * @realtime_diff_ns: difference in nanoseconds between the clocks of
+ * the ES58X device and the kernel.
+ * @timestamps: a temporary buffer to store the time stamps before
+ * feeding them to es58x_can_get_echo_skb(). Can only be used
+ * in RX branches.
+ * @rx_max_packet_size: Maximum length of bulk-in URB.
+ * @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @rx_cmd_buf_len: Length of @rx_cmd_buf.
+ * @rx_cmd_buf: The device might split the URB commands in an
+ * arbitrary amount of pieces. This buffer is used to concatenate
+ * all those pieces. Can only be used in RX branches. This field
+ *	has to be the last one of the structure because it has a
+ * flexible size (c.f. es58x_sizeof_es58x_device() function).
+ */
+struct es58x_device {
+ struct device *dev;
+ struct usb_device *udev;
+ struct net_device *netdev[ES58X_NUM_CAN_CH_MAX];
+
+ const struct es58x_parameters *param;
+ const struct es58x_operators *ops;
+
+ int rx_pipe;
+ int tx_pipe;
+
+ struct usb_anchor rx_urbs;
+ struct usb_anchor tx_urbs_busy;
+ struct usb_anchor tx_urbs_idle;
+ atomic_t tx_urbs_idle_cnt;
+ atomic_t opened_channel_cnt;
+
+ u64 ktime_req_ns;
+ s64 realtime_diff_ns;
+
+ u64 timestamps[ES58X_ECHO_BULK_MAX];
+
+ u16 rx_max_packet_size;
+ u8 num_can_ch;
+
+ u16 rx_cmd_buf_len;
+ union es58x_urb_cmd rx_cmd_buf;
+};
+
+/**
+ * es58x_sizeof_es58x_device() - Calculate the maximum length of
+ * struct es58x_device.
+ * @es58x_dev_param: The constant parameters of the device.
+ *
+ * The length of struct es58x_device depends on the length of its last
+ * field: rx_cmd_buf. This inline function allows the memory
+ * allocation to be optimized.
+ *
+ * Return: length of struct es58x_device.
+ */
+static inline size_t es58x_sizeof_es58x_device(const struct es58x_parameters
+ *es58x_dev_param)
+{
+ return offsetof(struct es58x_device, rx_cmd_buf) +
+ es58x_dev_param->rx_urb_cmd_max_len;
+}
+
+static inline int __es58x_check_msg_len(const struct device *dev,
+ const char *stringified_msg,
+ size_t actual_len, size_t expected_len)
+{
+ if (expected_len != actual_len) {
+ dev_err(dev,
+ "Length of %s is %zu but received command is %zu.\n",
+ stringified_msg, expected_len, actual_len);
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/**
+ * es58x_check_msg_len() - Check the size of a received message.
+ * @dev: Device, used to print error messages.
+ * @msg: Received message, must not be a pointer.
+ * @actual_len: Length of the message as advertised in the command header.
+ *
+ * Must be a macro in order to accept the different types of messages
+ * as an input. Can be used with any of the messages which have a fixed
+ * length. Checks for an exact match of the size.
+ *
+ * Return: zero on success, -EMSGSIZE if @actual_len differs from the
+ * expected length.
+ */
+#define es58x_check_msg_len(dev, msg, actual_len) \
+ __es58x_check_msg_len(dev, __stringify(msg), \
+ actual_len, sizeof(msg))
+
+static inline int __es58x_check_msg_max_len(const struct device *dev,
+ const char *stringified_msg,
+ size_t actual_len,
+ size_t expected_len)
+{
+ if (actual_len > expected_len) {
+ dev_err(dev,
+ "Maximum length for %s is %zu but received command is %zu.\n",
+ stringified_msg, expected_len, actual_len);
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+/**
+ * es58x_check_msg_max_len() - Check the maximum size of a received message.
+ * @dev: Device, used to print error messages.
+ * @msg: Received message, must not be a pointer.
+ * @actual_len: Length of the message as advertised in the command header.
+ *
+ * Must be a macro in order to accept the different types of messages
+ * as an input. To be used with messages of variable size. Only
+ * checks that the message is not bigger than the maximum expected
+ * size.
+ *
+ * Return: zero on success, -EOVERFLOW if @actual_len is greater than
+ * the expected length.
+ */
+#define es58x_check_msg_max_len(dev, msg, actual_len) \
+ __es58x_check_msg_max_len(dev, __stringify(msg), \
+ actual_len, sizeof(msg))
+
+static inline int __es58x_msg_num_element(const struct device *dev,
+ const char *stringified_msg,
+ size_t actual_len, size_t msg_len,
+ size_t elem_len)
+{
+ size_t actual_num_elem = actual_len / elem_len;
+ size_t expected_num_elem = msg_len / elem_len;
+
+ if (actual_num_elem == 0) {
+ dev_err(dev,
+ "Minimum length for %s is %zu but received command is %zu.\n",
+ stringified_msg, elem_len, actual_len);
+ return -EMSGSIZE;
+ } else if ((actual_len % elem_len) != 0) {
+ dev_err(dev,
+ "Received command length: %zu is not a multiple of %s[0]: %zu\n",
+ actual_len, stringified_msg, elem_len);
+ return -EMSGSIZE;
+ } else if (actual_num_elem > expected_num_elem) {
+ dev_err(dev,
+ "Array %s is supposed to have %zu elements each of size %zu...\n",
+ stringified_msg, expected_num_elem, elem_len);
+ dev_err(dev,
+ "... But received command has %zu elements (total length %zu).\n",
+ actual_num_elem, actual_len);
+ return -EOVERFLOW;
+ }
+ return actual_num_elem;
+}
+
+/**
+ * es58x_msg_num_element() - Check size and give the number of
+ * elements in a message of array type.
+ * @dev: Device, used to print error messages.
+ * @msg: Received message, must be an array.
+ * @actual_len: Length of the message as advertised in the command
+ * header.
+ *
+ * Must be a macro in order to accept the different types of messages
+ * as an input. To be used on messages of array type. The array's
+ * elements have to be of fixed size (else use
+ * es58x_check_msg_max_len()). Checks that the total length is an
+ * exact multiple of the length of a single element. The
+ * __must_be_array() check triggers a compilation error if @msg is a
+ * pointer instead of a true array.
+ *
+ * Return: number of elements in the array on success, -EOVERFLOW if
+ * @actual_len is greater than the expected length, -EMSGSIZE if
+ * @actual_len is not a multiple of a single element.
+ */
+#define es58x_msg_num_element(dev, msg, actual_len) \
+({ \
+ size_t __elem_len = sizeof((msg)[0]) + __must_be_array(msg); \
+ __es58x_msg_num_element(dev, __stringify(msg), actual_len, \
+ sizeof(msg), __elem_len); \
+})
+
+/**
+ * es58x_priv() - Get the priv member and cast it to struct es58x_priv.
+ * @netdev: CAN network device.
+ *
+ * Return: the ES58X private parameters of @netdev.
+ */
+static inline struct es58x_priv *es58x_priv(struct net_device *netdev)
+{
+ return (struct es58x_priv *)netdev_priv(netdev);
+}
+
+/**
+ * ES58X_SIZEOF_URB_CMD() - Calculate the maximum length of an urb
+ * command for a given message field name.
+ * @es58x_urb_cmd_type: type (either "struct es581_4_urb_cmd" or
+ * "struct es58x_fd_urb_cmd").
+ * @msg_field: name of the message field.
+ *
+ * Must be a macro in order to accept the different command types as
+ * an input.
+ *
+ * Return: length of the urb command.
+ */
+#define ES58X_SIZEOF_URB_CMD(es58x_urb_cmd_type, msg_field) \
+ (offsetof(es58x_urb_cmd_type, raw_msg) \
+ + sizeof_field(es58x_urb_cmd_type, msg_field) \
+ + sizeof_field(es58x_urb_cmd_type, \
+ reserved_for_crc16_do_not_use))
+
+/**
+ * es58x_get_urb_cmd_len() - Calculate the actual length of an urb
+ * command for a given message length.
+ * @es58x_dev: ES58X device.
+ * @msg_len: Length of the message.
+ *
+ * Add the header and CRC lengths to the message length.
+ *
+ * Return: length of the urb command.
+ */
+static inline size_t es58x_get_urb_cmd_len(struct es58x_device *es58x_dev,
+ u16 msg_len)
+{
+ return es58x_dev->param->urb_cmd_header_len + msg_len + sizeof(u16);
+}
+
+/**
+ * es58x_get_netdev() - Get the network device.
+ * @es58x_dev: ES58X device.
+ * @channel_no: The channel number as advertised in the urb command.
+ * @channel_idx_offset: Some of the ES58X devices start channel
+ *	numbering from 0 (ES58X FD), others from 1 (ES581.4).
+ * @netdev: CAN network device.
+ *
+ * Do a sanity check on the index provided by the device.
+ *
+ * Return: zero on success, -ECHRNG if the received channel number is
+ * out of range and -ENODEV if the network device is not yet
+ * configured.
+ */
+static inline int es58x_get_netdev(struct es58x_device *es58x_dev,
+ int channel_no, int channel_idx_offset,
+ struct net_device **netdev)
+{
+ int channel_idx = channel_no - channel_idx_offset;
+
+ *netdev = NULL;
+ if (channel_idx < 0 || channel_idx >= es58x_dev->num_can_ch)
+ return -ECHRNG;
+
+ *netdev = es58x_dev->netdev[channel_idx];
+ if (!*netdev || !netif_device_present(*netdev))
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * es58x_get_raw_can_id() - Get the CAN ID.
+ * @cf: CAN frame.
+ *
+ * Mask the CAN ID in order to only keep the significant bits.
+ *
+ * Return: the raw value of the CAN ID.
+ */
+static inline int es58x_get_raw_can_id(const struct can_frame *cf)
+{
+ if (cf->can_id & CAN_EFF_FLAG)
+ return cf->can_id & CAN_EFF_MASK;
+ else
+ return cf->can_id & CAN_SFF_MASK;
+}
+
+/**
+ * es58x_get_flags() - Get the CAN flags.
+ * @skb: socket buffer of a CAN message.
+ *
+ * Return: the CAN flag as an enum es58x_flag.
+ */
+static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb)
+{
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ enum es58x_flag es58x_flags = 0;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ es58x_flags |= ES58X_FLAG_EFF;
+
+ if (can_is_canfd_skb(skb)) {
+ es58x_flags |= ES58X_FLAG_FD_DATA;
+ if (cf->flags & CANFD_BRS)
+ es58x_flags |= ES58X_FLAG_FD_BRS;
+ if (cf->flags & CANFD_ESI)
+ es58x_flags |= ES58X_FLAG_FD_ESI;
+ } else if (cf->can_id & CAN_RTR_FLAG)
+ /* Remote frames are only defined in Classical CAN frames */
+ es58x_flags |= ES58X_FLAG_RTR;
+
+ return es58x_flags;
+}
+
+int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx,
+ u64 *tstamps, unsigned int pkts);
+int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries,
+ enum es58x_ret_u32 rx_cmd_ret_u32);
+int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data,
+ canid_t can_id, enum es58x_flag es58x_flags, u8 dlc);
+int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
+ enum es58x_event event, u64 timestamp);
+void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp);
+int es58x_rx_cmd_ret_u8(struct device *dev, enum es58x_ret_type cmd_ret_type,
+ enum es58x_ret_u8 rx_cmd_ret_u8);
+int es58x_rx_cmd_ret_u32(struct net_device *netdev,
+ enum es58x_ret_type cmd_ret_type,
+ enum es58x_ret_u32 rx_cmd_ret_u32);
+int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id,
+ const void *msg, u16 cmd_len, int channel_idx);
+
+extern const struct es58x_parameters es581_4_param;
+extern const struct es58x_operators es581_4_ops;
+
+extern const struct es58x_parameters es58x_fd_param;
+extern const struct es58x_operators es58x_fd_ops;
+
+#endif /* __ES58X_COMMON_H__ */
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
new file mode 100644
index 000000000000..1a2779d383a4
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_fd.c: Adds support for ETAS ES582.1 and ES584.1 (naming
+ * convention: we use the term "ES58X FD" when referring to those two
+ * variants together).
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+
+#include "es58x_core.h"
+#include "es58x_fd.h"
+
+/**
+ * es58x_fd_sizeof_rx_tx_msg() - Calculate the actual length of the
+ * structure of a rx or tx message.
+ * @msg: message of variable length, must have dlc and len fields.
+ *
+ * Even though RTR frames actually have no payload, the ES58X devices
+ * still expect one. Must be a macro in order to accept several types
+ * (struct es58x_fd_tx_can_msg and struct es58x_fd_rx_can_msg) as an
+ * input.
+ *
+ * Return: length of the message.
+ */
+#define es58x_fd_sizeof_rx_tx_msg(msg) \
+({ \
+ typeof(msg) __msg = (msg); \
+ size_t __msg_len; \
+ \
+ if (__msg.flags & ES58X_FLAG_FD_DATA) \
+ __msg_len = canfd_sanitize_len(__msg.len); \
+ else \
+ __msg_len = can_cc_dlc2len(__msg.dlc); \
+ \
+ offsetof(typeof(__msg), data[__msg_len]); \
+})
+
+static enum es58x_fd_cmd_type es58x_fd_cmd_type(struct net_device *netdev)
+{
+ u32 ctrlmode = es58x_priv(netdev)->can.ctrlmode;
+
+ if (ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO))
+ return ES58X_FD_CMD_TYPE_CANFD;
+ else
+ return ES58X_FD_CMD_TYPE_CAN;
+}
+
+static u16 es58x_fd_get_msg_len(const union es58x_urb_cmd *urb_cmd)
+{
+ return get_unaligned_le16(&urb_cmd->es58x_fd_urb_cmd.msg_len);
+}
+
+static int es58x_fd_echo_msg(struct net_device *netdev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd)
+{
+ struct es58x_priv *priv = es58x_priv(netdev);
+ const struct es58x_fd_echo_msg *echo_msg;
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+ u64 *tstamps = es58x_dev->timestamps;
+ u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len);
+ int i, num_element;
+ u32 rcv_packet_idx;
+
+ const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8);
+
+ num_element = es58x_msg_num_element(es58x_dev->dev,
+ es58x_fd_urb_cmd->echo_msg,
+ msg_len);
+ if (num_element < 0)
+ return num_element;
+ echo_msg = es58x_fd_urb_cmd->echo_msg;
+
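+	/* The device only echoes back the least significant byte of
+	 * the packet index: reconstruct the full 32 bit index from
+	 * the driver's own tx_tail counter.
+	 */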
+ rcv_packet_idx = (priv->tx_tail & mask) | echo_msg[0].packet_idx;
+ for (i = 0; i < num_element; i++) {
+ if ((u8)rcv_packet_idx != echo_msg[i].packet_idx) {
+ netdev_err(netdev, "Packet idx jumped from %u to %u\n",
+ (u8)rcv_packet_idx - 1,
+ echo_msg[i].packet_idx);
+ return -EBADMSG;
+ }
+
+ tstamps[i] = get_unaligned_le64(&echo_msg[i].timestamp);
+ rcv_packet_idx++;
+ }
+
+ return es58x_can_get_echo_skb(netdev, priv->tx_tail, tstamps, num_element);
+}
+
+static int es58x_fd_rx_can_msg(struct net_device *netdev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd)
+{
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+ const u8 *rx_can_msg_buf = es58x_fd_urb_cmd->rx_can_msg_buf;
+ u16 rx_can_msg_buf_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len);
+ int pkts, ret;
+
+ ret = es58x_check_msg_max_len(es58x_dev->dev,
+ es58x_fd_urb_cmd->rx_can_msg_buf,
+ rx_can_msg_buf_len);
+ if (ret)
+ return ret;
+
+ for (pkts = 0; rx_can_msg_buf_len > 0; pkts++) {
+ const struct es58x_fd_rx_can_msg *rx_can_msg =
+ (const struct es58x_fd_rx_can_msg *)rx_can_msg_buf;
+ bool is_can_fd = !!(rx_can_msg->flags & ES58X_FLAG_FD_DATA);
+ /* rx_can_msg_len is the length of the rx_can_msg
+ * buffer. Not to be confused with rx_can_msg->len
+ * which is the length of the CAN payload
+ * rx_can_msg->data.
+ */
+ u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg);
+
+ if (rx_can_msg_len > rx_can_msg_buf_len) {
+ netdev_err(netdev,
+ "%s: Expected a rx_can_msg of size %d but only %d bytes are left in rx_can_msg_buf\n",
+ __func__,
+ rx_can_msg_len, rx_can_msg_buf_len);
+ return -EMSGSIZE;
+ }
+ if (rx_can_msg->len > CANFD_MAX_DLEN) {
+ netdev_err(netdev,
+ "%s: Data length is %d but maximum should be %d\n",
+ __func__, rx_can_msg->len, CANFD_MAX_DLEN);
+ return -EMSGSIZE;
+ }
+
+ if (netif_running(netdev)) {
+ u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp);
+ canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id);
+ u8 dlc;
+
+ if (is_can_fd)
+ dlc = can_fd_len2dlc(rx_can_msg->len);
+ else
+ dlc = rx_can_msg->dlc;
+
+ ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data,
+ can_id, rx_can_msg->flags, dlc);
+ if (ret)
+ break;
+ }
+
+ rx_can_msg_buf_len -= rx_can_msg_len;
+ rx_can_msg_buf += rx_can_msg_len;
+ }
+
+ if (!netif_running(netdev)) {
+ if (net_ratelimit())
+ netdev_info(netdev,
+ "%s: %s is down, dropping %d rx packets\n",
+ __func__, netdev->name, pkts);
+ netdev->stats.rx_dropped += pkts;
+ }
+
+ return ret;
+}
+
+static int es58x_fd_rx_event_msg(struct net_device *netdev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd)
+{
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+ u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len);
+ const struct es58x_fd_rx_event_msg *rx_event_msg;
+ int ret;
+
+ ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
+ if (ret)
+ return ret;
+
+ rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
+
+ return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
+ rx_event_msg->event_code,
+ get_unaligned_le64(&rx_event_msg->timestamp));
+}
+
+static int es58x_fd_rx_cmd_ret_u32(struct net_device *netdev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd,
+ enum es58x_ret_type cmd_ret_type)
+{
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+ u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len);
+ int ret;
+
+ ret = es58x_check_msg_len(es58x_dev->dev,
+ es58x_fd_urb_cmd->rx_cmd_ret_le32, msg_len);
+ if (ret)
+ return ret;
+
+ return es58x_rx_cmd_ret_u32(netdev, cmd_ret_type,
+ get_unaligned_le32(&es58x_fd_urb_cmd->rx_cmd_ret_le32));
+}
+
+static int es58x_fd_tx_ack_msg(struct net_device *netdev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd)
+{
+ struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+ const struct es58x_fd_tx_ack_msg *tx_ack_msg;
+ u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len);
+ int ret;
+
+ tx_ack_msg = &es58x_fd_urb_cmd->tx_ack_msg;
+ ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len);
+ if (ret)
+ return ret;
+
+ return es58x_tx_ack_msg(netdev,
+ get_unaligned_le16(&tx_ack_msg->tx_free_entries),
+ get_unaligned_le32(&tx_ack_msg->rx_cmd_ret_le32));
+}
+
+static int es58x_fd_can_cmd_id(struct es58x_device *es58x_dev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd)
+{
+ struct net_device *netdev;
+ int ret;
+
+ ret = es58x_get_netdev(es58x_dev, es58x_fd_urb_cmd->channel_idx,
+ ES58X_FD_CHANNEL_IDX_OFFSET, &netdev);
+ if (ret)
+ return ret;
+
+ switch ((enum es58x_fd_can_cmd_id)es58x_fd_urb_cmd->cmd_id) {
+ case ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL:
+ return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd,
+ ES58X_RET_TYPE_ENABLE_CHANNEL);
+
+ case ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL:
+ return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd,
+ ES58X_RET_TYPE_DISABLE_CHANNEL);
+
+ case ES58X_FD_CAN_CMD_ID_TX_MSG:
+ return es58x_fd_tx_ack_msg(netdev, es58x_fd_urb_cmd);
+
+ case ES58X_FD_CAN_CMD_ID_ECHO_MSG:
+ return es58x_fd_echo_msg(netdev, es58x_fd_urb_cmd);
+
+ case ES58X_FD_CAN_CMD_ID_RX_MSG:
+ return es58x_fd_rx_can_msg(netdev, es58x_fd_urb_cmd);
+
+ case ES58X_FD_CAN_CMD_ID_RESET_RX:
+ return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd,
+ ES58X_RET_TYPE_RESET_RX);
+
+ case ES58X_FD_CAN_CMD_ID_RESET_TX:
+ return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd,
+ ES58X_RET_TYPE_RESET_TX);
+
+ case ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG:
+ return es58x_fd_rx_event_msg(netdev, es58x_fd_urb_cmd);
+
+ default:
+ return -EBADRQC;
+ }
+}
+
+static int es58x_fd_device_cmd_id(struct es58x_device *es58x_dev,
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd)
+{
+ u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len);
+ int ret;
+
+ switch ((enum es58x_fd_dev_cmd_id)es58x_fd_urb_cmd->cmd_id) {
+ case ES58X_FD_DEV_CMD_ID_TIMESTAMP:
+ ret = es58x_check_msg_len(es58x_dev->dev,
+ es58x_fd_urb_cmd->timestamp, msg_len);
+ if (ret)
+ return ret;
+ es58x_rx_timestamp(es58x_dev,
+ get_unaligned_le64(&es58x_fd_urb_cmd->timestamp));
+ return 0;
+
+ default:
+ return -EBADRQC;
+ }
+}
+
+static int es58x_fd_handle_urb_cmd(struct es58x_device *es58x_dev,
+ const union es58x_urb_cmd *urb_cmd)
+{
+ const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd;
+ int ret;
+
+ es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd;
+
+ switch ((enum es58x_fd_cmd_type)es58x_fd_urb_cmd->cmd_type) {
+ case ES58X_FD_CMD_TYPE_CAN:
+ case ES58X_FD_CMD_TYPE_CANFD:
+ ret = es58x_fd_can_cmd_id(es58x_dev, es58x_fd_urb_cmd);
+ break;
+
+ case ES58X_FD_CMD_TYPE_DEVICE:
+ ret = es58x_fd_device_cmd_id(es58x_dev, es58x_fd_urb_cmd);
+ break;
+
+ default:
+ ret = -EBADRQC;
+ break;
+ }
+
+ if (ret == -EBADRQC)
+ dev_err(es58x_dev->dev,
+ "%s: Unknown command type (0x%02X) and command ID (0x%02X) combination\n",
+ __func__, es58x_fd_urb_cmd->cmd_type,
+ es58x_fd_urb_cmd->cmd_id);
+
+ return ret;
+}
+
+static void es58x_fd_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type,
+ u8 cmd_id, u8 channel_idx, u16 msg_len)
+{
+ struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd;
+
+ es58x_fd_urb_cmd->SOF = cpu_to_le16(es58x_fd_param.tx_start_of_frame);
+ es58x_fd_urb_cmd->cmd_type = cmd_type;
+ es58x_fd_urb_cmd->cmd_id = cmd_id;
+ es58x_fd_urb_cmd->channel_idx = channel_idx;
+ es58x_fd_urb_cmd->msg_len = cpu_to_le16(msg_len);
+}
+
+static int es58x_fd_tx_can_msg(struct es58x_priv *priv,
+ const struct sk_buff *skb)
+{
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+ union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer;
+ struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct es58x_fd_tx_can_msg *tx_can_msg;
+ bool is_fd = can_is_canfd_skb(skb);
+ u16 msg_len;
+ int ret;
+
+ if (priv->tx_can_msg_cnt == 0) {
+ msg_len = 0;
+ es58x_fd_fill_urb_header(urb_cmd,
+ is_fd ? ES58X_FD_CMD_TYPE_CANFD
+ : ES58X_FD_CMD_TYPE_CAN,
+ ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK,
+ priv->channel_idx, msg_len);
+ } else {
+ msg_len = es58x_fd_get_msg_len(urb_cmd);
+ }
+
+ ret = es58x_check_msg_max_len(es58x_dev->dev,
+ es58x_fd_urb_cmd->tx_can_msg_buf,
+ msg_len + sizeof(*tx_can_msg));
+ if (ret)
+ return ret;
+
+ /* Fill message contents. */
+ tx_can_msg = (struct es58x_fd_tx_can_msg *)
+ &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len];
+ tx_can_msg->packet_idx = (u8)priv->tx_head;
+ put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id);
+ tx_can_msg->flags = (u8)es58x_get_flags(skb);
+ if (is_fd)
+ tx_can_msg->len = cf->len;
+ else
+ tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ memcpy(tx_can_msg->data, cf->data, cf->len);
+
+ /* Calculate new sizes */
+ msg_len += es58x_fd_sizeof_rx_tx_msg(*tx_can_msg);
+ priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev,
+ msg_len);
+ put_unaligned_le16(msg_len, &es58x_fd_urb_cmd->msg_len);
+
+ return 0;
+}
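
Successive calls accumulate frames in the same URB: the first call writes the header with msg_len = 0, each call appends one es58x_fd_tx_can_msg at offset msg_len and writes the new total back. A sketch of the resulting layout after two frames (illustrative; the flush policy is inferred from the tx_bulk_max parameter further down):

	/* tx_can_msg_buf layout after batching two classical CAN
	 * frames with 8 byte payloads (ES58X_FD_CAN_TX_LEN == 15 by
	 * the __packed layout in es58x_fd.h below):
	 *
	 *   [0..14]   frame 0 (packet_idx, can_id, flags, dlc, data[8])
	 *   [15..29]  frame 1
	 *
	 * msg_len == 30; the URB is presumably flushed once
	 * tx_bulk_max frames are queued or the stack stops batching.
	 */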
+
+static void es58x_fd_convert_bittiming(struct es58x_fd_bittiming *es58x_fd_bt,
+ struct can_bittiming *bt)
+{
+ /* The actual value set in the hardware registers is one less
+ * than the functional value.
+ */
+ const int offset = 1;
+
+ es58x_fd_bt->bitrate = cpu_to_le32(bt->bitrate);
+ es58x_fd_bt->tseg1 =
+ cpu_to_le16(bt->prop_seg + bt->phase_seg1 - offset);
+ es58x_fd_bt->tseg2 = cpu_to_le16(bt->phase_seg2 - offset);
+ es58x_fd_bt->brp = cpu_to_le16(bt->brp - offset);
+ es58x_fd_bt->sjw = cpu_to_le16(bt->sjw - offset);
+}
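
A concrete conversion, with illustrative numbers, to make the minus-one convention visible:

	struct can_bittiming bt = {
		.prop_seg = 7, .phase_seg1 = 7, .phase_seg2 = 4,
		.brp = 2, .sjw = 1,
	};
	/* After es58x_fd_convert_bittiming(): tseg1 = 13, tseg2 = 3,
	 * brp = 1 and sjw = 0; the MCAN core adds the implicit +1
	 * when interpreting each register field.
	 */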
+
+static int es58x_fd_enable_channel(struct es58x_priv *priv)
+{
+ struct es58x_device *es58x_dev = priv->es58x_dev;
+ struct net_device *netdev = es58x_dev->netdev[priv->channel_idx];
+ struct es58x_fd_tx_conf_msg tx_conf_msg = { 0 };
+ u32 ctrlmode;
+ size_t conf_len = 0;
+
+ es58x_fd_convert_bittiming(&tx_conf_msg.nominal_bittiming,
+ &priv->can.bittiming);
+ ctrlmode = priv->can.ctrlmode;
+
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_THREE;
+ else
+ tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_ONE;
+ tx_conf_msg.sync_edge = ES58X_SYNC_EDGE_SINGLE;
+ tx_conf_msg.physical_layer = ES58X_PHYSICAL_LAYER_HIGH_SPEED;
+ tx_conf_msg.echo_mode = ES58X_ECHO_ON;
+ if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_PASSIVE;
+ else
+ tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_ACTIVE;
+
+ if (ctrlmode & CAN_CTRLMODE_FD_NON_ISO) {
+ tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD_NON_ISO;
+ tx_conf_msg.canfd_enabled = 1;
+ } else if (ctrlmode & CAN_CTRLMODE_FD) {
+ tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD;
+ tx_conf_msg.canfd_enabled = 1;
+ }
+
+ if (tx_conf_msg.canfd_enabled) {
+ es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
+ &priv->can.data_bittiming);
+
+ if (priv->can.tdc.tdco) {
+ tx_conf_msg.tdc_enabled = 1;
+ tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
+ tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
+ }
+
+ conf_len = ES58X_FD_CANFD_CONF_LEN;
+ } else {
+ conf_len = ES58X_FD_CAN_CONF_LEN;
+ }
+
+ return es58x_send_msg(es58x_dev, es58x_fd_cmd_type(netdev),
+ ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL,
+ &tx_conf_msg, conf_len, priv->channel_idx);
+}
+
+static int es58x_fd_disable_channel(struct es58x_priv *priv)
+{
+ /* The type (ES58X_FD_CMD_TYPE_CAN or ES58X_FD_CMD_TYPE_CANFD) does
+ * not matter here.
+ */
+ return es58x_send_msg(priv->es58x_dev, ES58X_FD_CMD_TYPE_CAN,
+ ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL,
+ ES58X_EMPTY_MSG, 0, priv->channel_idx);
+}
+
+static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev)
+{
+ return es58x_send_msg(es58x_dev, ES58X_FD_CMD_TYPE_DEVICE,
+ ES58X_FD_DEV_CMD_ID_TIMESTAMP, ES58X_EMPTY_MSG,
+ 0, ES58X_CHANNEL_IDX_NA);
+}
+
+/* Nominal bittiming constants for ES582.1 and ES584.1 as specified in
+ * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family"
+ * section 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register"
+ * from Microchip.
+ *
+ * The values from the specification are the hardware register
+ * values. To convert them to the functional values, all ranges were
+ * incremented by 1 (e.g. range [0..n-1] changed to [1..n]).
+ */
+static const struct can_bittiming_const es58x_fd_nom_bittiming_const = {
+ .name = "ES582.1/ES584.1",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+/* Data bittiming constants for ES582.1 and ES584.1 as specified in
+ * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family"
+ * section 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from
+ * Microchip.
+ */
+static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
+ .name = "ES582.1/ES584.1",
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1
+};
+
+/* Transmission Delay Compensation constants for ES582.1 and ES584.1
+ * as specified in the microcontroller datasheet: "SAM
+ * E70/S70/V70/V71 Family" section 49.6.15 "MCAN Transmitter Delay
+ * Compensation Register" from Microchip.
+ */
+static const struct can_tdc_const es58x_tdc_const = {
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_max = 127,
+ .tdcf_max = 127
+};
+
+const struct es58x_parameters es58x_fd_param = {
+ .bittiming_const = &es58x_fd_nom_bittiming_const,
+ .data_bittiming_const = &es58x_fd_data_bittiming_const,
+ .tdc_const = &es58x_tdc_const,
+ /* The devices use NXP TJA1044G transceivers which guarantee
+ * the timing for data rates up to 5 Mbps. Bitrates up to 8
+ * Mbps work in an optimal environment but are not recommended
+ * for production environments.
+ */
+ .bitrate_max = 8 * CAN_MBPS,
+ .clock = {.freq = 80 * CAN_MHZ},
+ .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_CC_LEN8_DLC,
+ .tx_start_of_frame = 0xCEFA, /* FACE in little endian */
+ .rx_start_of_frame = 0xFECA, /* CAFE in little endian */
+ .tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN,
+ .rx_urb_cmd_max_len = ES58X_FD_RX_URB_CMD_MAX_LEN,
+ /* The size of the internal device TX queue is 500.
+ *
+ * However, when reaching a value of around 278, the device's
+ * busy LED turns on, so the maximum value of 500 is never
+ * reached in practice. Also, when this value is too high,
+ * some errors on the echo_msg were observed while the device
+ * was recovering from bus off.
+ *
+ * For the above reasons, a value that prevents the device
+ * from becoming busy was chosen. In practice, BQL prevents
+ * the queue from even getting close to this maximum, so no
+ * impact on performance was measured.
+ */
+ .fifo_mask = 255, /* echo_skb_max = 256 */
+ .dql_min_limit = CAN_FRAME_LEN_MAX * 15, /* Empirical value. */
+ .tx_bulk_max = ES58X_FD_TX_BULK_MAX,
+ .urb_cmd_header_len = ES58X_FD_URB_CMD_HEADER_LEN,
+ .rx_urb_max = ES58X_RX_URBS_MAX,
+ .tx_urb_max = ES58X_TX_URBS_MAX
+};
+
+const struct es58x_operators es58x_fd_ops = {
+ .get_msg_len = es58x_fd_get_msg_len,
+ .handle_urb_cmd = es58x_fd_handle_urb_cmd,
+ .fill_urb_header = es58x_fd_fill_urb_header,
+ .tx_can_msg = es58x_fd_tx_can_msg,
+ .enable_channel = es58x_fd_enable_channel,
+ .disable_channel = es58x_fd_disable_channel,
+ .reset_device = NULL, /* Not implemented in the device firmware. */
+ .get_timestamp = es58x_fd_get_timestamp
+};
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h
new file mode 100644
index 000000000000..ee18a87e40c0
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_fd.h: Definitions and declarations specific to ETAS
+ * ES582.1 and ES584.1 (naming convention: we use the term "ES58X FD"
+ * when referring to those two variants together).
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef __ES58X_FD_H__
+#define __ES58X_FD_H__
+
+#include <linux/types.h>
+
+#define ES582_1_NUM_CAN_CH 2
+#define ES584_1_NUM_CAN_CH 1
+#define ES58X_FD_NUM_CAN_CH 2
+#define ES58X_FD_CHANNEL_IDX_OFFSET 0
+
+#define ES58X_FD_TX_BULK_MAX 100
+#define ES58X_FD_RX_BULK_MAX 100
+#define ES58X_FD_ECHO_BULK_MAX 100
+
+enum es58x_fd_cmd_type {
+ ES58X_FD_CMD_TYPE_CAN = 0x03,
+ ES58X_FD_CMD_TYPE_CANFD = 0x04,
+ ES58X_FD_CMD_TYPE_DEVICE = 0xFF
+};
+
+/* Command IDs for ES58X_FD_CMD_TYPE_{CAN,CANFD}. */
+enum es58x_fd_can_cmd_id {
+ ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL = 0x01,
+ ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL = 0x02,
+ ES58X_FD_CAN_CMD_ID_TX_MSG = 0x05,
+ ES58X_FD_CAN_CMD_ID_ECHO_MSG = 0x07,
+ ES58X_FD_CAN_CMD_ID_RX_MSG = 0x10,
+ ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG = 0x11,
+ ES58X_FD_CAN_CMD_ID_RESET_RX = 0x20,
+ ES58X_FD_CAN_CMD_ID_RESET_TX = 0x21,
+ ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK = 0x55
+};
+
+/* Command IDs for ES58X_FD_CMD_TYPE_DEVICE. */
+enum es58x_fd_dev_cmd_id {
+ ES58X_FD_DEV_CMD_ID_GETTIMETICKS = 0x01,
+ ES58X_FD_DEV_CMD_ID_TIMESTAMP = 0x02
+};
+
+/**
+ * enum es58x_fd_ctrlmode - Controller mode.
+ * @ES58X_FD_CTRLMODE_ACTIVE: send and receive messages.
+ * @ES58X_FD_CTRLMODE_PASSIVE: only receive messages (monitor). Do not
+ * send anything, not even the acknowledgment bit.
+ * @ES58X_FD_CTRLMODE_FD: CAN FD according to ISO11898-1.
+ * @ES58X_FD_CTRLMODE_FD_NON_ISO: follow Bosch CAN FD Specification
+ * V1.0
+ * @ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING: Defines
+ * how to behave when the CAN FD reserved bit is monitored as
+ * dominant (c.f. ISO 11898-1:2015, section 10.4.2.4 "Control
+ * field", paragraph "r0 bit"). 0 (not disabled, i.e. enabled):
+ * send an error frame. 1 (disabled): go into bus integration
+ * mode (c.f. below).
+ * @ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION: 0: edge
+ * filtering is disabled. 1: edge filtering is enabled; two
+ * consecutive dominant bits are required to detect an edge for
+ * hard synchronization.
+ */
+enum es58x_fd_ctrlmode {
+ ES58X_FD_CTRLMODE_ACTIVE = 0,
+ ES58X_FD_CTRLMODE_PASSIVE = BIT(0),
+ ES58X_FD_CTRLMODE_FD = BIT(4),
+ ES58X_FD_CTRLMODE_FD_NON_ISO = BIT(5),
+ ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING = BIT(6),
+ ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION = BIT(7)
+};
+
+struct es58x_fd_bittiming {
+ __le32 bitrate;
+ __le16 tseg1; /* range: [tseg1_min-1..tseg1_max-1] */
+ __le16 tseg2; /* range: [tseg2_min-1..tseg2_max-1] */
+ __le16 brp; /* range: [brp_min-1..brp_max-1] */
+ __le16 sjw; /* range: [0..sjw_max-1] */
+} __packed;
+
+/**
+ * struct es58x_fd_tx_conf_msg - Channel configuration.
+ * @nominal_bittiming: Nominal bittiming.
+ * @samples_per_bit: type enum es58x_samples_per_bit.
+ * @sync_edge: type enum es58x_sync_edge.
+ * @physical_layer: type enum es58x_physical_layer.
+ * @echo_mode: type enum es58x_echo_mode.
+ * @ctrlmode: type enum es58x_fd_ctrlmode.
+ * @canfd_enabled: boolean (0: Classical CAN, 1: CAN and/or CANFD).
+ * @data_bittiming: Bittiming for flexible data-rate transmission.
+ * @tdc_enabled: Transmitter Delay Compensation switch (0: disabled,
+ * 1: enabled). At very high bitrates, the delay between when a
+ * bit is sent and when it is received on the CANTX and CANRX
+ * pins of the transceiver starts to become significant enough
+ * for errors to occur and thus needs to be compensated.
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in time
+ * quanta, defining the delay between the start of the bit
+ * reception on the CANRX pin of the transceiver and the SSP
+ * (Secondary Sample Point). Valid values: 0 to 127.
+ * @tdcf: Transmitter Delay Compensation Filter window. Defines the
+ * minimum value for the SSP position, in time quanta. The
+ * feature is enabled when TDCF is configured to a value greater
+ * than TDCO. Valid values: 0 to 127.
+ *
+ * Please refer to the microcontroller datasheet: "SAM
+ * E70/S70/V70/V71 Family" section 49 "Controller Area Network
+ * (MCAN)" for additional information.
+ */
+struct es58x_fd_tx_conf_msg {
+ struct es58x_fd_bittiming nominal_bittiming;
+ u8 samples_per_bit;
+ u8 sync_edge;
+ u8 physical_layer;
+ u8 echo_mode;
+ u8 ctrlmode;
+ u8 canfd_enabled;
+ struct es58x_fd_bittiming data_bittiming;
+ u8 tdc_enabled;
+ __le16 tdco;
+ __le16 tdcf;
+} __packed;
+
+#define ES58X_FD_CAN_CONF_LEN \
+ (offsetof(struct es58x_fd_tx_conf_msg, canfd_enabled))
+#define ES58X_FD_CANFD_CONF_LEN (sizeof(struct es58x_fd_tx_conf_msg))
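
Since both structures are __packed, these lengths are fixed: struct es58x_fd_bittiming is 12 bytes (one __le32 plus four __le16), so by this layout ES58X_FD_CAN_CONF_LEN evaluates to 17 (everything up to but excluding canfd_enabled) and ES58X_FD_CANFD_CONF_LEN to 35 (the full structure). es58x_fd_enable_channel() earlier in es58x_fd.c picks one or the other, so a Classical CAN configuration simply omits the CAN FD and TDC fields.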
+
+struct es58x_fd_tx_can_msg {
+ u8 packet_idx;
+ __le32 can_id;
+ u8 flags;
+ union {
+ u8 dlc; /* Only if cmd_type is ES58X_FD_CMD_TYPE_CAN */
+ u8 len; /* Only if cmd_type is ES58X_FD_CMD_TYPE_CANFD */
+ } __packed;
+ u8 data[CANFD_MAX_DLEN];
+} __packed;
+
+#define ES58X_FD_CAN_TX_LEN \
+ (offsetof(struct es58x_fd_tx_can_msg, data[CAN_MAX_DLEN]))
+#define ES58X_FD_CANFD_TX_LEN (sizeof(struct es58x_fd_tx_can_msg))
+
+struct es58x_fd_rx_can_msg {
+ __le64 timestamp;
+ __le32 can_id;
+ u8 flags;
+ union {
+ u8 dlc; /* Only if cmd_type is ES58X_FD_CMD_TYPE_CAN */
+ u8 len; /* Only if cmd_type is ES58X_FD_CMD_TYPE_CANFD */
+ } __packed;
+ u8 data[CANFD_MAX_DLEN];
+} __packed;
+
+#define ES58X_FD_CAN_RX_LEN \
+ (offsetof(struct es58x_fd_rx_can_msg, data[CAN_MAX_DLEN]))
+#define ES58X_FD_CANFD_RX_LEN (sizeof(struct es58x_fd_rx_can_msg))
+
+struct es58x_fd_echo_msg {
+ __le64 timestamp;
+ u8 packet_idx;
+} __packed;
+
+struct es58x_fd_rx_event_msg {
+ __le64 timestamp;
+ __le32 can_id;
+ u8 flags; /* type enum es58x_flag */
+ u8 error_type; /* 0: event, 1: error */
+ u8 error_code;
+ u8 event_code;
+} __packed;
+
+struct es58x_fd_tx_ack_msg {
+ __le32 rx_cmd_ret_le32; /* type enum es58x_cmd_ret_code_u32 */
+ __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */
+} __packed;
+
+/**
+ * struct es58x_fd_urb_cmd - Commands received from or sent to the
+ * ES58X FD device.
+ * @SOF: Start of Frame.
+ * @cmd_type: Command Type (type: enum es58x_fd_cmd_type). The CRC
+ * calculation starts at this position.
+ * @cmd_id: Command ID (type: enum es58x_fd_can_cmd_id or enum
+ * es58x_fd_dev_cmd_id, depending on @cmd_type).
+ * @channel_idx: Channel index starting at 0.
+ * @msg_len: Length of the message, excluding CRC (i.e. length of the
+ * union).
+ * @tx_conf_msg: Channel configuration.
+ * @tx_can_msg_buf: Concatenation of Tx messages. Type is "u8[]"
+ * instead of "struct es58x_fd_tx_can_msg[]" because the
+ * structure has a flexible size.
+ * @rx_can_msg_buf: Concatenation of Rx messages. Type is "u8[]"
+ * instead of "struct es58x_fd_rx_can_msg[]" because the
+ * structure has a flexible size.
+ * @echo_msg: Array of echo messages (e.g. Tx messages being looped
+ * back).
+ * @rx_event_msg: Error or event message.
+ * @tx_ack_msg: Tx acknowledgment message.
+ * @timestamp: Timestamp reply.
+ * @rx_cmd_ret_le32: Rx 32 bits return code (type: enum
+ * es58x_cmd_ret_code_u32).
+ * @raw_msg: Message raw payload.
+ * @reserved_for_crc16_do_not_use: The structure ends with a
+ * CRC16. Because the structures in the above union are of
+ * variable lengths, we cannot predict the offset of the CRC in
+ * advance. Use the functions es58x_get_crc() and es58x_set_crc()
+ * to manipulate it.
+ */
+struct es58x_fd_urb_cmd {
+ __le16 SOF;
+ u8 cmd_type;
+ u8 cmd_id;
+ u8 channel_idx;
+ __le16 msg_len;
+
+ union {
+ struct es58x_fd_tx_conf_msg tx_conf_msg;
+ u8 tx_can_msg_buf[ES58X_FD_TX_BULK_MAX * ES58X_FD_CANFD_TX_LEN];
+ u8 rx_can_msg_buf[ES58X_FD_RX_BULK_MAX * ES58X_FD_CANFD_RX_LEN];
+ struct es58x_fd_echo_msg echo_msg[ES58X_FD_ECHO_BULK_MAX];
+ struct es58x_fd_rx_event_msg rx_event_msg;
+ struct es58x_fd_tx_ack_msg tx_ack_msg;
+ __le64 timestamp;
+ __le32 rx_cmd_ret_le32;
+ u8 raw_msg[0];
+ } __packed;
+
+ __le16 reserved_for_crc16_do_not_use;
+} __packed;
+
+#define ES58X_FD_URB_CMD_HEADER_LEN (offsetof(struct es58x_fd_urb_cmd, raw_msg))
+#define ES58X_FD_TX_URB_CMD_MAX_LEN \
+ ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, tx_can_msg_buf)
+#define ES58X_FD_RX_URB_CMD_MAX_LEN \
+ ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, rx_can_msg_buf)
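
Putting the pieces together, one command on the wire is the 7 byte header (SOF through msg_len, i.e. ES58X_FD_URB_CMD_HEADER_LEN), msg_len bytes of the union, and the trailing CRC16. A sketch of the total length computation, assuming the core helper es58x_get_urb_cmd_len() follows the arithmetic its call site in es58x_fd_tx_can_msg() suggests:

	u16 msg_len = 30;			/* illustrative */
	size_t urb_len = ES58X_FD_URB_CMD_HEADER_LEN	/* 7 byte header */
			 + msg_len			/* union payload */
			 + sizeof(__le16);		/* trailing CRC16 */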
+
+#endif /* __ES58X_FD_H__ */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a00dc1904415..5e892bef46b0 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -533,7 +533,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (unlikely(rc)) { /* usb send failed */
atomic_dec(&dev->active_tx_urbs);
- can_free_echo_skb(netdev, idx);
+ can_free_echo_skb(netdev, idx, NULL);
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
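
This series extends can_free_echo_skb() with a third parameter. The assumed prototype after the CAN dev core change (which is outside these hunks):

	/* void can_free_echo_skb(struct net_device *dev, unsigned int idx,
	 *			  unsigned int *len_ptr);
	 * A non-NULL len_ptr receives the length of the freed frame so
	 * that callers using byte queue limits can credit it back;
	 * drivers that do not track queued bytes pass NULL, as here.
	 */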
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 4e97da8434ab..90ebcae13409 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -593,7 +593,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (unlikely(err)) {
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_free_echo_skb(netdev, context->echo_index);
+ can_free_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(netdev);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 1f649d178010..029e77dfa773 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -364,7 +364,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
xmit_failed:
- can_free_echo_skb(priv->netdev, ctx->ndx);
+ can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
mcba_usb_free_ctx(ctx);
dev_kfree_skb(skb);
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index e393e8457d77..1d6f77252f01 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/module.h>
+#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -40,6 +41,7 @@
#define PCAN_USB_CMD_REGISTER 9
#define PCAN_USB_CMD_EXT_VCC 10
#define PCAN_USB_CMD_ERR_FR 11
+#define PCAN_USB_CMD_LED 12
/* PCAN_USB_CMD_SET_BUS number arg */
#define PCAN_USB_BUS_XCVER 2
@@ -248,6 +250,15 @@ static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args);
}
+static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = !!onoff,
+ };
+
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_LED, PCAN_USB_SET, args);
+}
+
/*
* set bittiming value to can
*/
@@ -354,16 +365,11 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
int err;
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args);
- if (err) {
- netdev_err(dev->netdev, "getting serial failure: %d\n", err);
- } else if (serial_number) {
- __le32 tmp32;
-
- memcpy(&tmp32, args, 4);
- *serial_number = le32_to_cpu(tmp32);
- }
+ if (err)
+ return err;
+ *serial_number = le32_to_cpup((__le32 *)args);
- return err;
+ return 0;
}
/*
@@ -377,8 +383,8 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
netdev_err(dev->netdev, "getting device id failure: %d\n", err);
- else if (device_id)
- *device_id = args[0];
+
+ *device_id = args[0];
return err;
}
@@ -388,14 +394,10 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
*/
static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc)
{
- __le16 tmp16;
-
- if ((mc->ptr+2) > mc->end)
+ if ((mc->ptr + 2) > mc->end)
return -EINVAL;
- memcpy(&tmp16, mc->ptr, 2);
-
- mc->ts16 = le16_to_cpu(tmp16);
+ mc->ts16 = get_unaligned_le16(mc->ptr);
if (mc->rec_idx > 0)
peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16);
@@ -412,16 +414,13 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet)
{
/* only 1st packet supplies a word timestamp */
if (first_packet) {
- __le16 tmp16;
-
if ((mc->ptr + 2) > mc->end)
return -EINVAL;
- memcpy(&tmp16, mc->ptr, 2);
- mc->ptr += 2;
-
- mc->ts16 = le16_to_cpu(tmp16);
+ mc->ts16 = get_unaligned_le16(mc->ptr);
mc->prev_ts8 = mc->ts16 & 0x00ff;
+
+ mc->ptr += 2;
} else {
u8 ts8;
@@ -711,25 +710,17 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
return -ENOMEM;
if (status_len & PCAN_USB_STATUSLEN_EXT_ID) {
- __le32 tmp32;
-
if ((mc->ptr + 4) > mc->end)
goto decode_failed;
- memcpy(&tmp32, mc->ptr, 4);
+ cf->can_id = get_unaligned_le32(mc->ptr) >> 3 | CAN_EFF_FLAG;
mc->ptr += 4;
-
- cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG;
} else {
- __le16 tmp16;
-
if ((mc->ptr + 2) > mc->end)
goto decode_failed;
- memcpy(&tmp16, mc->ptr, 2);
+ cf->can_id = get_unaligned_le16(mc->ptr) >> 5;
mc->ptr += 2;
-
- cf->can_id = le16_to_cpu(tmp16) >> 5;
}
can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode);
@@ -843,15 +834,15 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
/* can id */
if (cf->can_id & CAN_EFF_FLAG) {
- __le32 tmp32 = cpu_to_le32((cf->can_id & CAN_ERR_MASK) << 3);
-
*pc |= PCAN_USB_STATUSLEN_EXT_ID;
- memcpy(++pc, &tmp32, 4);
+ pc++;
+
+ put_unaligned_le32((cf->can_id & CAN_ERR_MASK) << 3, pc);
pc += 4;
} else {
- __le16 tmp16 = cpu_to_le16((cf->can_id & CAN_ERR_MASK) << 5);
+ pc++;
- memcpy(++pc, &tmp16, 2);
+ put_unaligned_le16((cf->can_id & CAN_ERR_MASK) << 5, pc);
pc += 2;
}
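
The shift counts mirror the PCAN-USB framing: an 11 bit standard ID occupies the top bits of a 16 bit word (16 - 11 = 5) and a 29 bit extended ID the top of a 32 bit word (32 - 29 = 3), so encode and decode are exact inverses. A quick round-trip check with illustrative values:

	u16 wire_std = 0x7FF << 5;		/* encode standard ID */
	canid_t id_std = wire_std >> 5;		/* decode: 0x7FF again */

	u32 wire_ext = (0x1FFFFFFF & CAN_ERR_MASK) << 3;	/* extended */
	canid_t id_ext = (wire_ext >> 3) | CAN_EFF_FLAG;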
@@ -971,6 +962,40 @@ static int pcan_usb_probe(struct usb_interface *intf)
return 0;
}
+static int pcan_usb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* call ON/OFF twice a second */
+ return 2;
+
+ case ETHTOOL_ID_OFF:
+ err = pcan_usb_set_led(dev, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ fallthrough;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* restore LED default */
+ err = pcan_usb_set_led(dev, 1);
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static const struct ethtool_ops pcan_usb_ethtool_ops = {
+ .set_phys_id = pcan_usb_set_phys_id,
+};
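
With .set_phys_id wired up, the adapter LED can be located from user space with the standard identify command, e.g. "ethtool -p can0 10" to blink for ten seconds. Returning 2 from the ETHTOOL_ID_ACTIVE case asks the ethtool core to call the ON/OFF states twice per second rather than leaving the blinking to the driver.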
+
/*
* describe the PCAN-USB adapter
*/
@@ -994,16 +1019,17 @@ const struct peak_usb_adapter pcan_usb = {
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
- .freq = PCAN_USB_CRYSTAL_HZ / 2 ,
+ .freq = PCAN_USB_CRYSTAL_HZ / 2,
},
.bittiming_const = &pcan_usb_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb),
+ .ethtool_ops = &pcan_usb_ethtool_ops,
+
/* timestamps usage */
.ts_used_bits = 16,
- .ts_period = 24575, /* calibration period in ts. */
.us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */
.us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 28e916a04047..e8f43ed90b72 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
+#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -26,28 +27,32 @@ MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
MODULE_LICENSE("GPL v2");
/* Table of devices that work with this driver */
-static struct usb_device_id peak_usb_table[] = {
- {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)},
- {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
- {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
- {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
- {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
- {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
- {} /* Terminating entry */
+static const struct usb_device_id peak_usb_table[] = {
+ {
+ USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&pcan_usb,
+ }, {
+ USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&pcan_usb_pro,
+ }, {
+ USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&pcan_usb_fd,
+ }, {
+ USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&pcan_usb_pro_fd,
+ }, {
+ USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&pcan_usb_chip,
+ }, {
+ USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&pcan_usb_x6,
+ }, {
+ /* Terminating entry */
+ }
};
MODULE_DEVICE_TABLE(usb, peak_usb_table);
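
Storing each adapter descriptor in .driver_info means the USB core hands the matched table entry straight to probe; as the peak_usb_probe() hunk further down shows, recovering the descriptor is then a single cast of id->driver_info, replacing the manual scan of a parallel adapters list keyed by idProduct.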
-/* List of supported PCAN-USB adapters (NULL terminated list) */
-static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
- &pcan_usb,
- &pcan_usb_pro,
- &pcan_usb_fd,
- &pcan_usb_pro_fd,
- &pcan_usb_chip,
- &pcan_usb_x6,
-};
-
/*
* dump memory
*/
@@ -371,7 +376,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- can_free_echo_skb(netdev, context->echo_index);
+ can_free_echo_skb(netdev, context->echo_index, NULL);
usb_unanchor_urb(urb);
@@ -623,6 +628,7 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
/* can set bus off now */
if (dev->adapter->dev_set_bus) {
int err = dev->adapter->dev_set_bus(dev, 0);
+
if (err)
return err;
}
@@ -820,6 +826,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
netdev->flags |= IFF_ECHO; /* we support local echo */
+ /* add ethtool support */
+ netdev->ethtool_ops = peak_usb_adapter->ethtool_ops;
+
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
@@ -923,24 +932,11 @@ static void peak_usb_disconnect(struct usb_interface *intf)
static int peak_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *usb_dev = interface_to_usbdev(intf);
- const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct);
- const struct peak_usb_adapter *peak_usb_adapter = NULL;
+ const struct peak_usb_adapter *peak_usb_adapter;
int i, err = -ENOMEM;
/* get corresponding PCAN-USB adapter */
- for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
- if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
- peak_usb_adapter = peak_usb_adapters_list[i];
- break;
- }
-
- if (!peak_usb_adapter) {
- /* should never come except device_id bad usage in this file */
- pr_err("%s: didn't find device id. 0x%x in devices list\n",
- PCAN_USB_DRIVER_NAME, usb_id_product);
- return -ENODEV;
- }
+ peak_usb_adapter = (const struct peak_usb_adapter *)id->driver_info;
/* got corresponding adapter: check if it handles current interface */
if (peak_usb_adapter->intf_probe) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 4b1528a42a7b..b00a4811bf61 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -31,7 +31,7 @@
/* usb adapters maximum channels per usb interface */
#define PCAN_USB_MAX_CHANNEL 2
-/* maximum length of the usb commands sent to/received from the devices */
+/* maximum length of the usb commands sent to/received from the devices */
#define PCAN_USB_MAX_CMD_LEN 32
struct peak_usb_device;
@@ -46,6 +46,8 @@ struct peak_usb_adapter {
const struct can_bittiming_const * const data_bittiming_const;
unsigned int ctrl_count;
+ const struct ethtool_ops *ethtool_ops;
+
int (*intf_probe)(struct usb_interface *intf);
int (*dev_init)(struct peak_usb_device *dev);
@@ -71,7 +73,6 @@ struct peak_usb_adapter {
u8 ep_msg_in;
u8 ep_msg_out[PCAN_USB_MAX_CHANNEL];
u8 ts_used_bits;
- u32 ts_period;
u8 us_per_ts_shift;
u32 us_per_ts_scale;
@@ -112,8 +113,6 @@ struct peak_usb_device {
unsigned int ctrl_idx;
u32 state;
- struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS];
-
struct usb_device *udev;
struct net_device *netdev;
@@ -130,8 +129,6 @@ struct peak_usb_device {
u8 ep_msg_in;
u8 ep_msg_out;
- u16 bus_load;
-
struct peak_usb_device *prev_siblings;
struct peak_usb_device *next_siblings;
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index bae078579c0d..b11eabad575b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/module.h>
+#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -773,6 +774,10 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
tx_msg_flags |= PUCAN_MSG_RTR;
}
+ /* Single-Shot frame */
+ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ tx_msg_flags |= PUCAN_MSG_SINGLE_SHOT;
+
tx_msg->flags = cpu_to_le16(tx_msg_flags);
tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc);
memcpy(tx_msg->d, cfd->data, cfd->len);
@@ -1006,6 +1011,31 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
}
}
+/* blink LEDs */
+static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_FAST);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
+ .set_phys_id = pcan_usb_fd_set_phys_id,
+};
+
/* describes the PCAN-USB FD adapter */
static const struct can_bittiming_const pcan_usb_fd_const = {
.name = "pcan_usb_fd",
@@ -1037,7 +1067,7 @@ const struct peak_usb_adapter pcan_usb_fd = {
.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_CC_LEN8_DLC,
+ CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1047,9 +1077,10 @@ const struct peak_usb_adapter pcan_usb_fd = {
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+ .ethtool_ops = &pcan_usb_fd_ethtool_ops,
+
/* timestamps usage */
.ts_used_bits = 32,
- .ts_period = 1000000, /* calibration period in ts. */
.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
.us_per_ts_shift = 0,
@@ -1110,7 +1141,7 @@ const struct peak_usb_adapter pcan_usb_chip = {
.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_CC_LEN8_DLC,
+ CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1120,9 +1151,10 @@ const struct peak_usb_adapter pcan_usb_chip = {
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+ .ethtool_ops = &pcan_usb_fd_ethtool_ops,
+
/* timestamps usage */
.ts_used_bits = 32,
- .ts_period = 1000000, /* calibration period in ts. */
.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
.us_per_ts_shift = 0,
@@ -1183,7 +1215,7 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_CC_LEN8_DLC,
+ CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1193,9 +1225,10 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+ .ethtool_ops = &pcan_usb_fd_ethtool_ops,
+
/* timestamps usage */
.ts_used_bits = 32,
- .ts_period = 1000000, /* calibration period in ts. */
.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
.us_per_ts_shift = 0,
@@ -1256,7 +1289,7 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_CC_LEN8_DLC,
+ CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1266,9 +1299,10 @@ const struct peak_usb_adapter pcan_usb_x6 = {
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+ .ethtool_ops = &pcan_usb_fd_ethtool_ops,
+
/* timestamps usage */
.ts_used_bits = 32,
- .ts_period = 1000000, /* calibration period in ts. */
.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
.us_per_ts_shift = 0,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 18fa180ecc81..858ab22708fc 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -9,6 +9,7 @@
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/module.h>
+#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -36,6 +37,7 @@
#define PCAN_USBPRO_RTR 0x01
#define PCAN_USBPRO_EXT 0x02
+#define PCAN_USBPRO_SS 0x08
#define PCAN_USBPRO_CMD_BUFFER_SIZE 512
@@ -288,7 +290,7 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
pr->data_type);
/* check if channel in response corresponds too */
- else if ((req_channel != 0xff) && \
+ else if ((req_channel != 0xff) &&
(pr->bus_act.channel != req_channel))
netdev_err(dev->netdev,
"got rsp %xh but on chan%u: ignored\n",
@@ -437,8 +439,7 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- if (device_id)
- *device_id = le32_to_cpu(pdn->serial_num);
+ *device_id = le32_to_cpu(pdn->serial_num);
return err;
}
@@ -776,9 +777,13 @@ static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev,
flags = 0;
if (cf->can_id & CAN_EFF_FLAG)
- flags |= 0x02;
+ flags |= PCAN_USBPRO_EXT;
if (cf->can_id & CAN_RTR_FLAG)
- flags |= 0x01;
+ flags |= PCAN_USBPRO_RTR;
+
+ /* Single-Shot frame */
+ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ flags |= PCAN_USBPRO_SS;
pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id,
cf->data);
@@ -906,7 +911,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
usb_if->dev[dev->ctrl_idx] = dev;
/* set LED in default state (end of init phase) */
- pcan_usb_pro_set_led(dev, 0, 1);
+ pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1);
kfree(bi);
kfree(fi);
@@ -990,6 +995,35 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
return 0;
}
+static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* fast blinking forever */
+ err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_BLINK_FAST,
+ 0xffffffff);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* restore LED default */
+ err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1);
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
+ .set_phys_id = pcan_usb_pro_set_phys_id,
+};
+
/*
* describe the PCAN-USB Pro adapter
*/
@@ -1009,7 +1043,8 @@ const struct peak_usb_adapter pcan_usb_pro = {
.name = "PCAN-USB Pro",
.device_id = PCAN_USBPRO_PRODUCT_ID,
.ctrl_count = PCAN_USBPRO_CHANNEL_COUNT,
- .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_ONE_SHOT,
.clock = {
.freq = PCAN_USBPRO_CRYSTAL_HZ,
},
@@ -1018,9 +1053,10 @@ const struct peak_usb_adapter pcan_usb_pro = {
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
+ .ethtool_ops = &pcan_usb_pro_ethtool_ops,
+
/* timestamps usage */
.ts_used_bits = 32,
- .ts_period = 1000000, /* calibration period in ts. */
.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
.us_per_ts_shift = 0,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 6bb12357d078..5d4cf14eb9d9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -34,11 +34,11 @@
/* PCAN_USBPRO_INFO_BL vendor request record type */
struct __packed pcan_usb_pro_blinfo {
__le32 ctrl_type;
- u8 version[4];
- u8 day;
- u8 month;
- u8 year;
- u8 dummy;
+ u8 version[4];
+ u8 day;
+ u8 month;
+ u8 year;
+ u8 dummy;
__le32 serial_num_hi;
__le32 serial_num_lo;
__le32 hw_type;
@@ -48,11 +48,11 @@ struct __packed pcan_usb_pro_blinfo {
/* PCAN_USBPRO_INFO_FW vendor request record type */
struct __packed pcan_usb_pro_fwinfo {
__le32 ctrl_type;
- u8 version[4];
- u8 day;
- u8 month;
- u8 year;
- u8 dummy;
+ u8 version[4];
+ u8 day;
+ u8 month;
+ u8 year;
+ u8 dummy;
__le32 fw_type;
};
@@ -78,59 +78,65 @@ struct __packed pcan_usb_pro_fwinfo {
/* record structures */
struct __packed pcan_usb_pro_btr {
- u8 data_type;
- u8 channel;
+ u8 data_type;
+ u8 channel;
__le16 dummy;
__le32 CCBT;
};
struct __packed pcan_usb_pro_busact {
- u8 data_type;
- u8 channel;
+ u8 data_type;
+ u8 channel;
__le16 onoff;
};
struct __packed pcan_usb_pro_silent {
- u8 data_type;
- u8 channel;
+ u8 data_type;
+ u8 channel;
__le16 onoff;
};
struct __packed pcan_usb_pro_filter {
- u8 data_type;
- u8 dummy;
+ u8 data_type;
+ u8 dummy;
__le16 filter_mode;
};
struct __packed pcan_usb_pro_setts {
- u8 data_type;
- u8 dummy;
+ u8 data_type;
+ u8 dummy;
__le16 mode;
};
struct __packed pcan_usb_pro_devid {
- u8 data_type;
- u8 channel;
+ u8 data_type;
+ u8 channel;
__le16 dummy;
__le32 serial_num;
};
+#define PCAN_USBPRO_LED_DEVICE 0x00
+#define PCAN_USBPRO_LED_BLINK_FAST 0x01
+#define PCAN_USBPRO_LED_BLINK_SLOW 0x02
+#define PCAN_USBPRO_LED_ON 0x03
+#define PCAN_USBPRO_LED_OFF 0x04
+
struct __packed pcan_usb_pro_setled {
- u8 data_type;
- u8 channel;
+ u8 data_type;
+ u8 channel;
__le16 mode;
__le32 timeout;
};
struct __packed pcan_usb_pro_rxmsg {
- u8 data_type;
- u8 client;
- u8 flags;
- u8 len;
+ u8 data_type;
+ u8 client;
+ u8 flags;
+ u8 len;
__le32 ts32;
__le32 id;
- u8 data[8];
+ u8 data[8];
};
#define PCAN_USBPRO_STATUS_ERROR 0x0001
@@ -139,26 +145,26 @@ struct __packed pcan_usb_pro_rxmsg {
#define PCAN_USBPRO_STATUS_QOVERRUN 0x0008
struct __packed pcan_usb_pro_rxstatus {
- u8 data_type;
- u8 channel;
+ u8 data_type;
+ u8 channel;
__le16 status;
__le32 ts32;
__le32 err_frm;
};
struct __packed pcan_usb_pro_rxts {
- u8 data_type;
- u8 dummy[3];
+ u8 data_type;
+ u8 dummy[3];
__le32 ts64[2];
};
struct __packed pcan_usb_pro_txmsg {
- u8 data_type;
- u8 client;
- u8 flags;
- u8 len;
+ u8 data_type;
+ u8 client;
+ u8 flags;
+ u8 len;
__le32 id;
- u8 data[8];
+ u8 data[8];
};
union pcan_usb_pro_rec {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index fa403c080871..1679cbe45ded 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -246,7 +246,7 @@ struct ucan_message_in {
*/
struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
} __aligned(0x4) msg;
-} __packed;
+} __packed __aligned(0x4);
/* Macros to calculate message lengths */
#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg)
@@ -675,7 +675,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
- can_free_echo_skb(up->netdev, echo_index);
+ can_free_echo_skb(up->netdev, echo_index, NULL);
}
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
}
@@ -843,7 +843,7 @@ static void ucan_write_bulk_callback(struct urb *urb)
/* update counters and cleanup */
spin_lock_irqsave(&up->echo_skb_lock, flags);
- can_free_echo_skb(up->netdev, context - up->context_array);
+ can_free_echo_skb(up->netdev, context - up->context_array, NULL);
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
up->netdev->stats.tx_dropped++;
@@ -1157,7 +1157,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
* frees the skb
*/
spin_lock_irqsave(&up->echo_skb_lock, flags);
- can_free_echo_skb(up->netdev, echo_index);
+ can_free_echo_skb(up->netdev, echo_index, NULL);
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
if (ret == -ENODEV) {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index e8c42430a4fc..b6e7ef0d5bc6 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -691,7 +691,7 @@ nofreecontext:
return NETDEV_TX_BUSY;
failed:
- can_free_echo_skb(netdev, context->echo_index);
+ can_free_echo_skb(netdev, context->echo_index, NULL);
usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 37fa19c62d73..3b883e607d8b 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1772,17 +1772,15 @@ static int xcan_probe(struct platform_device *pdev)
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(priv->can_clk)) {
- if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Device clock not found.\n");
- ret = PTR_ERR(priv->can_clk);
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
+ "device clock not found\n");
goto err_free;
}
priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
if (IS_ERR(priv->bus_clk)) {
- if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
+ "bus clock not found\n");
goto err_free;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 3af373e90806..a5f1aa911fe2 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Distributed Switch Architecture drivers"
- depends on HAVE_NET_DSA
+ depends on NET_DSA
source "drivers/net/dsa/b53/Kconfig"
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
- depends on HAS_IOMEM && NET_DSA
+ depends on HAS_IOMEM
select NET_DSA_TAG_BRCM
select FIXED_PHY
select BCM7XXX_PHY
@@ -18,7 +18,6 @@ config NET_DSA_BCM_SF2
config NET_DSA_LOOP
tristate "DSA mock-up Ethernet switch chip support"
- depends on NET_DSA
select FIXED_PHY
help
This enables support for a fake mock-up switch chip which
@@ -28,7 +27,7 @@ source "drivers/net/dsa/hirschmann/Kconfig"
config NET_DSA_LANTIQ_GSWIP
tristate "Lantiq / Intel GSWIP"
- depends on HAS_IOMEM && NET_DSA
+ depends on HAS_IOMEM
select NET_DSA_TAG_GSWIP
help
This enables support for the Lantiq / Intel GSWIP 2.1 found in
@@ -36,7 +35,6 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT753x and MT7621 Ethernet switch support"
- depends on NET_DSA
select NET_DSA_TAG_MTK
help
This enables support for the MediaTek MT7530, MT7531, and MT7621
@@ -44,7 +42,6 @@ config NET_DSA_MT7530
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA
select NET_DSA_TAG_TRAILER
help
This enables support for the Marvell 88E6060 ethernet switch
@@ -64,7 +61,6 @@ source "drivers/net/dsa/xrs700x/Kconfig"
config NET_DSA_QCA8K
tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- depends on NET_DSA
select NET_DSA_TAG_QCA
select REGMAP
help
@@ -73,7 +69,6 @@ config NET_DSA_QCA8K
config NET_DSA_REALTEK_SMI
tristate "Realtek SMI Ethernet switch family support"
- depends on NET_DSA
select NET_DSA_TAG_RTL4_A
select FIXED_PHY
select IRQ_DOMAIN
@@ -93,7 +88,7 @@ config NET_DSA_SMSC_LAN9303
config NET_DSA_SMSC_LAN9303_I2C
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
- depends on NET_DSA && I2C
+ depends on I2C
select NET_DSA_SMSC_LAN9303
select REGMAP_I2C
help
@@ -102,7 +97,6 @@ config NET_DSA_SMSC_LAN9303_I2C
config NET_DSA_SMSC_LAN9303_MDIO
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
- depends on NET_DSA
select NET_DSA_SMSC_LAN9303
help
Enable access functions if the SMSC/Microchip LAN9303 is configured
@@ -110,7 +104,6 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- depends on NET_DSA
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
@@ -120,7 +113,6 @@ config NET_DSA_VITESSE_VSC73XX
config NET_DSA_VITESSE_VSC73XX_SPI
tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support"
- depends on NET_DSA
depends on SPI
select NET_DSA_VITESSE_VSC73XX
help
@@ -129,7 +121,6 @@ config NET_DSA_VITESSE_VSC73XX_SPI
config NET_DSA_VITESSE_VSC73XX_PLATFORM
tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support"
- depends on NET_DSA
depends on HAS_IOMEM
select NET_DSA_VITESSE_VSC73XX
help
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index f9891a81c808..90b525160b71 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -3,6 +3,7 @@ menuconfig B53
tristate "Broadcom BCM53xx managed switch support"
depends on NET_DSA
select NET_DSA_TAG_BRCM
+ select NET_DSA_TAG_BRCM_LEGACY
select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index eb443721c58e..3ca6b394dd5f 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -349,7 +349,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}
-static void b53_enable_vlan(struct b53_device *dev, bool enable,
+static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
bool enable_filtering)
{
u8 mgmt, vc0, vc1, vc4 = 0, vc5;
@@ -431,6 +431,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
dev->vlan_enabled = enable;
+
+ dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
+ port, enable, enable_filtering);
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -743,7 +746,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
@@ -1422,7 +1425,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
{
struct b53_device *dev = ds->priv;
- b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+ b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
return 0;
}
@@ -1447,7 +1450,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, true, ds->vlan_filtering);
+ b53_enable_vlan(dev, port, true, ds->vlan_filtering);
return 0;
}
@@ -2045,15 +2048,17 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
- /* Older models (5325, 5365) support a different tag format that we do
- * not support in net/dsa/tag_brcm.c yet.
- */
- if (is5325(dev) || is5365(dev) ||
- !b53_can_enable_brcm_tags(ds, port, mprot)) {
+ if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
dev->tag_protocol = DSA_TAG_PROTO_NONE;
goto out;
}
+ /* Older models require a different 6 byte tag */
+ if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
+ goto out;
+ }
+
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
* which requires us to use the prepended Broadcom tag type
*/
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index c628d0980c0b..82680e083cc2 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -16,6 +16,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -228,11 +229,65 @@ static const struct b53_io_ops b53_mmap_ops = {
.write64 = b53_mmap_write64,
};
+static int b53_mmap_probe_of(struct platform_device *pdev,
+ struct b53_platform_data **ppdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *of_ports, *of_port;
+ struct device *dev = &pdev->dev;
+ struct b53_platform_data *pdata;
+ void __iomem *mem;
+
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->regs = mem;
+ pdata->chip_id = BCM63XX_DEVICE_ID;
+ pdata->big_endian = of_property_read_bool(np, "big-endian");
+
+ of_ports = of_get_child_by_name(np, "ports");
+ if (!of_ports) {
+ dev_err(dev, "no ports child node found\n");
+ return -EINVAL;
+ }
+
+ for_each_available_child_of_node(of_ports, of_port) {
+ u32 reg;
+
+ if (of_property_read_u32(of_port, "reg", &reg))
+ continue;
+
+ if (reg < B53_CPU_PORT)
+ pdata->enabled_ports |= BIT(reg);
+ }
+
+ of_node_put(of_ports);
+ *ppdata = pdata;
+
+ return 0;
+}
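
For illustration, a hypothetical device tree fragment and the mask it produces (assuming B53_CPU_PORT is the CPU port number, 8 on these switches):

	/* ports {
	 *	port@0 { reg = <0>; };
	 *	port@1 { reg = <1>; };
	 *	port@8 { reg = <8>; };	(CPU port)
	 * };
	 * yields pdata->enabled_ports == BIT(0) | BIT(1): entries with
	 * reg >= B53_CPU_PORT are deliberately left out of the user
	 * port mask.
	 */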
+
static int b53_mmap_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct b53_platform_data *pdata = pdev->dev.platform_data;
struct b53_mmap_priv *priv;
struct b53_device *dev;
+ int ret;
+
+ if (!pdata && np) {
+ ret = b53_mmap_probe_of(pdev, &pdata);
+ if (ret) {
+ dev_err(&pdev->dev, "OF probe error\n");
+ return ret;
+ }
+ }
if (!pdata)
return -EINVAL;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 8419bb7f4505..82700a5714c1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -186,11 +186,7 @@ static inline int is531x5(struct b53_device *dev)
static inline int is63xx(struct b53_device *dev)
{
-#ifdef CONFIG_BCM63XX
return dev->chip_id == BCM63XX_DEVICE_ID;
-#else
- return 0;
-#endif
}
static inline int is5301x(struct b53_device *dev)
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 7abec8dab8ba..ecb9f7f6b335 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -324,9 +324,23 @@ static int b53_spi_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id b53_spi_of_match[] = {
+ { .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5395" },
+ { .compatible = "brcm,bcm5397" },
+ { .compatible = "brcm,bcm5398" },
+ { .compatible = "brcm,bcm53115" },
+ { .compatible = "brcm,bcm53125" },
+ { .compatible = "brcm,bcm53128" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, b53_spi_of_match);
+
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
+ .of_match_table = b53_spi_of_match,
},
.probe = b53_spi_probe,
.remove = b53_spi_remove,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index ba5d546d06aa..9150038b60cb 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -32,6 +32,36 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
+static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ switch (port) {
+ case 7:
+ return REG_RGMII_11_CNTRL;
+ default:
+ break;
+ }
+ break;
+ default:
+ switch (port) {
+ case 0:
+ return REG_RGMII_0_CNTRL;
+ case 1:
+ return REG_RGMII_1_CNTRL;
+ case 2:
+ return REG_RGMII_2_CNTRL;
+ default:
+ break;
+ }
+ }
+
+ WARN_ONCE(1, "Unsupported port %d\n", port);
+
+ /* RO fallback reg */
+ return REG_SWITCH_STATUS;
+}
+
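
bcm_sf2_reg_rgmii_cntrl() trades the old REG_RGMII_CNTRL_P(port) offset arithmetic for a per-SoC switch, since BCM4908 exposes a single RGMII control register for port 7 instead of a contiguous bank for ports 0-2. A standalone sketch of the same lookup shape; the offsets mirror the register tables later in this patch, but the helper and enum names are invented:

#include <stdio.h>
#include <stdint.h>

enum soc { SOC_BCM4908, SOC_OTHER };

static uint16_t rgmii_cntrl_reg(enum soc type, int port)
{
        switch (type) {
        case SOC_BCM4908:
                if (port == 7)
                        return 0x014c;            /* REG_RGMII_11_CNTRL */
                break;
        case SOC_OTHER:
                if (port >= 0 && port <= 2)
                        return 0xe0 + port * 0xc; /* contiguous 0xe0/0xec/0xf8 bank */
                break;
        }
        fprintf(stderr, "unsupported port %d\n", port);
        return 0;       /* caller would fall back to a read-only register */
}

int main(void)
{
        printf("0x%04x\n", (unsigned)rgmii_cntrl_reg(SOC_OTHER, 1));
        printf("0x%04x\n", (unsigned)rgmii_cntrl_reg(SOC_BCM4908, 7));
        return 0;
}
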
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -435,6 +465,44 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
return 0;
}
+static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
+{
+ struct device *dev = priv->dev->ds->dev;
+ int shift;
+ u32 mask;
+ u32 reg;
+ int i;
+
+ mask = BIT(priv->num_crossbar_int_ports) - 1;
+
+ reg = reg_readl(priv, REG_CROSSBAR);
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports;
+ reg &= ~(mask << shift);
+ if (0) /* FIXME */
+ reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
+ else if (priv->int_phy_mask & BIT(7))
+ reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift;
+ else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode))
+ reg |= CROSSBAR_BCM4908_EXT_RGMII << shift;
+ else if (WARN(1, "Invalid port mode\n"))
+ return;
+ break;
+ default:
+ return;
+ }
+ reg_writel(priv, reg, REG_CROSSBAR);
+
+ reg = reg_readl(priv, REG_CROSSBAR);
+ for (i = 0; i < priv->num_crossbar_int_ports; i++) {
+ shift = i * priv->num_crossbar_int_ports;
+
+ dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
+ (reg >> shift) & mask);
+ }
+}
+
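
The crossbar register packs one selector field per internal port; the code derives both the field width and the shift from num_crossbar_int_ports (two 2-bit fields on BCM4908). A runnable sketch of the pack/unpack arithmetic, with selector values standing in for the CROSSBAR_BCM4908_EXT_* constants:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* BCM4908: 2 internal crossbar ports, hence 2-bit selector fields */
#define INT_PORTS  2
#define FIELD_MASK (BIT(INT_PORTS) - 1)

static uint32_t crossbar_set(uint32_t reg, int int_port, uint32_t ext_sel)
{
        int shift = int_port * INT_PORTS;

        reg &= ~(FIELD_MASK << shift);  /* clear the old routing */
        reg |= ext_sel << shift;        /* select the external pad */
        return reg;
}

int main(void)
{
        uint32_t reg = 0;

        reg = crossbar_set(reg, 0, 1);  /* e.g. GPHY4 behind internal port 7 */
        reg = crossbar_set(reg, 1, 2);  /* e.g. RGMII behind the runner port */

        for (int i = 0; i < INT_PORTS; i++)
                printf("int port #%d -> ext #%u\n", i,
                       (reg >> (i * INT_PORTS)) & FIELD_MASK);
        return 0;
}
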
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
intrl2_0_mask_set(priv, 0xffffffff);
@@ -446,10 +514,11 @@ static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
+ struct device *dev = priv->dev->ds->dev;
+ struct bcm_sf2_port_status *port_st;
struct device_node *port;
unsigned int port_num;
struct property *prop;
- phy_interface_t mode;
int err;
priv->moca_port = -1;
@@ -458,19 +527,26 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
if (of_property_read_u32(port, "reg", &port_num))
continue;
+ if (port_num >= DSA_MAX_PORTS) {
+ dev_err(dev, "Invalid port number %d\n", port_num);
+ continue;
+ }
+
+ port_st = &priv->port_sts[port_num];
+
/* Internal PHYs get assigned a specific 'phy-mode' property
* value: "internal" to help flag them before MDIO probing
* has completed, since they might be turned off at that
* time
*/
- err = of_get_phy_mode(port, &mode);
+ err = of_get_phy_mode(port, &port_st->mode);
if (err)
continue;
- if (mode == PHY_INTERFACE_MODE_INTERNAL)
+ if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL)
priv->int_phy_mask |= 1 << port_num;
- if (mode == PHY_INTERFACE_MODE_MOCA)
+ if (port_st->mode == PHY_INTERFACE_MODE_MOCA)
priv->moca_port = port_num;
if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
@@ -647,6 +723,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 id_mode_dis = 0, port_mode;
+ u32 reg_rgmii_ctrl;
u32 reg;
if (port == core_readl(priv, CORE_IMP0_PRT_ID))
@@ -670,10 +747,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
return;
}
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+
/* Clear id_mode_dis bit, and the existing port mode, let
 * RGMII_MODE_EN be set by mac_link_{up,down}
*/
- reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
+ reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~ID_MODE_DIS;
reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
@@ -681,13 +760,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
if (id_mode_dis)
reg |= ID_MODE_DIS;
- reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
+ reg_writel(priv, reg, reg_rgmii_ctrl);
}
static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
phy_interface_t interface, bool link)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg_rgmii_ctrl;
u32 reg;
if (!phy_interface_mode_is_rgmii(interface) &&
@@ -695,13 +775,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
interface != PHY_INTERFACE_MODE_REVMII)
return;
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+
/* If the link is down, just disable the interface to conserve power */
- reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
+ reg = reg_readl(priv, reg_rgmii_ctrl);
if (link)
reg |= RGMII_MODE_EN;
else
reg &= ~RGMII_MODE_EN;
- reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
+ reg_writel(priv, reg, reg_rgmii_ctrl);
}
static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
@@ -735,11 +817,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
- u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
+ u32 reg_rgmii_ctrl;
+ u32 reg, offset;
+
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -750,7 +836,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
- reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
+ reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
if (tx_pause)
@@ -758,7 +844,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
if (rx_pause)
reg |= RX_PAUSE_EN;
- reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
+ reg_writel(priv, reg, reg_rgmii_ctrl);
}
reg = SW_OVERRIDE | LINK_STS;
@@ -861,6 +947,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
return ret;
}
+ bcm_sf2_crossbar_setup(priv);
+
ret = bcm_sf2_cfp_resume(ds);
if (ret)
return ret;
@@ -1133,6 +1221,7 @@ struct bcm_sf2_of_data {
const u16 *reg_offsets;
unsigned int core_reg_align;
unsigned int num_cfp_rules;
+ unsigned int num_crossbar_int_ports;
};
static const u16 bcm_sf2_4908_reg_offsets[] = {
@@ -1144,9 +1233,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
[REG_PHY_REVISION] = 0x14,
[REG_SPHY_CNTRL] = 0x24,
[REG_CROSSBAR] = 0xc8,
- [REG_RGMII_0_CNTRL] = 0xe0,
- [REG_RGMII_1_CNTRL] = 0xec,
- [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_RGMII_11_CNTRL] = 0x014c,
[REG_LED_0_CNTRL] = 0x40,
[REG_LED_1_CNTRL] = 0x4c,
[REG_LED_2_CNTRL] = 0x58,
@@ -1156,7 +1243,8 @@ static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
.type = BCM4908_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_4908_reg_offsets,
- .num_cfp_rules = 0, /* FIXME */
+ .num_cfp_rules = 256,
+ .num_crossbar_int_ports = 2,
};
/* Register offsets for the SWITCH_REG_* block */
@@ -1267,6 +1355,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->reg_offsets = data->reg_offsets;
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
+ priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
"switch");
@@ -1340,6 +1429,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
goto out_clk_mdiv;
}
+ bcm_sf2_crossbar_setup(priv);
+
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
ret = bcm_sf2_mdio_register(ds);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 1ed901a68536..0d48402068d3 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -44,6 +44,7 @@ struct bcm_sf2_hw_params {
#define BCM_SF2_REGS_NUM 6
struct bcm_sf2_port_status {
+ phy_interface_t mode;
unsigned int link;
bool enabled;
};
@@ -73,6 +74,7 @@ struct bcm_sf2_priv {
const u16 *reg_offsets;
unsigned int core_reg_align;
unsigned int num_cfp_rules;
+ unsigned int num_crossbar_int_ports;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 1d2d55c9f8aa..7bffc80f241f 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs {
REG_RGMII_0_CNTRL,
REG_RGMII_1_CNTRL,
REG_RGMII_2_CNTRL,
+ REG_RGMII_11_CNTRL,
REG_LED_0_CNTRL,
REG_LED_1_CNTRL,
REG_LED_2_CNTRL,
@@ -48,7 +49,12 @@ enum bcm_sf2_reg_offs {
#define PHY_PHYAD_SHIFT 8
#define PHY_PHYAD_MASK 0x1F
-#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x))
+/* Relative to REG_CROSSBAR */
+#define CROSSBAR_BCM4908_INT_P7 0
+#define CROSSBAR_BCM4908_INT_RUNNER 1
+#define CROSSBAR_BCM4908_EXT_SERDES 0
+#define CROSSBAR_BCM4908_EXT_GPHY4 1
+#define CROSSBAR_BCM4908_EXT_RGMII 2
/* Relative to REG_RGMII_CNTRL */
#define RGMII_MODE_EN (1 << 0)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 463137c39db2..4d78219da253 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -433,7 +433,7 @@ static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
mutex_lock(&hellcreek->reg_lock);
- hellcreek_select_vlan(hellcreek, vid, 0);
+ hellcreek_select_vlan(hellcreek, vid, false);
/* Setup port vlan membership */
hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
@@ -596,6 +596,83 @@ static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port,
hellcreek_unapply_vlan(hellcreek, upstream, vid);
}
+static void hellcreek_port_set_ucast_flood(struct hellcreek *hellcreek,
+ int port, bool enable)
+{
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "%s unicast flooding on port %d\n",
+ enable ? "Enable" : "Disable", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ if (enable)
+ val &= ~HR_PTCFG_UUC_FLT;
+ else
+ val |= HR_PTCFG_UUC_FLT;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_port_set_mcast_flood(struct hellcreek *hellcreek,
+ int port, bool enable)
+{
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "%s multicast flooding on port %d\n",
+ enable ? "Enable" : "Disable", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ if (enable)
+ val &= ~HR_PTCFG_UMC_FLT;
+ else
+ val |= HR_PTCFG_UMC_FLT;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static int hellcreek_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (flags.mask & BR_FLOOD)
+ hellcreek_port_set_ucast_flood(hellcreek, port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ hellcreek_port_set_mcast_flood(hellcreek, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ return 0;
+}
+
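
switchdev hands bridge port flags over as a val/mask pair: the mask names the flags a request touches and val carries their new state, which is why the pre-check can veto unsupported bits before anything reaches the hardware. A small userspace sketch of that contract; the flag bit values are arbitrary stand-ins for BR_FLOOD and BR_MCAST_FLOOD:

#include <stdbool.h>
#include <stdio.h>

#define FLOOD       (1u << 0)   /* stand-in for BR_FLOOD */
#define MCAST_FLOOD (1u << 1)   /* stand-in for BR_MCAST_FLOOD */

struct brport_flags { unsigned int val, mask; };

static int pre_flags(struct brport_flags f)
{
        if (f.mask & ~(FLOOD | MCAST_FLOOD))
                return -1;      /* an unsupported flag was requested */
        return 0;
}

static void apply_flags(struct brport_flags f, bool *uc, bool *mc)
{
        if (f.mask & FLOOD)     /* only touch what the mask names */
                *uc = f.val & FLOOD;
        if (f.mask & MCAST_FLOOD)
                *mc = f.val & MCAST_FLOOD;
}

int main(void)
{
        bool uc = true, mc = true;
        struct brport_flags f = { .mask = MCAST_FLOOD, .val = 0 };

        if (!pre_flags(f))
                apply_flags(f, &uc, &mc);
        printf("ucast=%d mcast=%d\n", uc, mc);  /* ucast stays untouched */
        return 0;
}
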
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br)
{
@@ -670,6 +747,40 @@ static int __hellcreek_fdb_del(struct hellcreek *hellcreek,
return hellcreek_wait_fdb_ready(hellcreek);
}
+static void hellcreek_populate_fdb_entry(struct hellcreek *hellcreek,
+ struct hellcreek_fdb_entry *entry,
+ size_t idx)
+{
+ unsigned char addr[ETH_ALEN];
+ u16 meta, mac;
+
+ /* Read values */
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ addr[5] = mac & 0xff;
+ addr[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ addr[3] = mac & 0xff;
+ addr[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ addr[1] = mac & 0xff;
+ addr[0] = (mac & 0xff00) >> 8;
+
+ /* Populate @entry */
+ memcpy(entry->mac, addr, sizeof(addr));
+ entry->idx = idx;
+ entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
+ HR_FDBMDRD_AGE_SHIFT;
+ entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
+ entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
+ entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
+ entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
+ HR_FDBMDRD_REPRIO_TC_SHIFT;
+ entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
+}
+
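
hellcreek_populate_fdb_entry() rebuilds the 48-bit MAC from three 16-bit reads, low word first, before unpacking the metadata word through mask/shift pairs. The byte reassembly in isolation, with invented register values in place of the HR_FDBRD* reads:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* three 16-bit reads, as from HR_FDBRDL / HR_FDBRDM / HR_FDBRDH */
        uint16_t lo = 0x5566, mid = 0x3344, hi = 0x1122;
        unsigned char addr[6];

        addr[5] = lo & 0xff;
        addr[4] = (lo & 0xff00) >> 8;
        addr[3] = mid & 0xff;
        addr[2] = (mid & 0xff00) >> 8;
        addr[1] = hi & 0xff;
        addr[0] = (hi & 0xff00) >> 8;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        /* prints 11:22:33:44:55:66 */
        return 0;
}
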
 /* Retrieve the index of an FDB entry by mac address. Currently we search through
* the complete table in hardware. If that's too slow, we might have to cache
* the complete FDB table in software.
@@ -691,39 +802,19 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek,
* enter new entries anywhere.
*/
for (i = 0; i < hellcreek->fdb_entries; ++i) {
- unsigned char addr[ETH_ALEN];
- u16 meta, mac;
-
- meta = hellcreek_read(hellcreek, HR_FDBMDRD);
- mac = hellcreek_read(hellcreek, HR_FDBRDL);
- addr[5] = mac & 0xff;
- addr[4] = (mac & 0xff00) >> 8;
- mac = hellcreek_read(hellcreek, HR_FDBRDM);
- addr[3] = mac & 0xff;
- addr[2] = (mac & 0xff00) >> 8;
- mac = hellcreek_read(hellcreek, HR_FDBRDH);
- addr[1] = mac & 0xff;
- addr[0] = (mac & 0xff00) >> 8;
+ struct hellcreek_fdb_entry tmp = { 0 };
+
+ /* Read entry */
+ hellcreek_populate_fdb_entry(hellcreek, &tmp, i);
/* Force next entry */
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
- if (memcmp(addr, dest, ETH_ALEN))
+ if (memcmp(tmp.mac, dest, ETH_ALEN))
continue;
/* Match found */
- entry->idx = i;
- entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
- HR_FDBMDRD_PORTMASK_SHIFT;
- entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
- HR_FDBMDRD_AGE_SHIFT;
- entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
- entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
- entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
- entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
- HR_FDBMDRD_REPRIO_TC_SHIFT;
- entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
- memcpy(entry->mac, addr, sizeof(addr));
+ memcpy(entry, &tmp, sizeof(*entry));
return 0;
}
@@ -838,18 +929,9 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
for (i = 0; i < hellcreek->fdb_entries; ++i) {
unsigned char null_addr[ETH_ALEN] = { 0 };
struct hellcreek_fdb_entry entry = { 0 };
- u16 meta, mac;
-
- meta = hellcreek_read(hellcreek, HR_FDBMDRD);
- mac = hellcreek_read(hellcreek, HR_FDBRDL);
- entry.mac[5] = mac & 0xff;
- entry.mac[4] = (mac & 0xff00) >> 8;
- mac = hellcreek_read(hellcreek, HR_FDBRDM);
- entry.mac[3] = mac & 0xff;
- entry.mac[2] = (mac & 0xff00) >> 8;
- mac = hellcreek_read(hellcreek, HR_FDBRDH);
- entry.mac[1] = mac & 0xff;
- entry.mac[0] = (mac & 0xff00) >> 8;
+
+ /* Read entry */
+ hellcreek_populate_fdb_entry(hellcreek, &entry, i);
/* Force next entry */
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
@@ -858,10 +940,6 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
if (!memcmp(entry.mac, null_addr, ETH_ALEN))
continue;
- entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
- HR_FDBMDRD_PORTMASK_SHIFT;
- entry.is_static = !!(meta & HR_FDBMDRD_STATIC);
-
/* Check port mask */
if (!(entry.portmask & BIT(port)))
continue;
@@ -1004,6 +1082,22 @@ out:
return ret;
}
+static int hellcreek_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, "hellcreek");
+ if (ret)
+ return ret;
+
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ hellcreek->pdata->name);
+}
+
static u64 hellcreek_devlink_vlan_table_get(void *priv)
{
struct hellcreek *hellcreek = priv;
@@ -1082,6 +1176,129 @@ out:
return err;
}
+static int hellcreek_devlink_region_vlan_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct hellcreek_devlink_vlan_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct hellcreek *hellcreek = ds->priv;
+ int i;
+
+ table = kcalloc(VLAN_N_VID, sizeof(*entry), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+
+ mutex_lock(&hellcreek->reg_lock);
+ for (i = 0; i < VLAN_N_VID; ++i, ++entry) {
+ entry->member = hellcreek->vidmbrcfg[i];
+ entry->vid = i;
+ }
+ mutex_unlock(&hellcreek->reg_lock);
+
+ *data = (u8 *)table;
+
+ return 0;
+}
+
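
Both snapshot callbacks follow the same shape: allocate a flat table, copy the driver's shadow state into it under the register lock, and hand the buffer to devlink, which later frees it through the region destructor. A userspace analogue with a pthread mutex standing in for reg_lock:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define N_VIDS 4096     /* VLAN_N_VID */

struct vlan_entry { uint16_t vid, member; };

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t vidmbrcfg[N_VIDS];      /* driver shadow state */

static int vlan_snapshot(uint8_t **data)
{
        struct vlan_entry *table = calloc(N_VIDS, sizeof(*table));

        if (!table)
                return -1;

        pthread_mutex_lock(&reg_lock);
        for (int i = 0; i < N_VIDS; i++) {
                table[i].vid = i;
                table[i].member = vidmbrcfg[i];
        }
        pthread_mutex_unlock(&reg_lock);

        *data = (uint8_t *)table;       /* ownership passes to the caller */
        return 0;
}

int main(void)
{
        uint8_t *snap;

        if (!vlan_snapshot(&snap))
                free(snap);     /* analogue of the kfree region destructor */
        return 0;
}
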
+static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct hellcreek_fdb_entry *table, *entry;
+ struct hellcreek *hellcreek = ds->priv;
+ size_t i;
+
+ table = kcalloc(hellcreek->fdb_entries, sizeof(*entry), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Start table read */
+ hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ for (i = 0; i < hellcreek->fdb_entries; ++i, ++entry) {
+ /* Read current entry */
+ hellcreek_populate_fdb_entry(hellcreek, entry, i);
+
+ /* Advance read pointer */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ *data = (u8 *)table;
+
+ return 0;
+}
+
+static struct devlink_region_ops hellcreek_region_vlan_ops = {
+ .name = "vlan",
+ .snapshot = hellcreek_devlink_region_vlan_snapshot,
+ .destructor = kfree,
+};
+
+static struct devlink_region_ops hellcreek_region_fdb_ops = {
+ .name = "fdb",
+ .snapshot = hellcreek_devlink_region_fdb_snapshot,
+ .destructor = kfree,
+};
+
+static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+ int ret;
+
+ /* VLAN table */
+ size = VLAN_N_VID * sizeof(struct hellcreek_devlink_vlan_entry);
+ ops = &hellcreek_region_vlan_ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ hellcreek->vlan_region = region;
+
+ /* FDB table */
+ size = hellcreek->fdb_entries * sizeof(struct hellcreek_fdb_entry);
+ ops = &hellcreek_region_fdb_ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
+ goto err_fdb;
+ }
+
+ hellcreek->fdb_region = region;
+
+ return 0;
+
+err_fdb:
+ dsa_devlink_region_destroy(hellcreek->vlan_region);
+
+ return ret;
+}
+
+static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dsa_devlink_region_destroy(hellcreek->fdb_region);
+ dsa_devlink_region_destroy(hellcreek->vlan_region);
+}
+
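
Because setup creates the VLAN region before the FDB region, the error path unwinds only what already exists and teardown releases in reverse creation order. The goto idiom in a self-contained sketch:

#include <stdio.h>

static int create(const char *name) { printf("create %s\n", name); return 0; }
static void destroy(const char *name) { printf("destroy %s\n", name); }

static int setup(void)
{
        int ret;

        ret = create("vlan");
        if (ret)
                return ret;

        ret = create("fdb");
        if (ret)
                goto err_fdb;   /* unwind only what already exists */

        return 0;

err_fdb:
        destroy("vlan");
        return ret;
}

static void teardown(void)
{
        /* reverse of the creation order */
        destroy("fdb");
        destroy("vlan");
}

int main(void)
{
        if (!setup())
                teardown();
        return 0;
}
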
static int hellcreek_setup(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
@@ -1143,11 +1360,24 @@ static int hellcreek_setup(struct dsa_switch *ds)
return ret;
}
+ ret = hellcreek_setup_devlink_regions(ds);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to setup devlink regions!\n");
+ goto err_regions;
+ }
+
return 0;
+
+err_regions:
+ dsa_devlink_resources_unregister(ds);
+
+ return ret;
}
static void hellcreek_teardown(struct dsa_switch *ds)
{
+ hellcreek_teardown_devlink_regions(ds);
dsa_devlink_resources_unregister(ds);
}
@@ -1518,31 +1748,34 @@ static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
- .get_ethtool_stats = hellcreek_get_ethtool_stats,
- .get_sset_count = hellcreek_get_sset_count,
- .get_strings = hellcreek_get_strings,
- .get_tag_protocol = hellcreek_get_tag_protocol,
- .get_ts_info = hellcreek_get_ts_info,
- .phylink_validate = hellcreek_phylink_validate,
- .port_bridge_join = hellcreek_port_bridge_join,
- .port_bridge_leave = hellcreek_port_bridge_leave,
- .port_disable = hellcreek_port_disable,
- .port_enable = hellcreek_port_enable,
- .port_fdb_add = hellcreek_fdb_add,
- .port_fdb_del = hellcreek_fdb_del,
- .port_fdb_dump = hellcreek_fdb_dump,
- .port_hwtstamp_set = hellcreek_port_hwtstamp_set,
- .port_hwtstamp_get = hellcreek_port_hwtstamp_get,
- .port_prechangeupper = hellcreek_port_prechangeupper,
- .port_rxtstamp = hellcreek_port_rxtstamp,
- .port_setup_tc = hellcreek_port_setup_tc,
- .port_stp_state_set = hellcreek_port_stp_state_set,
- .port_txtstamp = hellcreek_port_txtstamp,
- .port_vlan_add = hellcreek_vlan_add,
- .port_vlan_del = hellcreek_vlan_del,
- .port_vlan_filtering = hellcreek_vlan_filtering,
- .setup = hellcreek_setup,
- .teardown = hellcreek_teardown,
+ .devlink_info_get = hellcreek_devlink_info_get,
+ .get_ethtool_stats = hellcreek_get_ethtool_stats,
+ .get_sset_count = hellcreek_get_sset_count,
+ .get_strings = hellcreek_get_strings,
+ .get_tag_protocol = hellcreek_get_tag_protocol,
+ .get_ts_info = hellcreek_get_ts_info,
+ .phylink_validate = hellcreek_phylink_validate,
+ .port_bridge_flags = hellcreek_bridge_flags,
+ .port_bridge_join = hellcreek_port_bridge_join,
+ .port_bridge_leave = hellcreek_port_bridge_leave,
+ .port_disable = hellcreek_port_disable,
+ .port_enable = hellcreek_port_enable,
+ .port_fdb_add = hellcreek_fdb_add,
+ .port_fdb_del = hellcreek_fdb_del,
+ .port_fdb_dump = hellcreek_fdb_dump,
+ .port_hwtstamp_set = hellcreek_port_hwtstamp_set,
+ .port_hwtstamp_get = hellcreek_port_hwtstamp_get,
+ .port_pre_bridge_flags = hellcreek_pre_bridge_flags,
+ .port_prechangeupper = hellcreek_port_prechangeupper,
+ .port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_setup_tc = hellcreek_port_setup_tc,
+ .port_stp_state_set = hellcreek_port_stp_state_set,
+ .port_txtstamp = hellcreek_port_txtstamp,
+ .port_vlan_add = hellcreek_vlan_add,
+ .port_vlan_del = hellcreek_vlan_del,
+ .port_vlan_filtering = hellcreek_vlan_filtering,
+ .setup = hellcreek_setup,
+ .teardown = hellcreek_teardown,
};
static int hellcreek_probe(struct platform_device *pdev)
@@ -1609,10 +1842,8 @@ static int hellcreek_probe(struct platform_device *pdev)
}
hellcreek->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(hellcreek->base)) {
- dev_err(dev, "No memory available!\n");
+ if (IS_ERR(hellcreek->base))
return PTR_ERR(hellcreek->base);
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp");
if (!res) {
@@ -1621,10 +1852,8 @@ static int hellcreek_probe(struct platform_device *pdev)
}
hellcreek->ptp_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(hellcreek->ptp_base)) {
- dev_err(dev, "No memory available!\n");
+ if (IS_ERR(hellcreek->ptp_base))
return PTR_ERR(hellcreek->ptp_base);
- }
ret = hellcreek_detect(hellcreek);
if (ret) {
@@ -1693,6 +1922,7 @@ static int hellcreek_remove(struct platform_device *pdev)
}
static const struct hellcreek_platform_data de1soc_r1_pdata = {
+ .name = "r4c30",
.num_ports = 4,
.is_100_mbits = 1,
.qbv_support = 1,
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 305e76dab34d..9e303b8ab13c 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -278,6 +278,8 @@ struct hellcreek {
struct mutex reg_lock; /* Switch IP register lock */
struct mutex vlan_lock; /* VLAN bitmaps lock */
struct mutex ptp_lock; /* PTP IP register lock */
+ struct devlink_region *vlan_region;
+ struct devlink_region *fdb_region;
void __iomem *base;
void __iomem *ptp_base;
u16 swcfg; /* swcfg shadow */
@@ -304,4 +306,9 @@ enum hellcreek_devlink_resource_id {
HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
};
+struct hellcreek_devlink_vlan_entry {
+ u16 vid;
+ u16 member;
+};
+
#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index 69dd9a2e8bb6..40b41c794dfa 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -373,30 +373,38 @@ long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
return restart ? 1 : -1;
}
-bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type)
+void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
struct ptp_header *hdr;
+ struct sk_buff *clone;
+ unsigned int type;
ps = &hellcreek->ports[port].port_hwtstamp;
- /* Check if the driver is expected to do HW timestamping */
- if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
- return false;
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
/* Make sure the message is a PTP message that needs to be timestamped
 * and that HW timestamping is enabled for the port. If not, stop
 * here
*/
- hdr = hellcreek_should_tstamp(hellcreek, port, clone, type);
+ hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
if (!hdr)
- return false;
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
- &ps->state))
- return false;
+ &ps->state)) {
+ kfree_skb(clone);
+ return;
+ }
ps->tx_skb = clone;
@@ -406,8 +414,6 @@ bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
ps->tx_tstamp_start = jiffies;
ptp_schedule_worker(hellcreek->ptp_clock, 0);
-
- return true;
}
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
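
The reworked hook clones the skb only after classification and the should-tstamp check succeed, then claims the single in-flight slot with test_and_set_bit_lock(); if the slot is busy the clone is freed and the frame goes out untimestamped. A userspace sketch of the claim/release discipline, with a C11 atomic_flag standing in for the bit lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag tx_in_progress = ATOMIC_FLAG_INIT;

/* returns true if we now own the (single) timestamp slot */
static bool claim_tx_slot(void)
{
        return !atomic_flag_test_and_set(&tx_in_progress);
}

static void release_tx_slot(void)
{
        atomic_flag_clear(&tx_in_progress);
}

int main(void)
{
        if (claim_tx_slot()) {
                printf("slot claimed, keep the clone\n");
                /* ... timestamp completion would run here ... */
                release_tx_slot();
        } else {
                printf("slot busy, free the clone\n");
        }
        return 0;
}
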
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index c0745ffa1ebb..71af77efb28b 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -44,8 +44,8 @@ int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
-bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type);
+void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index bf5c62e5c0b0..314ae78bbdd6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
+ * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
*
* Copyright (C) 2010 Lantiq Deutschland
* Copyright (C) 2012 John Crispin <john@phrozen.org>
@@ -104,6 +104,7 @@
#define GSWIP_MII_CFG_MODE_RMIIP 0x2
#define GSWIP_MII_CFG_MODE_RMIIM 0x3
#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
#define GSWIP_MII_CFG_MODE_MASK 0xf
#define GSWIP_MII_CFG_RATE_M2P5 0x00
#define GSWIP_MII_CFG_RATE_M25 0x10
@@ -241,6 +242,7 @@
struct gswip_hw_info {
int max_ports;
int cpu_port;
+ const struct dsa_switch_ops *ops;
};
struct xway_gphy_match_data {
@@ -1412,12 +1414,42 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
-static void gswip_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void gswip_phylink_set_capab(unsigned long *supported,
+ struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ /* With the exception of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
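
gswip_phylink_set_capab() builds an allow-mask, adds the gigabit modes unless the interface is MII/REVMII/RMII, and intersects both supported and advertising with it. With the kernel's link-mode bitmaps collapsed into a single 64-bit word, the same flow looks like this (the mode bit positions are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { M_10_HALF, M_10_FULL, M_100_HALF, M_100_FULL,
       M_1000_HALF, M_1000_FULL, M_AUTONEG, M_PAUSE };

#define BIT(n) (1ull << (n))

static uint64_t capab_mask(bool is_mii_class)
{
        uint64_t mask = BIT(M_AUTONEG) | BIT(M_PAUSE) |
                        BIT(M_10_HALF) | BIT(M_10_FULL) |
                        BIT(M_100_HALF) | BIT(M_100_FULL);

        if (!is_mii_class)      /* gigabit except on MII/REVMII/RMII */
                mask |= BIT(M_1000_HALF) | BIT(M_1000_FULL);
        return mask;
}

int main(void)
{
        uint64_t supported = ~0ull;
        uint64_t advertising = BIT(M_1000_FULL) | BIT(M_100_FULL);
        uint64_t mask = capab_mask(true);       /* an RMII port */

        supported &= mask;
        advertising &= mask;

        printf("adv after intersect: 0x%llx\n",
               (unsigned long long)advertising); /* 1000baseT dropped */
        return 0;
}
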
+static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
switch (port) {
case 0:
case 1:
@@ -1444,38 +1476,54 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
return;
}
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ gswip_phylink_set_capab(supported, state);
- /* With the exclusion of MII, Reverse MII and Reduced MII, we
- * support Gigabit, including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(state->interface), port);
+}
+
+static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ switch (port) {
+ case 0:
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII)
+ goto unsupported;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ case 5:
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_RMII)
+ goto unsupported;
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ gswip_phylink_set_capab(supported, state);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
return;
unsupported:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
phy_modes(state->interface), port);
- return;
}
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
@@ -1613,6 +1661,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
case PHY_INTERFACE_MODE_RGMII_TXID:
miicfg |= GSWIP_MII_CFG_MODE_RGMII;
break;
+ case PHY_INTERFACE_MODE_GMII:
+ miicfg |= GSWIP_MII_CFG_MODE_GMII;
+ break;
default:
dev_err(ds->dev,
"Unsupported interface: %d\n", state->interface);
@@ -1739,7 +1790,7 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(gswip_rmon_cnt);
}
-static const struct dsa_switch_ops gswip_switch_ops = {
+static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
.port_enable = gswip_port_enable,
@@ -1754,7 +1805,31 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_phylink_validate,
+ .phylink_validate = gswip_xrx200_phylink_validate,
+ .phylink_mac_config = gswip_phylink_mac_config,
+ .phylink_mac_link_down = gswip_phylink_mac_link_down,
+ .phylink_mac_link_up = gswip_phylink_mac_link_up,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+};
+
+static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
+ .phylink_validate = gswip_xrx300_phylink_validate,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
@@ -1983,7 +2058,7 @@ remove_gphy:
static int gswip_probe(struct platform_device *pdev)
{
struct gswip_priv *priv;
- struct device_node *mdio_np, *gphy_fw_np;
+ struct device_node *np, *mdio_np, *gphy_fw_np;
struct device *dev = &pdev->dev;
int err;
int i;
@@ -2016,10 +2091,28 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->dev = dev;
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
- priv->ds->ops = &gswip_switch_ops;
+ priv->ds->ops = priv->hw_info->ops;
priv->dev = dev;
version = gswip_switch_r(priv, GSWIP_VERSION);
+ np = dev->of_node;
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ case GSWIP_VERSION_2_1:
+ if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
+ return -EINVAL;
+ break;
+ case GSWIP_VERSION_2_2:
+ case GSWIP_VERSION_2_2_ETC:
+ if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
+ !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
+ return -EINVAL;
+ break;
+ default:
+ dev_err(dev, "unknown GSWIP version: 0x%x", version);
+ return -ENOENT;
+ }
+
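
The probe now cross-checks the GSWIP version register against the DT compatible, so a 2.2 core bound through the xRX200 compatible (or the reverse) is rejected instead of being driven with the wrong ops. A table-driven sketch of that validation; the version codes are placeholders, only the compatible strings come from the patch:

#include <stdio.h>
#include <string.h>

struct variant { unsigned int version; const char *compatible; };

/* version codes are stand-ins for GSWIP_VERSION_2_x */
static const struct variant variants[] = {
        { 0x100, "lantiq,xrx200-gswip" },
        { 0x021, "lantiq,xrx200-gswip" },
        { 0x122, "lantiq,xrx300-gswip" },
        { 0x122, "lantiq,xrx330-gswip" },
};

static int check_version(unsigned int version, const char *compatible)
{
        int known = 0;

        for (size_t i = 0; i < sizeof(variants) / sizeof(variants[0]); i++) {
                if (variants[i].version != version)
                        continue;
                known = 1;
                if (!strcmp(variants[i].compatible, compatible))
                        return 0;       /* version matches the binding */
        }
        return known ? -22 : -2;        /* -EINVAL vs -ENOENT */
}

int main(void)
{
        printf("%d\n", check_version(0x122, "lantiq,xrx200-gswip"));
        printf("%d\n", check_version(0x122, "lantiq,xrx330-gswip"));
        return 0;
}
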
/* bring up the mdio bus */
gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
if (gphy_fw_np) {
@@ -2097,10 +2190,19 @@ static int gswip_remove(struct platform_device *pdev)
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
.cpu_port = 6,
+ .ops = &gswip_xrx200_switch_ops,
+};
+
+static const struct gswip_hw_info gswip_xrx300 = {
+ .max_ports = 7,
+ .cpu_port = 6,
+ .ops = &gswip_xrx300_switch_ops,
};
static const struct of_device_id gswip_of_match[] = {
{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
+ { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
+ { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 4ec6a47b7f72..c9e2a8989556 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -29,7 +29,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ8795
depends on NET_DSA
select NET_DSA_MICROCHIP_KSZ_COMMON
help
- This driver adds support for Microchip KSZ8795 switch chips.
+ This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips.
config NET_DSA_MICROCHIP_KSZ8795_SPI
tristate "KSZ8795 series SPI connected switch driver"
@@ -40,3 +40,11 @@ config NET_DSA_MICROCHIP_KSZ8795_SPI
It is required to use the KSZ8795 switch driver as the only access
is through SPI.
+
+config NET_DSA_MICROCHIP_KSZ8863_SMI
+ tristate "KSZ series SMI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ8795
+ select MDIO_BITBANG
+ help
+ Select to enable support for registering switches configured through
+ Microchip SMI. It supports the KSZ8863 and KSZ8873 switches.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 929caa81e782..2a03b21a3386 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
new file mode 100644
index 000000000000..9d611895d3cf
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ8XXX series register access
+ *
+ * Copyright (C) 2020 Pengutronix, Michael Grzeschik <kernel@pengutronix.de>
+ */
+
+#ifndef __KSZ8XXX_H
+#define __KSZ8XXX_H
+#include <linux/kernel.h>
+
+enum ksz_regs {
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+};
+
+struct ksz8 {
+ const u8 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ void *priv;
+};
+
+#endif
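
ksz8.h replaces the per-chip #define soup with enum-indexed tables: shared code indexes regs/masks/shifts through one set of enums while each variant supplies its own arrays. The pattern in miniature, reusing two offsets from the tables added below:

#include <stdint.h>
#include <stdio.h>

enum regs { REG_IND_CTRL_0, REG_IND_DATA_LO, N_REGS };

static const uint8_t ksz8795_regs[N_REGS] = {
        [REG_IND_CTRL_0]  = 0x6E,
        [REG_IND_DATA_LO] = 0x75,
};

static const uint8_t ksz8863_regs[N_REGS] = {
        [REG_IND_CTRL_0]  = 0x79,
        [REG_IND_DATA_LO] = 0x80,
};

struct chip { const uint8_t *regs; };

/* generic code never hardcodes a chip-specific offset */
static void read_ind(const struct chip *c, enum regs r)
{
        printf("read @0x%02x\n", c->regs[r]);
}

int main(void)
{
        struct chip a = { .regs = ksz8795_regs };
        struct chip b = { .regs = ksz8863_regs };

        read_ind(&a, REG_IND_DATA_LO);  /* 0x75 */
        read_ind(&b, REG_IND_DATA_LO);  /* 0x80 */
        return 0;
}
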
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index b4b7de63ca79..ad509a57a945 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -20,10 +20,112 @@
#include "ksz_common.h"
#include "ksz8795_reg.h"
+#include "ksz8.h"
+
+static const u8 ksz8795_regs[] = {
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
+static const u8 ksz8863_regs[] = {
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
-static const struct {
+static const u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 24,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+struct mib_names {
char string[ETH_GSTRING_LEN];
-} mib_names[] = {
+};
+
+static const struct mib_names ksz87xx_mib_names[] = {
{ "rx_hi" },
{ "rx_undersize" },
{ "rx_fragments" },
@@ -62,6 +164,48 @@ static const struct {
{ "tx_discards" },
};
+static const struct mib_names ksz88xx_mib_names[] = {
+ { "rx" },
+ { "rx_hi" },
+ { "rx_undersize" },
+ { "rx_fragments" },
+ { "rx_oversize" },
+ { "rx_jabbers" },
+ { "rx_symbol_err" },
+ { "rx_crc_err" },
+ { "rx_align_err" },
+ { "rx_mac_ctrl" },
+ { "rx_pause" },
+ { "rx_bcast" },
+ { "rx_mcast" },
+ { "rx_ucast" },
+ { "rx_64_or_less" },
+ { "rx_65_127" },
+ { "rx_128_255" },
+ { "rx_256_511" },
+ { "rx_512_1023" },
+ { "rx_1024_1522" },
+ { "tx" },
+ { "tx_hi" },
+ { "tx_late_col" },
+ { "tx_pause" },
+ { "tx_bcast" },
+ { "tx_mcast" },
+ { "tx_ucast" },
+ { "tx_deferred" },
+ { "tx_total_col" },
+ { "tx_exc_col" },
+ { "tx_single_col" },
+ { "tx_mult_col" },
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+static bool ksz_is_ksz88x3(struct ksz_device *dev)
+{
+ return dev->chip_id == 0x8830;
+}
+
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -74,12 +218,20 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
-static int ksz8795_reset_switch(struct ksz_device *dev)
+static int ksz8_reset_switch(struct ksz_device *dev)
{
- /* reset switch */
- ksz_write8(dev, REG_POWER_MANAGEMENT_1,
- SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
- ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
+ if (ksz_is_ksz88x3(dev)) {
+ /* reset switch */
+ ksz_cfg(dev, KSZ8863_REG_SW_RESET,
+ KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
+ ksz_cfg(dev, KSZ8863_REG_SW_RESET,
+ KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
+ } else {
+ /* reset switch */
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1,
+ SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
+ }
return 0;
}
@@ -117,29 +269,34 @@ static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
true);
}
-static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt)
+static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
+ struct ksz8 *ksz8 = dev->priv;
+ const u32 *masks;
+ const u8 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
+ masks = ksz8->masks;
+ regs = ksz8->regs;
+
ctrl_addr = addr + dev->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
/* It is almost guaranteed to always read the valid bit because of
* slow SPI speed.
*/
for (loop = 2; loop > 0; loop--) {
- ksz_read8(dev, REG_IND_MIB_CHECK, &check);
+ ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
- if (check & MIB_COUNTER_VALID) {
- ksz_read32(dev, REG_IND_DATA_LO, &data);
- if (check & MIB_COUNTER_OVERFLOW)
+ if (check & masks[MIB_COUNTER_VALID]) {
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ if (check & masks[MIB_COUNTER_OVERFLOW])
*cnt += MIB_COUNTER_VALUE + 1;
*cnt += data & MIB_COUNTER_VALUE;
break;
@@ -151,27 +308,33 @@ static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
+ struct ksz8 *ksz8 = dev->priv;
+ const u32 *masks;
+ const u8 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
+ masks = ksz8->masks;
+ regs = ksz8->regs;
+
addr -= dev->reg_mib_cnt;
- ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port;
- ctrl_addr += addr + KS_MIB_TOTAL_RX_0;
+ ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
+ ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
 /* It is almost guaranteed that the valid bit reads as set on the first
 * pass because of the slow SPI speed.
*/
for (loop = 2; loop > 0; loop--) {
- ksz_read8(dev, REG_IND_MIB_CHECK, &check);
+ ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
- if (check & MIB_COUNTER_VALID) {
- ksz_read32(dev, REG_IND_DATA_LO, &data);
+ if (check & masks[MIB_COUNTER_VALID]) {
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
if (addr < 2) {
u64 total;
@@ -179,13 +342,13 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
total <<= 32;
*cnt += total;
*cnt += data;
- if (check & MIB_COUNTER_OVERFLOW) {
+ if (check & masks[MIB_COUNTER_OVERFLOW]) {
total = MIB_TOTAL_BYTES_H + 1;
total <<= 32;
*cnt += total;
}
} else {
- if (check & MIB_COUNTER_OVERFLOW)
+ if (check & masks[MIB_COUNTER_OVERFLOW])
*cnt += MIB_PACKET_DROPPED + 1;
*cnt += data & MIB_PACKET_DROPPED;
}
@@ -195,8 +358,52 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
mutex_unlock(&dev->alu_mutex);
}
-static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *regs = ksz8->regs;
+ u32 *last = (u32 *)dropped;
+ u16 ctrl_addr;
+ u32 data;
+ u32 cur;
+
+ addr -= dev->reg_mib_cnt;
+ ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
+ KSZ8863_MIB_PACKET_DROPPED_RX_0;
+ ctrl_addr += port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ mutex_unlock(&dev->alu_mutex);
+
+ data &= MIB_PACKET_DROPPED;
+ cur = last[addr];
+ if (data != cur) {
+ last[addr] = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+}
+
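
The KSZ8863 dropped-packet counters have no overflow flag, so the driver caches the previous raw reading in *dropped and accumulates only the delta, adding one full counter period whenever the hardware value wraps below the cached one. The arithmetic in isolation, assuming a 16-bit counter mask (the real width is whatever MIB_PACKET_DROPPED encodes):

#include <stdint.h>
#include <stdio.h>

#define COUNTER_MASK 0xffffu    /* assumed width of the hw counter */

static uint64_t total;
static uint32_t last;           /* previous raw hw reading */

static void accumulate(uint32_t raw)
{
        uint32_t data = raw & COUNTER_MASK;
        uint32_t cur = last;

        if (data == cur)        /* nothing new since the last poll */
                return;

        last = data;
        if (data < cur)         /* hw counter wrapped around */
                data += COUNTER_MASK + 1;
        total += data - cur;
}

int main(void)
{
        accumulate(0xfff0);
        accumulate(0x0010);     /* wrapped: adds 0x20, not a huge jump */
        printf("total = %llu\n", (unsigned long long)total);    /* 65552 */
        return 0;
}
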
+static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
+ if (ksz_is_ksz88x3(dev))
+ ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
+ else
+ ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
+}
+
+static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ if (ksz_is_ksz88x3(dev))
+ return;
+
/* enable the port for flush/freeze function */
if (freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
@@ -207,14 +414,17 @@ static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
-static void ksz8795_port_init_cnt(struct ksz_device *dev, int port)
+static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
+ u64 *dropped;
- /* flush all enabled port MIB counters */
- ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
- ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
- ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+ if (!ksz_is_ksz88x3(dev)) {
+ /* flush all enabled port MIB counters */
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+ }
mib->cnt_ptr = 0;
@@ -225,80 +435,99 @@ static void ksz8795_port_init_cnt(struct ksz_device *dev, int port)
++mib->cnt_ptr;
}
+ /* last one in storage */
+ dropped = &mib->counters[dev->mib_cnt];
+
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
while (mib->cnt_ptr < dev->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
- NULL, &mib->counters[mib->cnt_ptr]);
+ dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
mib->cnt_ptr = 0;
memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
-static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr,
- u64 *data)
+static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *regs = ksz8->regs;
u16 ctrl_addr;
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
- ksz_read64(dev, REG_IND_DATA_HI, data);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ksz_read64(dev, regs[REG_IND_DATA_HI], data);
mutex_unlock(&dev->alu_mutex);
}
-static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr,
- u64 data)
+static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *regs = ksz8->regs;
u16 ctrl_addr;
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write64(dev, REG_IND_DATA_HI, data);
- ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ ksz_write64(dev, regs[REG_IND_DATA_HI], data);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
mutex_unlock(&dev->alu_mutex);
}
-static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data)
+static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
+ struct ksz8 *ksz8 = dev->priv;
int timeout = 100;
+ const u32 *masks;
+ const u8 *regs;
+
+ masks = ksz8->masks;
+ regs = ksz8->regs;
do {
- ksz_read8(dev, REG_IND_DATA_CHECK, data);
+ ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
timeout--;
- } while ((*data & DYNAMIC_MAC_TABLE_NOT_READY) && timeout);
+ } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
/* Entry is not ready for accessing. */
- if (*data & DYNAMIC_MAC_TABLE_NOT_READY) {
+ if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
return -EAGAIN;
/* Entry is ready for accessing. */
} else {
- ksz_read8(dev, REG_IND_DATA_8, data);
+ ksz_read8(dev, regs[REG_IND_DATA_8], data);
/* There is no valid entry in the table. */
- if (*data & DYNAMIC_MAC_TABLE_MAC_EMPTY)
+ if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
return -ENXIO;
}
return 0;
}
-static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
- u8 *mac_addr, u8 *fid, u8 *src_port,
- u8 *timestamp, u16 *entries)
+static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
+ u8 *mac_addr, u8 *fid, u8 *src_port,
+ u8 *timestamp, u16 *entries)
{
+ struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ const u8 *regs;
u16 ctrl_addr;
u8 data;
int rc;
+ shifts = ksz8->shifts;
+ masks = ksz8->masks;
+ regs = ksz8->regs;
+
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
- rc = ksz8795_valid_dyn_entry(dev, &data);
+ rc = ksz8_valid_dyn_entry(dev, &data);
if (rc == -EAGAIN) {
if (addr == 0)
*entries = 0;
@@ -309,23 +538,23 @@ static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
u64 buf = 0;
int cnt;
- ksz_read64(dev, REG_IND_DATA_HI, &buf);
+ ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
data_hi = (u32)(buf >> 32);
data_lo = (u32)buf;
/* Check out how many valid entry in the table. */
- cnt = data & DYNAMIC_MAC_TABLE_ENTRIES_H;
- cnt <<= DYNAMIC_MAC_ENTRIES_H_S;
- cnt |= (data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >>
- DYNAMIC_MAC_ENTRIES_S;
+ cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
+ cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
+ cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
+ shifts[DYNAMIC_MAC_ENTRIES];
*entries = cnt + 1;
- *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >>
- DYNAMIC_MAC_FID_S;
- *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >>
- DYNAMIC_MAC_SRC_PORT_S;
- *timestamp = (data_hi & DYNAMIC_MAC_TABLE_TIMESTAMP) >>
- DYNAMIC_MAC_TIMESTAMP_S;
+ *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
+ shifts[DYNAMIC_MAC_FID];
+ *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ shifts[DYNAMIC_MAC_SRC_PORT];
+ *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
+ shifts[DYNAMIC_MAC_TIMESTAMP];
mac_addr[5] = (u8)data_lo;
mac_addr[4] = (u8)(data_lo >> 8);
@@ -341,91 +570,128 @@ static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
return rc;
}
-static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
+ struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
u64 data;
- ksz8795_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ shifts = ksz8->shifts;
+ masks = ksz8->masks;
+
+ ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
data_hi = data >> 32;
data_lo = (u32)data;
- if (data_hi & (STATIC_MAC_TABLE_VALID | STATIC_MAC_TABLE_OVERRIDE)) {
+ if (data_hi & (masks[STATIC_MAC_TABLE_VALID] |
+ masks[STATIC_MAC_TABLE_OVERRIDE])) {
alu->mac[5] = (u8)data_lo;
alu->mac[4] = (u8)(data_lo >> 8);
alu->mac[3] = (u8)(data_lo >> 16);
alu->mac[2] = (u8)(data_lo >> 24);
alu->mac[1] = (u8)data_hi;
alu->mac[0] = (u8)(data_hi >> 8);
- alu->port_forward = (data_hi & STATIC_MAC_TABLE_FWD_PORTS) >>
- STATIC_MAC_FWD_PORTS_S;
+ alu->port_forward =
+ (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
+ shifts[STATIC_MAC_FWD_PORTS];
alu->is_override =
- (data_hi & STATIC_MAC_TABLE_OVERRIDE) ? 1 : 0;
+ (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
data_hi >>= 1;
- alu->is_use_fid = (data_hi & STATIC_MAC_TABLE_USE_FID) ? 1 : 0;
- alu->fid = (data_hi & STATIC_MAC_TABLE_FID) >>
- STATIC_MAC_FID_S;
+ alu->is_static = true;
+ alu->is_use_fid =
+ (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
+ alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
+ shifts[STATIC_MAC_FID];
return 0;
}
return -ENXIO;
}
-static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
+ struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
u64 data;
+ shifts = ksz8->shifts;
+ masks = ksz8->masks;
+
data_lo = ((u32)alu->mac[2] << 24) |
((u32)alu->mac[3] << 16) |
((u32)alu->mac[4] << 8) | alu->mac[5];
data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
- data_hi |= (u32)alu->port_forward << STATIC_MAC_FWD_PORTS_S;
+ data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS];
if (alu->is_override)
- data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE];
if (alu->is_use_fid) {
- data_hi |= STATIC_MAC_TABLE_USE_FID;
- data_hi |= (u32)alu->fid << STATIC_MAC_FID_S;
+ data_hi |= masks[STATIC_MAC_TABLE_USE_FID];
+ data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID];
}
if (alu->is_static)
- data_hi |= STATIC_MAC_TABLE_VALID;
+ data_hi |= masks[STATIC_MAC_TABLE_VALID];
else
- data_hi &= ~STATIC_MAC_TABLE_OVERRIDE;
+ data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
data = (u64)data_hi << 32 | data_lo;
- ksz8795_w_table(dev, TABLE_STATIC_MAC, addr, data);
+ ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
}
-static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid)
+static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
+ u8 *member, u8 *valid)
{
- *fid = vlan & VLAN_TABLE_FID;
- *member = (vlan & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S;
- *valid = !!(vlan & VLAN_TABLE_VALID);
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *shifts;
+ const u32 *masks;
+
+ shifts = ksz8->shifts;
+ masks = ksz8->masks;
+
+ *fid = vlan & masks[VLAN_TABLE_FID];
+ *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
+ shifts[VLAN_TABLE_MEMBERSHIP_S];
+ *valid = !!(vlan & masks[VLAN_TABLE_VALID]);
}
-static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan)
+static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
+ u16 *vlan)
{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *shifts;
+ const u32 *masks;
+
+ shifts = ksz8->shifts;
+ masks = ksz8->masks;
+
*vlan = fid;
- *vlan |= (u16)member << VLAN_TABLE_MEMBERSHIP_S;
+ *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
if (valid)
- *vlan |= VLAN_TABLE_VALID;
+ *vlan |= masks[VLAN_TABLE_VALID];
}
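+/* Editor's note, illustration only: with the KSZ8795 values from the
+ * removed defines below (FID mask 0x007F, membership mask 0x0F80 shifted
+ * by 7, valid bit 0x1000), fid = 5, member = 0x1F, valid = 1 packs to
+ * 0x1000 | (0x1F << 7) | 0x5 = 0x1f85, and ksz8_from_vlan() recovers the
+ * same triple from that word.
+ */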
-static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr)
+static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *shifts;
u64 data;
int i;
- ksz8795_r_table(dev, TABLE_VLAN, addr, &data);
+ shifts = ksz8->shifts;
+
+ ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= dev->phy_port_cnt;
for (i = 0; i < dev->phy_port_cnt; i++) {
dev->vlan_cache[addr + i].table[0] = (u16)data;
- data >>= VLAN_TABLE_S;
+ data >>= shifts[VLAN_TABLE];
}
}
-static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
+static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
{
int index;
u16 *data;
@@ -435,11 +701,11 @@ static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
data = (u16 *)&buf;
addr = vid / dev->phy_port_cnt;
index = vid & 3;
- ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
+ ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
*vlan = data[index];
}
-static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
+static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
{
int index;
u16 *data;
@@ -449,30 +715,37 @@ static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
data = (u16 *)&buf;
addr = vid / dev->phy_port_cnt;
index = vid & 3;
- ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
+ ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
data[index] = vlan;
dev->vlan_cache[vid].table[0] = vlan;
- ksz8795_w_table(dev, TABLE_VLAN, addr, buf);
+ ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
+ struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, link;
+ const u8 *regs = ksz8->regs;
int processed = true;
u16 data = 0;
u8 p = phy;
switch (reg) {
case PHY_REG_CTRL:
- ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
- ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
- ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
+ ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
if (restart & PORT_PHY_LOOPBACK)
data |= PHY_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
data |= PHY_SPEED_100MBIT;
- if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= PHY_AUTO_NEG_ENABLE;
+ if (ksz_is_ksz88x3(dev)) {
+		if (ctrl & PORT_AUTO_NEG_ENABLE)
+ data |= PHY_AUTO_NEG_ENABLE;
+ } else {
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ data |= PHY_AUTO_NEG_ENABLE;
+ }
if (restart & PORT_POWER_DOWN)
data |= PHY_POWER_DOWN;
if (restart & PORT_AUTO_NEG_RESTART)
@@ -491,7 +764,7 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= PHY_LED_DISABLE;
break;
case PHY_REG_STATUS:
- ksz_pread8(dev, p, P_LINK_STATUS, &link);
+ ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
data = PHY_100BTX_FD_CAPABLE |
PHY_100BTX_CAPABLE |
PHY_10BT_FD_CAPABLE |
@@ -506,10 +779,13 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data = KSZ8795_ID_HI;
break;
case PHY_REG_ID_2:
- data = KSZ8795_ID_LO;
+ if (ksz_is_ksz88x3(dev))
+ data = KSZ8863_ID_LO;
+ else
+ data = KSZ8795_ID_LO;
break;
case PHY_REG_AUTO_NEGOTIATION:
- ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
+ ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
data = PHY_AUTO_NEG_802_3;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= PHY_AUTO_NEG_SYM_PAUSE;
@@ -523,7 +799,7 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= PHY_AUTO_NEG_10BT;
break;
case PHY_REG_REMOTE_CAPABILITY:
- ksz_pread8(dev, p, P_REMOTE_STATUS, &link);
+ ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
data = PHY_AUTO_NEG_802_3;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= PHY_AUTO_NEG_SYM_PAUSE;
@@ -546,10 +822,12 @@ static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
*val = data;
}
-static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
- u8 p = phy;
+ struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, data;
+ const u8 *regs = ksz8->regs;
+ u8 p = phy;
switch (reg) {
case PHY_REG_CTRL:
@@ -557,24 +835,32 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
/* Do not support PHY reset function. */
if (val & PHY_RESET)
break;
- ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
+ ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
data = speed;
if (val & PHY_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
if (data != speed)
- ksz_pwrite8(dev, p, P_SPEED_STATUS, data);
- ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
+ ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
data = ctrl;
- if (!(val & PHY_AUTO_NEG_ENABLE))
- data |= PORT_AUTO_NEG_DISABLE;
- else
- data &= ~PORT_AUTO_NEG_DISABLE;
+ if (ksz_is_ksz88x3(dev)) {
+		if (val & PHY_AUTO_NEG_ENABLE)
+ data |= PORT_AUTO_NEG_ENABLE;
+ else
+ data &= ~PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & PHY_AUTO_NEG_ENABLE))
+ data |= PORT_AUTO_NEG_DISABLE;
+ else
+ data &= ~PORT_AUTO_NEG_DISABLE;
+
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[p].fiber)
+ data |= PORT_AUTO_NEG_DISABLE;
+ }
- /* Fiber port does not support auto-negotiation. */
- if (dev->ports[p].fiber)
- data |= PORT_AUTO_NEG_DISABLE;
if (val & PHY_SPEED_100MBIT)
data |= PORT_FORCE_100_MBIT;
else
@@ -584,8 +870,8 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
else
data &= ~PORT_FORCE_FULL_DUPLEX;
if (data != ctrl)
- ksz_pwrite8(dev, p, P_FORCE_CTRL, data);
- ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
+ ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
data = restart;
if (val & PHY_LED_DISABLE)
data |= PORT_LED_OFF;
@@ -616,10 +902,10 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
else
data &= ~PORT_PHY_LOOPBACK;
if (data != restart)
- ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data);
+ ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
break;
case PHY_REG_AUTO_NEGOTIATION:
- ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
+ ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
@@ -637,34 +923,37 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
if (val & PHY_AUTO_NEG_10BT)
data |= PORT_AUTO_NEG_10BT;
if (data != ctrl)
- ksz_pwrite8(dev, p, P_LOCAL_CTRL, data);
+ ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
break;
default:
break;
}
}
-static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
+static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
{
- return DSA_TAG_PROTO_KSZ8795;
+ struct ksz_device *dev = ds->priv;
+
+	/* ksz88x3 uses the same tagging scheme as the KSZ9893 */
+ return ksz_is_ksz88x3(dev) ?
+ DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
}
-static void ksz8795_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
+static void ksz8_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
{
struct ksz_device *dev = ds->priv;
int i;
for (i = 0; i < dev->mib_cnt; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
- ETH_GSTRING_LEN);
+ memcpy(buf + i * ETH_GSTRING_LEN,
+ dev->mib_names[i].string, ETH_GSTRING_LEN);
}
}
-static void ksz8795_cfg_port_member(struct ksz_device *dev, int port,
- u8 member)
+static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
@@ -675,8 +964,7 @@ static void ksz8795_cfg_port_member(struct ksz_device *dev, int port,
dev->ports[port].member = member;
}
-static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state)
+static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct ksz_device *dev = ds->priv;
int forward = dev->member;
@@ -734,7 +1022,7 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
p->stp_state = state;
/* Port membership may share register with STP state. */
if (member >= 0 && member != p->member)
- ksz8795_cfg_port_member(dev, port, (u8)member);
+ ksz8_cfg_port_member(dev, port, (u8)member);
/* Check if forwarding needs to be updated. */
if (state != BR_STATE_FORWARDING) {
@@ -749,7 +1037,7 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
ksz_update_port_member(dev, port);
}
-static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
+static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
@@ -782,30 +1070,35 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
}
-static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct netlink_ext_ack *extack)
+static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
return 0;
}
-static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
u16 data, new_pvid = 0;
u8 fid, member, valid;
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- ksz8795_r_vlan_table(dev, vlan->vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8_r_vlan_table(dev, vlan->vid, &data);
+ ksz8_from_vlan(dev, data, &fid, &member, &valid);
/* First time to setup the VLAN entry. */
if (!valid) {
@@ -815,8 +1108,8 @@ static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
}
member |= BIT(port);
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vlan->vid, data);
+ ksz8_to_vlan(dev, fid, member, valid, &data);
+ ksz8_w_vlan_table(dev, vlan->vid, data);
/* change PVID */
if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
@@ -834,21 +1127,24 @@ static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
u16 data, pvid, new_pvid = 0;
u8 fid, member, valid;
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
pvid = pvid & 0xFFF;
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- ksz8795_r_vlan_table(dev, vlan->vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8_r_vlan_table(dev, vlan->vid, &data);
+ ksz8_from_vlan(dev, data, &fid, &member, &valid);
member &= ~BIT(port);
@@ -861,8 +1157,8 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
if (pvid == vlan->vid)
new_pvid = 1;
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vlan->vid, data);
+ ksz8_to_vlan(dev, fid, member, valid, &data);
+ ksz8_w_vlan_table(dev, vlan->vid, data);
if (new_pvid != pvid)
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
@@ -870,9 +1166,9 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
{
struct ksz_device *dev = ds->priv;
@@ -894,8 +1190,8 @@ static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
struct ksz_device *dev = ds->priv;
u8 data;
@@ -915,91 +1211,111 @@ static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}
-static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+ u8 data8;
+
+ if (!p->interface && dev->compat_interface) {
+ dev_warn(dev->dev,
+ "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
+ "Please update your device tree.\n",
+ port);
+ p->interface = dev->compat_interface;
+ }
+
+ /* Configure MII interface for proper network communication. */
+ ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
+ data8 &= ~PORT_INTERFACE_TYPE;
+ data8 &= ~PORT_GMII_1GPS_MODE;
+ switch (p->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= PORT_INTERFACE_RMII;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= PORT_GMII_1GPS_MODE;
+ data8 |= PORT_INTERFACE_GMII;
+ p->phydev.speed = SPEED_1000;
+ break;
+ default:
+ data8 &= ~PORT_RGMII_ID_IN_ENABLE;
+ data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
+ if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ data8 |= PORT_RGMII_ID_IN_ENABLE;
+ if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ data8 |= PORT_RGMII_ID_OUT_ENABLE;
+ data8 |= PORT_GMII_1GPS_MODE;
+ data8 |= PORT_INTERFACE_RGMII;
+ p->phydev.speed = SPEED_1000;
+ break;
+ }
+ ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
+ p->phydev.duplex = 1;
+}
+
+static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct ksz_port *p = &dev->ports[port];
- u8 data8, member;
+ struct ksz8 *ksz8 = dev->priv;
+ const u32 *masks;
+ u8 member;
+
+ masks = ksz8->masks;
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- ksz8795_set_prio_queue(dev, port, 4);
+ if (!ksz_is_ksz88x3(dev))
+ ksz8795_set_prio_queue(dev, port, 4);
/* disable DiffServ priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
/* replace priority */
- ksz_port_cfg(dev, port, P_802_1P_CTRL, PORT_802_1P_REMAPPING, false);
+ ksz_port_cfg(dev, port, P_802_1P_CTRL,
+ masks[PORT_802_1P_REMAPPING], false);
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
if (cpu_port) {
- if (!p->interface && dev->compat_interface) {
- dev_warn(dev->dev,
- "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
- "Please update your device tree.\n",
- port);
- p->interface = dev->compat_interface;
- }
-
- /* Configure MII interface for proper network communication. */
- ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
- data8 &= ~PORT_INTERFACE_TYPE;
- data8 &= ~PORT_GMII_1GPS_MODE;
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- data8 |= PORT_INTERFACE_RMII;
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_GMII;
- p->phydev.speed = SPEED_1000;
- break;
- default:
- data8 &= ~PORT_RGMII_ID_IN_ENABLE;
- data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IN_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_OUT_ENABLE;
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_RGMII;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
- p->phydev.duplex = 1;
+ if (!ksz_is_ksz88x3(dev))
+ ksz8795_cpu_interface_select(dev, port);
member = dev->port_mask;
} else {
member = dev->host_mask | p->vid_member;
}
- ksz8795_cfg_port_member(dev, port, member);
+ ksz8_cfg_port_member(dev, port, member);
}
-static void ksz8795_config_cpu_port(struct dsa_switch *ds)
+static void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *regs = ksz8->regs;
struct ksz_port *p;
+ const u32 *masks;
u8 remote;
int i;
+ masks = ksz8->masks;
+
/* Switch marks the maximum frame with extra byte as oversize. */
ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
- ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true);
+ ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
p = &dev->ports[dev->cpu_port];
p->vid_member = dev->port_mask;
p->on = 1;
- ksz8795_port_setup(dev, dev->cpu_port, true);
+ ksz8_port_setup(dev, dev->cpu_port, true);
dev->member = dev->host_mask;
for (i = 0; i < dev->phy_port_cnt; i++) {
@@ -1010,7 +1326,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds)
*/
p->vid_member = BIT(i);
p->member = dev->port_mask;
- ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
if (i == dev->phy_port_cnt)
@@ -1022,9 +1338,11 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds)
p = &dev->ports[i];
if (!p->on)
continue;
- ksz_pread8(dev, i, P_REMOTE_STATUS, &remote);
- if (remote & PORT_FIBER_MODE)
- p->fiber = 1;
+ if (!ksz_is_ksz88x3(dev)) {
+ ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
+ if (remote & PORT_FIBER_MODE)
+ p->fiber = 1;
+ }
if (p->fiber)
ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
true);
@@ -1034,7 +1352,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds)
}
}
-static int ksz8795_setup(struct dsa_switch *ds)
+static int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -1045,7 +1363,7 @@ static int ksz8795_setup(struct dsa_switch *ds)
if (!dev->vlan_cache)
return -ENOMEM;
- ret = ksz8795_reset_switch(dev);
+ ret = ksz8_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
@@ -1068,7 +1386,7 @@ static int ksz8795_setup(struct dsa_switch *ds)
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
- ksz8795_config_cpu_port(ds);
+ ksz8_config_cpu_port(ds);
ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
@@ -1083,7 +1401,7 @@ static int ksz8795_setup(struct dsa_switch *ds)
BROADCAST_STORM_PROT_RATE) / 100);
for (i = 0; i < (dev->num_vlans / 4); i++)
- ksz8795_r_vlan_entries(dev, i);
+ ksz8_r_vlan_entries(dev, i);
/* Setup STP address for STP operation. */
memset(&alu, 0, sizeof(alu));
@@ -1092,7 +1410,7 @@ static int ksz8795_setup(struct dsa_switch *ds)
alu.is_override = true;
alu.port_forward = dev->host_mask;
- ksz8795_w_sta_mac_table(dev, 0, &alu);
+ ksz8_w_sta_mac_table(dev, 0, &alu);
ksz_init_mib_timer(dev);
@@ -1101,36 +1419,36 @@ static int ksz8795_setup(struct dsa_switch *ds)
return 0;
}
-static const struct dsa_switch_ops ksz8795_switch_ops = {
- .get_tag_protocol = ksz8795_get_tag_protocol,
- .setup = ksz8795_setup,
+static const struct dsa_switch_ops ksz8_switch_ops = {
+ .get_tag_protocol = ksz8_get_tag_protocol,
+ .setup = ksz8_setup,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
- .get_strings = ksz8795_get_strings,
+ .get_strings = ksz8_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz8795_port_stp_state_set,
+ .port_stp_state_set = ksz8_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz8795_port_vlan_filtering,
- .port_vlan_add = ksz8795_port_vlan_add,
- .port_vlan_del = ksz8795_port_vlan_del,
+ .port_vlan_filtering = ksz8_port_vlan_filtering,
+ .port_vlan_add = ksz8_port_vlan_add,
+ .port_vlan_del = ksz8_port_vlan_del,
.port_fdb_dump = ksz_port_fdb_dump,
.port_mdb_add = ksz_port_mdb_add,
.port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz8795_port_mirror_add,
- .port_mirror_del = ksz8795_port_mirror_del,
+ .port_mirror_add = ksz8_port_mirror_add,
+ .port_mirror_del = ksz8_port_mirror_del,
};
-static u32 ksz8795_get_port_addr(int port, int offset)
+static u32 ksz8_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz8795_switch_detect(struct ksz_device *dev)
+static int ksz8_switch_detect(struct ksz_device *dev)
{
u8 id1, id2;
u16 id16;
@@ -1143,19 +1461,30 @@ static int ksz8795_switch_detect(struct ksz_device *dev)
id1 = id16 >> 8;
id2 = id16 & SW_CHIP_ID_M;
- if (id1 != FAMILY_ID ||
- (id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
- return -ENODEV;
- if (id2 == CHIP_ID_95) {
- u8 val;
+ switch (id1) {
+ case KSZ87_FAMILY_ID:
+		if (id2 != CHIP_ID_94 && id2 != CHIP_ID_95)
+ return -ENODEV;
+
+ if (id2 == CHIP_ID_95) {
+ u8 val;
- id2 = 0x95;
- ksz_read8(dev, REG_PORT_1_STATUS_0, &val);
- if (val & PORT_FIBER_MODE)
- id2 = 0x65;
- } else if (id2 == CHIP_ID_94) {
- id2 = 0x94;
+ id2 = 0x95;
+ ksz_read8(dev, REG_PORT_STATUS_0, &val);
+ if (val & PORT_FIBER_MODE)
+ id2 = 0x65;
+ } else if (id2 == CHIP_ID_94) {
+ id2 = 0x94;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 != CHIP_ID_63)
+ return -ENODEV;
+ break;
+ default:
+ dev_err(dev->dev, "invalid family id: %d\n", id1);
+ return -ENODEV;
}
id16 &= ~0xff;
id16 |= id2;
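+	/* Editor's note: a KSZ8795, for example, reads family 0x87 with
+	 * CHIP_ID_95 (0x90) here, which the code above rewrites to
+	 * id16 = 0x8795 so it matches the chip_id table below; a
+	 * KSZ8863/8873 instead reports family 0x88 with CHIP_ID_63.
+	 */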
@@ -1174,7 +1503,7 @@ struct ksz_chip_data {
int port_cnt;
};
-static const struct ksz_chip_data ksz8795_switch_chips[] = {
+static const struct ksz_chip_data ksz8_switch_chips[] = {
{
.chip_id = 0x8795,
.dev_name = "KSZ8795",
@@ -1216,16 +1545,26 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
},
+ {
+ .chip_id = 0x8830,
+ .dev_name = "KSZ8863/KSZ8873",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+		.port_cnt = 3,		/* total cpu and user ports */
+ },
};
-static int ksz8795_switch_init(struct ksz_device *dev)
+static int ksz8_switch_init(struct ksz_device *dev)
{
+ struct ksz8 *ksz8 = dev->priv;
int i;
- dev->ds->ops = &ksz8795_switch_ops;
+ dev->ds->ops = &ksz8_switch_ops;
- for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz8795_switch_chips[i];
+ for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz8_switch_chips[i];
if (dev->chip_id == chip->chip_id) {
dev->name = chip->dev_name;
@@ -1247,8 +1586,21 @@ static int ksz8795_switch_init(struct ksz_device *dev)
if (!dev->cpu_ports)
return -ENODEV;
- dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
- dev->mib_cnt = ARRAY_SIZE(mib_names);
+ if (ksz_is_ksz88x3(dev)) {
+ ksz8->regs = ksz8863_regs;
+ ksz8->masks = ksz8863_masks;
+ ksz8->shifts = ksz8863_shifts;
+ dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names);
+ dev->mib_names = ksz88xx_mib_names;
+ } else {
+ ksz8->regs = ksz8795_regs;
+ ksz8->masks = ksz8795_masks;
+ ksz8->shifts = ksz8795_shifts;
+ dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names);
+ dev->mib_names = ksz87xx_mib_names;
+ }
+
+ dev->reg_mib_cnt = MIB_COUNTER_NUM;
dev->ports = devm_kzalloc(dev->dev,
dev->port_cnt * sizeof(struct ksz_port),
@@ -1272,36 +1624,36 @@ static int ksz8795_switch_init(struct ksz_device *dev)
return 0;
}
-static void ksz8795_switch_exit(struct ksz_device *dev)
+static void ksz8_switch_exit(struct ksz_device *dev)
{
- ksz8795_reset_switch(dev);
+ ksz8_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz8795_dev_ops = {
- .get_port_addr = ksz8795_get_port_addr,
- .cfg_port_member = ksz8795_cfg_port_member,
- .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table,
- .port_setup = ksz8795_port_setup,
- .r_phy = ksz8795_r_phy,
- .w_phy = ksz8795_w_phy,
- .r_dyn_mac_table = ksz8795_r_dyn_mac_table,
- .r_sta_mac_table = ksz8795_r_sta_mac_table,
- .w_sta_mac_table = ksz8795_w_sta_mac_table,
- .r_mib_cnt = ksz8795_r_mib_cnt,
- .r_mib_pkt = ksz8795_r_mib_pkt,
- .freeze_mib = ksz8795_freeze_mib,
- .port_init_cnt = ksz8795_port_init_cnt,
- .shutdown = ksz8795_reset_switch,
- .detect = ksz8795_switch_detect,
- .init = ksz8795_switch_init,
- .exit = ksz8795_switch_exit,
+static const struct ksz_dev_ops ksz8_dev_ops = {
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_dyn_mac_table = ksz8_r_dyn_mac_table,
+ .r_sta_mac_table = ksz8_r_sta_mac_table,
+ .w_sta_mac_table = ksz8_w_sta_mac_table,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .shutdown = ksz8_reset_switch,
+ .detect = ksz8_switch_detect,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
};
-int ksz8795_switch_register(struct ksz_device *dev)
+int ksz8_switch_register(struct ksz_device *dev)
{
- return ksz_switch_register(dev, &ksz8795_dev_ops);
+ return ksz_switch_register(dev, &ksz8_dev_ops);
}
-EXPORT_SYMBOL(ksz8795_switch_register);
+EXPORT_SYMBOL(ksz8_switch_register);
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 40372047d40d..c2e52c40a54c 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -16,7 +16,8 @@
#define REG_CHIP_ID0 0x00
-#define FAMILY_ID 0x87
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
#define REG_CHIP_ID1 0x01
@@ -28,6 +29,12 @@
#define CHIP_ID_94 0x60
#define CHIP_ID_95 0x90
+#define CHIP_ID_63 0x30
+
+#define KSZ8863_REG_SW_RESET 0x43
+
+#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4)
+#define KSZ8863_PCS_RESET BIT(0)
#define REG_SW_CTRL_0 0x02
@@ -98,7 +105,6 @@
#define REG_SW_CTRL_10 0x0C
-#define SW_TAIL_TAG_ENABLE BIT(1)
#define SW_PASS_PAUSE BIT(0)
#define REG_SW_CTRL_11 0x0D
@@ -150,7 +156,6 @@
#define REG_PORT_4_CTRL_2 0x42
#define REG_PORT_5_CTRL_2 0x52
-#define PORT_802_1P_REMAPPING BIT(7)
#define PORT_INGRESS_FILTER BIT(6)
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
@@ -269,6 +274,7 @@
#define REG_PORT_3_CTRL_9 0x3C
#define REG_PORT_4_CTRL_9 0x4C
+#define PORT_AUTO_NEG_ENABLE BIT(7)
#define PORT_AUTO_NEG_DISABLE BIT(7)
#define PORT_FORCE_100_MBIT BIT(6)
#define PORT_FORCE_FULL_DUPLEX BIT(5)
@@ -319,14 +325,12 @@
#define REG_PORT_CTRL_5 0x05
-#define REG_PORT_CTRL_7 0x07
#define REG_PORT_STATUS_0 0x08
#define REG_PORT_STATUS_1 0x09
#define REG_PORT_LINK_MD_CTRL 0x0A
#define REG_PORT_LINK_MD_RESULT 0x0B
#define REG_PORT_CTRL_9 0x0C
#define REG_PORT_CTRL_10 0x0D
-#define REG_PORT_STATUS_2 0x0E
#define REG_PORT_STATUS_3 0x0F
#define REG_PORT_CTRL_12 0xA0
@@ -356,8 +360,6 @@
#define REG_SW_MAC_ADDR_4 0x6C
#define REG_SW_MAC_ADDR_5 0x6D
-#define REG_IND_CTRL_0 0x6E
-
#define TABLE_EXT_SELECT_S 5
#define TABLE_EEE_V 1
#define TABLE_ACL_V 2
@@ -383,23 +385,13 @@
#define TABLE_ENTRY_MASK 0x03FF
#define TABLE_EXT_ENTRY_MASK 0x0FFF
-#define REG_IND_DATA_8 0x70
-#define REG_IND_DATA_7 0x71
-#define REG_IND_DATA_6 0x72
#define REG_IND_DATA_5 0x73
-#define REG_IND_DATA_4 0x74
-#define REG_IND_DATA_3 0x75
#define REG_IND_DATA_2 0x76
#define REG_IND_DATA_1 0x77
#define REG_IND_DATA_0 0x78
#define REG_IND_DATA_PME_EEE_ACL 0xA0
-#define REG_IND_DATA_CHECK REG_IND_DATA_6
-#define REG_IND_MIB_CHECK REG_IND_DATA_4
-#define REG_IND_DATA_HI REG_IND_DATA_7
-#define REG_IND_DATA_LO REG_IND_DATA_3
-
#define REG_INT_STATUS 0x7C
#define REG_INT_ENABLE 0x7D
@@ -816,6 +808,7 @@
#define KSZ8795_ID_HI 0x0022
#define KSZ8795_ID_LO 0x1550
+#define KSZ8863_ID_LO 0x1430
#define KSZ8795_SW_ID 0x8795
@@ -846,7 +839,7 @@
#define KS_PRIO_IN_REG 4
-#define KSZ8795_COUNTER_NUM 0x20
+#define MIB_COUNTER_NUM 0x20
/* Common names used by other drivers */
@@ -856,12 +849,6 @@
#define P_MIRROR_CTRL REG_PORT_CTRL_1
#define P_802_1P_CTRL REG_PORT_CTRL_2
#define P_STP_CTRL REG_PORT_CTRL_2
-#define P_LOCAL_CTRL REG_PORT_CTRL_7
-#define P_REMOTE_STATUS REG_PORT_STATUS_0
-#define P_FORCE_CTRL REG_PORT_CTRL_9
-#define P_NEG_RESTART_CTRL REG_PORT_CTRL_10
-#define P_SPEED_STATUS REG_PORT_STATUS_1
-#define P_LINK_STATUS REG_PORT_STATUS_2
#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
@@ -876,7 +863,6 @@
#define S_MIRROR_CTRL REG_SW_CTRL_3
#define S_REPLACE_VID_CTRL REG_SW_CTRL_4
#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10
-#define S_TAIL_TAG_CTRL REG_SW_CTRL_10
#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12
#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0
#define S_IPV6_MLD_CTRL REG_SW_CTRL_21
@@ -890,65 +876,6 @@
#define BROADCAST_STORM_VALUE 9969
/**
- * STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
- * STATIC_MAC_TABLE_FWD_PORTS 00-001F0000-00000000
- * STATIC_MAC_TABLE_VALID 00-00200000-00000000
- * STATIC_MAC_TABLE_OVERRIDE 00-00400000-00000000
- * STATIC_MAC_TABLE_USE_FID 00-00800000-00000000
- * STATIC_MAC_TABLE_FID 00-7F000000-00000000
- */
-
-#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
-#define STATIC_MAC_TABLE_FWD_PORTS 0x001F0000
-#define STATIC_MAC_TABLE_VALID 0x00200000
-#define STATIC_MAC_TABLE_OVERRIDE 0x00400000
-#define STATIC_MAC_TABLE_USE_FID 0x00800000
-#define STATIC_MAC_TABLE_FID 0x7F000000
-
-#define STATIC_MAC_FWD_PORTS_S 16
-#define STATIC_MAC_FID_S 24
-
-/**
- * VLAN_TABLE_FID 00-007F007F-007F007F
- * VLAN_TABLE_MEMBERSHIP 00-0F800F80-0F800F80
- * VLAN_TABLE_VALID 00-10001000-10001000
- */
-
-#define VLAN_TABLE_FID 0x007F
-#define VLAN_TABLE_MEMBERSHIP 0x0F80
-#define VLAN_TABLE_VALID 0x1000
-
-#define VLAN_TABLE_MEMBERSHIP_S 7
-#define VLAN_TABLE_S 16
-
-/**
- * DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
- * DYNAMIC_MAC_TABLE_FID 00-007F0000-00000000
- * DYNAMIC_MAC_TABLE_NOT_READY 00-00800000-00000000
- * DYNAMIC_MAC_TABLE_SRC_PORT 00-07000000-00000000
- * DYNAMIC_MAC_TABLE_TIMESTAMP 00-18000000-00000000
- * DYNAMIC_MAC_TABLE_ENTRIES 7F-E0000000-00000000
- * DYNAMIC_MAC_TABLE_MAC_EMPTY 80-00000000-00000000
- */
-
-#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
-#define DYNAMIC_MAC_TABLE_FID 0x007F0000
-#define DYNAMIC_MAC_TABLE_SRC_PORT 0x07000000
-#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x18000000
-#define DYNAMIC_MAC_TABLE_ENTRIES 0xE0000000
-
-#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
-
-#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x7F
-#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x80
-
-#define DYNAMIC_MAC_FID_S 16
-#define DYNAMIC_MAC_SRC_PORT_S 24
-#define DYNAMIC_MAC_TIMESTAMP_S 27
-#define DYNAMIC_MAC_ENTRIES_S 29
-#define DYNAMIC_MAC_ENTRIES_H_S 3
-
-/**
* MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
* MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF
* MIB_PACKET_DROPPED 00-00000000-0000FFFF
@@ -956,31 +883,15 @@
* MIB_COUNTER_OVERFLOW 00-00000040-00000000
*/
-#define MIB_COUNTER_OVERFLOW BIT(6)
-#define MIB_COUNTER_VALID BIT(5)
-
#define MIB_COUNTER_VALUE 0x3FFFFFFF
-#define KS_MIB_TOTAL_RX_0 0x100
-#define KS_MIB_TOTAL_TX_0 0x101
-#define KS_MIB_PACKET_DROPPED_RX_0 0x102
-#define KS_MIB_PACKET_DROPPED_TX_0 0x103
-#define KS_MIB_TOTAL_RX_1 0x104
-#define KS_MIB_TOTAL_TX_1 0x105
-#define KS_MIB_PACKET_DROPPED_TX_1 0x106
-#define KS_MIB_PACKET_DROPPED_RX_1 0x107
-#define KS_MIB_TOTAL_RX_2 0x108
-#define KS_MIB_TOTAL_TX_2 0x109
-#define KS_MIB_PACKET_DROPPED_TX_2 0x10A
-#define KS_MIB_PACKET_DROPPED_RX_2 0x10B
-#define KS_MIB_TOTAL_RX_3 0x10C
-#define KS_MIB_TOTAL_TX_3 0x10D
-#define KS_MIB_PACKET_DROPPED_TX_3 0x10E
-#define KS_MIB_PACKET_DROPPED_RX_3 0x10F
-#define KS_MIB_TOTAL_RX_4 0x110
-#define KS_MIB_TOTAL_TX_4 0x111
-#define KS_MIB_PACKET_DROPPED_TX_4 0x112
-#define KS_MIB_PACKET_DROPPED_RX_4 0x113
+#define KSZ8795_MIB_TOTAL_RX_0 0x100
+#define KSZ8795_MIB_TOTAL_TX_0 0x101
+#define KSZ8795_MIB_TOTAL_RX_1 0x104
+#define KSZ8795_MIB_TOTAL_TX_1 0x105
+
+#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
+#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
#define MIB_PACKET_DROPPED 0x0000FFFF
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index f98432a3e2b5..85ba12aa82d8 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -14,34 +14,52 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include "ksz8.h"
#include "ksz_common.h"
-#define SPI_ADDR_SHIFT 12
-#define SPI_ADDR_ALIGN 3
-#define SPI_TURNAROUND_SHIFT 1
+#define KSZ8795_SPI_ADDR_SHIFT 12
+#define KSZ8795_SPI_ADDR_ALIGN 3
+#define KSZ8795_SPI_TURNAROUND_SHIFT 1
-KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
- SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
+#define KSZ8863_SPI_ADDR_SHIFT 8
+#define KSZ8863_SPI_ADDR_ALIGN 8
+#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+
+KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
+ KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
+ KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
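+
+/* Editor's note: two tables are needed because the chips frame SPI
+ * accesses differently -- the KSZ8795 command word carries a 12-bit
+ * address field plus a 1-bit turnaround, while the KSZ8863 uses a plain
+ * 8-bit address with no turnaround. device_get_match_data() in the probe
+ * function below selects the right table per compatible string.
+ */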
static int ksz8795_spi_probe(struct spi_device *spi)
{
+ const struct regmap_config *regmap_config;
+ struct device *ddev = &spi->dev;
struct regmap_config rc;
struct ksz_device *dev;
- int i, ret;
+ struct ksz8 *ksz8;
+ int i, ret = 0;
- dev = ksz_switch_alloc(&spi->dev, spi);
+	ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL);
+	if (!ksz8)
+		return -ENOMEM;
+
+	ksz8->priv = spi;
+
+ dev = ksz_switch_alloc(&spi->dev, ksz8);
if (!dev)
return -ENOMEM;
+ regmap_config = device_get_match_data(ddev);
+ if (!regmap_config)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- rc = ksz8795_regmap_config[i];
+ rc = regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
"Failed to initialize regmap%i: %d\n",
- ksz8795_regmap_config[i].val_bits, ret);
+ regmap_config[i].val_bits, ret);
return ret;
}
}
@@ -55,7 +73,7 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = ksz8795_switch_register(dev);
+ ret = ksz8_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -85,9 +103,11 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
}
static const struct of_device_id ksz8795_dt_ids[] = {
- { .compatible = "microchip,ksz8765" },
- { .compatible = "microchip,ksz8794" },
- { .compatible = "microchip,ksz8795" },
+ { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config },
+ { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config },
+ { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config },
+ { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config },
+ { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config },
{},
};
MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
new file mode 100644
index 000000000000..30d97ea7a949
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ8863 series register access through SMI
+ *
+ * Copyright (C) 2019 Pengutronix, Michael Grzeschik <kernel@pengutronix.de>
+ */
+
+#include "ksz8.h"
+#include "ksz_common.h"
+
+/* Serial Management Interface (SMI) uses the following frame format:
+ *
+ * preamble|start|Read/Write| PHY | REG |TA| Data bits | Idle
+ * |frame| OP code |address |address| | |
+ * read | 32x1's | 01 | 00 | 1xRRR | RRRRR |Z0| 00000000DDDDDDDD | Z
+ * write| 32x1's | 01 | 00 | 0xRRR | RRRRR |10| xxxxxxxxDDDDDDDD | Z
+ *
+ */
+
+#define SMI_KSZ88XX_READ_PHY BIT(4)
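+
+/* Editor's note, worked example: for register 0x43 the upper three
+ * address bits travel in the PHY address field and the lower five in the
+ * REG field, so the bus sees phy = ((0x43 & 0xE0) >> 5) |
+ * SMI_KSZ88XX_READ_PHY = 0x12 and, on the wire, reg = 0x43 & 0x1F = 0x03
+ * (a clause-22 MDIO frame only carries five register bits).
+ */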
+
+static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct ksz_device *dev = ctx;
+ struct mdio_device *mdev;
+ u8 reg = *(u8 *)reg_buf;
+ u8 *val = val_buf;
+ struct ksz8 *ksz8;
+ int i, ret = 0;
+
+ ksz8 = dev->priv;
+ mdev = ksz8->priv;
+
+ mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ for (i = 0; i < val_len; i++) {
+ int tmp = reg + i;
+
+ ret = __mdiobus_read(mdev->bus, ((tmp & 0xE0) >> 5) |
+ SMI_KSZ88XX_READ_PHY, tmp);
+ if (ret < 0)
+ goto out;
+
+ val[i] = ret;
+ }
+ ret = 0;
+
+ out:
+ mutex_unlock(&mdev->bus->mdio_lock);
+
+ return ret;
+}
+
+static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
+{
+ struct ksz_device *dev = ctx;
+ struct mdio_device *mdev;
+ struct ksz8 *ksz8;
+ int i, ret = 0;
+ u32 reg;
+ u8 *val;
+
+ ksz8 = dev->priv;
+ mdev = ksz8->priv;
+
+ val = (u8 *)(data + 4);
+ reg = *(u32 *)data;
+
+ mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ for (i = 0; i < (count - 4); i++) {
+ int tmp = reg + i;
+
+ ret = __mdiobus_write(mdev->bus, ((tmp & 0xE0) >> 5),
+ tmp, val[i]);
+ if (ret < 0)
+ goto out;
+ }
+
+ out:
+ mutex_unlock(&mdev->bus->mdio_lock);
+
+ return ret;
+}
+
+static const struct regmap_bus regmap_smi[] = {
+ {
+ .read = ksz8863_mdio_read,
+ .write = ksz8863_mdio_write,
+ .max_raw_read = 1,
+ .max_raw_write = 1,
+ },
+ {
+ .read = ksz8863_mdio_read,
+ .write = ksz8863_mdio_write,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ .max_raw_read = 2,
+ .max_raw_write = 2,
+ },
+ {
+ .read = ksz8863_mdio_read,
+ .write = ksz8863_mdio_write,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+ }
+};
+
+static const struct regmap_config ksz8863_regmap_config[] = {
+ {
+ .name = "#8",
+ .reg_bits = 8,
+ .pad_bits = 24,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+ .use_single_read = 1,
+ .lock = ksz_regmap_lock,
+ .unlock = ksz_regmap_unlock,
+ },
+ {
+ .name = "#16",
+ .reg_bits = 8,
+ .pad_bits = 24,
+ .val_bits = 16,
+ .cache_type = REGCACHE_NONE,
+ .use_single_read = 1,
+ .lock = ksz_regmap_lock,
+ .unlock = ksz_regmap_unlock,
+ },
+ {
+ .name = "#32",
+ .reg_bits = 8,
+ .pad_bits = 24,
+ .val_bits = 32,
+ .cache_type = REGCACHE_NONE,
+ .use_single_read = 1,
+ .lock = ksz_regmap_lock,
+ .unlock = ksz_regmap_unlock,
+ }
+};
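+
+/* Editor's note: the three bus/config pairs differ only in value width --
+ * 8-, 16- and 32-bit registers are all funnelled through the byte-wise
+ * ksz8863_mdio_{read,write}() helpers above, with max_raw_read/write
+ * bounding each transfer at 1, 2 or 4 bytes respectively.
+ */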
+
+static int ksz8863_smi_probe(struct mdio_device *mdiodev)
+{
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ struct ksz8 *ksz8;
+ int ret;
+ int i;
+
+	ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL);
+	if (!ksz8)
+		return -ENOMEM;
+
+	ksz8->priv = mdiodev;
+
+ dev = ksz_switch_alloc(&mdiodev->dev, ksz8);
+ if (!dev)
+		return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8863_regmap_config); i++) {
+ rc = ksz8863_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init(&mdiodev->dev,
+ &regmap_smi[i], dev,
+ &rc);
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&mdiodev->dev,
+ "Failed to initialize regmap%i: %d\n",
+ ksz8863_regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (mdiodev->dev.platform_data)
+ dev->pdata = mdiodev->dev.platform_data;
+
+ ret = ksz8_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&mdiodev->dev, dev);
+
+ return 0;
+}
+
+static void ksz8863_smi_remove(struct mdio_device *mdiodev)
+{
+ struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static const struct of_device_id ksz8863_dt_ids[] = {
+ { .compatible = "microchip,ksz8863" },
+ { .compatible = "microchip,ksz8873" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
+
+static struct mdio_driver ksz8863_driver = {
+ .probe = ksz8863_smi_probe,
+ .remove = ksz8863_smi_remove,
+ .mdiodrv.driver = {
+ .name = "ksz8863-switch",
+ .of_match_table = ksz8863_dt_ids,
+ },
+};
+
+mdio_module_driver(ksz8863_driver);
+
+MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>");
+MODULE_DESCRIPTION("Microchip KSZ8863 SMI Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index f212775372ce..2e6bfd333f50 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -69,8 +69,9 @@ struct ksz_device {
int cpu_ports; /* port bitmap can be cpu port */
int phy_port_cnt;
int port_cnt;
- int reg_mib_cnt;
+ u8 reg_mib_cnt;
int mib_cnt;
+ const struct mib_names *mib_names;
phy_interface_t compat_interface;
u32 regs_size;
bool phy_errata_9477;
@@ -142,7 +143,7 @@ int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops);
void ksz_switch_remove(struct ksz_device *dev);
-int ksz8795_switch_register(struct ksz_device *dev);
+int ksz8_switch_register(struct ksz_device *dev);
int ksz9477_switch_register(struct ksz_device *dev);
void ksz_update_port_member(struct ksz_device *dev, int port);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 9871d7cff93a..96f7c9eede35 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -67,6 +67,11 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0xb8, "RxArlDrop"),
};
+/* Since the phy_device has not yet been created and
+ * phy_{read,write}_mmd_indirect is not yet available, we provide our own
+ * core_{read,write}_mmd_indirect, with core_{clear,write,set} wrappers,
+ * for this purpose.
+ */
static int
core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
{
@@ -435,19 +440,13 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
TD_DM_DRVP(8) | TD_DM_DRVN(8));
- /* Setup core clock for MT7530 */
- /* Disable MT7530 core clock */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+ /* Disable MT7530 core and TRGMII Tx clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+ REG_GSWCK_EN | REG_TRGMIICK_EN);
- /* Disable PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we
- * provide our own core_write_mmd_indirect to complete this
- * function.
- */
- core_write_mmd_indirect(priv,
- CORE_GSWPLL_GRP1,
- MDIO_MMD_VEND2,
- 0);
+ /* Setup core clock for MT7530 */
+ /* Disable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1, 0);
/* Set core clock into 500Mhz */
core_write(priv, CORE_GSWPLL_GRP2,
@@ -460,11 +459,7 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
RG_GSWPLL_POSDIV_200M(2) |
RG_GSWPLL_FBKDIV_200M(32));
- /* Enable MT7530 core clock */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
-
/* Setup the MT7530 TRGMII Tx Clock */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
@@ -478,6 +473,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
core_write(priv, CORE_PLL_GROUP7,
RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+ /* Enable MT7530 core and TRGMII Tx clocks */
core_set(priv, CORE_TRGMII_GSW_CLK_CG,
REG_GSWCK_EN | REG_TRGMIICK_EN);
@@ -997,8 +994,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
- /* Unknown multicast frame forwarding to the cpu port */
- mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
+ /* Disable flooding by default */
+ mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
+ BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
/* Set CPU port number */
if (priv->id == ID_MT7621)
@@ -1136,6 +1134,56 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
}
static int
+mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (flags.mask & BR_LEARNING)
+ mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
+ flags.val & BR_LEARNING ? 0 : SA_DIS);
+
+ if (flags.mask & BR_FLOOD)
+ mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
+ flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
+ flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
+
+ return 0;
+}
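+
+/* Editor's note: these hooks are exercised from userspace through
+ * iproute2, e.g. "bridge link set dev swp1 learning off flood off
+ * mcast_flood off" ("swp1" being a placeholder port name); bcast_flood
+ * can likewise be toggled on iproute2 versions that support it.
+ */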
+
+static int
+mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
+ struct netlink_ext_ack *extack)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ mrouter ? UNM_FFP(BIT(port)) : 0);
+
+ return 0;
+}
+
+static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
@@ -1347,6 +1395,59 @@ err:
}
static int
+mt7530_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct mt7530_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ u8 port_mask = 0;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
+ if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
+ port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
+ & PORT_MAP_MASK;
+
+ port_mask |= BIT(port);
+ mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+mt7530_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct mt7530_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ u8 port_mask = 0;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
+ if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
+ port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
+ & PORT_MAP_MASK;
+
+ port_mask &= ~BIT(port);
+ mt7530_fdb_write(priv, vid, port_mask, addr, -1,
+ port_mask ? STATIC_ENT : STATIC_EMP);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
{
struct mt7530_dummy_poll p;
@@ -1818,9 +1919,12 @@ mt7530_setup(struct dsa_switch *ds)
ret = mt753x_cpu_port_enable(ds, i);
if (ret)
return ret;
- } else
+ } else {
mt7530_port_disable(ds, i);
+ /* Disable learning by default on all user ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+ }
/* Enable consistent egress tag */
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
@@ -1982,9 +2086,13 @@ mt7531_setup(struct dsa_switch *ds)
ret = mt753x_cpu_port_enable(ds, i);
if (ret)
return ret;
- } else
+ } else {
mt7530_port_disable(ds, i);
+ /* Disable learning by default on all user ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+ }
+
/* Enable consistent egress tag */
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
@@ -2462,6 +2570,17 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr |= PMCR_RX_FC_EN;
}
+ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) {
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_EEE1G;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_EEE100;
+ break;
+ }
+ }
+
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
@@ -2692,6 +2811,36 @@ mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return priv->info->phy_write(ds, port, regnum, val);
}
+static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
+
+ e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
+ e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
+
+ return 0;
+}
+
+static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
+
+ if (e->tx_lpi_timer > 0xFFF)
+ return -EINVAL;
+
+ set = SET_LPI_THRESH(e->tx_lpi_timer);
+ if (!e->tx_lpi_enabled)
+ /* Force LPI Mode without a delay */
+ set |= LPI_MODE_EN;
+ mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
+
+ return 0;
+}
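+
+/* Editor's note: LPI_THRESH_MASK is GENMASK(15, 4), a 12-bit field, which
+ * is why tx_lpi_timer is rejected above 0xFFF; e.g. tx_lpi_timer = 0x123
+ * is programmed into PMEEECR as SET_LPI_THRESH(0x123) = 0x1230.
+ */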
+
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
@@ -2706,11 +2855,16 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_change_mtu = mt7530_port_change_mtu,
.port_max_mtu = mt7530_port_max_mtu,
.port_stp_state_set = mt7530_stp_state_set,
+ .port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
+ .port_bridge_flags = mt7530_port_bridge_flags,
+ .port_set_mrouter = mt7530_port_set_mrouter,
.port_bridge_join = mt7530_port_bridge_join,
.port_bridge_leave = mt7530_port_bridge_leave,
.port_fdb_add = mt7530_port_fdb_add,
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
+ .port_mdb_add = mt7530_port_mdb_add,
+ .port_mdb_del = mt7530_port_mdb_del,
.port_vlan_filtering = mt7530_port_vlan_filtering,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
@@ -2722,6 +2876,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
+ .get_mac_eee = mt753x_get_mac_eee,
+ .set_mac_eee = mt753x_set_mac_eee,
};
static const struct mt753x_info mt753x_table[] = {
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 64a9bb377e15..0204da486f3a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -34,6 +34,7 @@ enum mt753x_id {
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
+#define BC_FFP_MASK BC_FFP(~0)
#define UNM_FFP(x) (((x) & 0xff) << 16)
#define UNM_FFP_MASK UNM_FFP(~0)
#define UNU_FFP(x) (((x) & 0xff) << 8)
@@ -256,6 +257,8 @@ enum mt7530_vlan_port_attr {
#define PMCR_RX_EN BIT(13)
#define PMCR_BACKOFF_EN BIT(9)
#define PMCR_BACKPR_EN BIT(8)
+#define PMCR_FORCE_EEE1G BIT(7)
+#define PMCR_FORCE_EEE100 BIT(6)
#define PMCR_TX_FC_EN BIT(5)
#define PMCR_RX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
@@ -280,7 +283,8 @@ enum mt7530_vlan_port_attr {
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
@@ -289,6 +293,15 @@ enum mt7530_vlan_port_attr {
PMCR_FORCE_SPEED_1000 | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
+#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
+#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16)
+#define LPI_THRESH_MASK GENMASK(15, 4)
+#define LPI_THRESH_SHT 4
+#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
+#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)
+#define LPI_MODE_EN BIT(0)
+
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
#define PMSR_EEE1G BIT(7)
#define PMSR_EEE100M BIT(6)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e08bf9377140..eca285aaf72f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -485,12 +485,12 @@ static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port,
struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u8 lane;
+ int lane;
int err;
mv88e6xxx_reg_lock(chip);
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane && chip->info->ops->serdes_pcs_get_state)
+ if (lane >= 0 && chip->info->ops->serdes_pcs_get_state)
err = chip->info->ops->serdes_pcs_get_state(chip, port, lane,
state);
else
@@ -506,11 +506,11 @@ static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
const unsigned long *advertise)
{
const struct mv88e6xxx_ops *ops = chip->info->ops;
- u8 lane;
+ int lane;
if (ops->serdes_pcs_config) {
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane)
+ if (lane >= 0)
return ops->serdes_pcs_config(chip, port, lane, mode,
interface, advertise);
}
@@ -523,14 +523,14 @@ static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
const struct mv88e6xxx_ops *ops;
int err = 0;
- u8 lane;
+ int lane;
ops = chip->info->ops;
if (ops->serdes_pcs_an_restart) {
mv88e6xxx_reg_lock(chip);
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane)
+ if (lane >= 0)
err = ops->serdes_pcs_an_restart(chip, port, lane);
mv88e6xxx_reg_unlock(chip);
@@ -544,11 +544,11 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
const struct mv88e6xxx_ops *ops = chip->info->ops;
- u8 lane;
+ int lane;
if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) {
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane)
+ if (lane >= 0)
return ops->serdes_pcs_link_up(chip, port, lane,
speed, duplex);
}
@@ -635,6 +635,29 @@ static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
mv88e6390_phylink_validate(chip, port, mask, state);
}
+static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (port == 0 || port == 9 || port == 10) {
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 5000baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ phylink_set(mask, 2500baseT_Full);
+ }
+
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1417,7 +1440,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
* the special "LAG device" in the PVT, using
* the LAG ID as the port number.
*/
- dev = MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK;
+ dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
port = dsa_lag_id(dst, dp->lag_dev);
}
}
@@ -1456,6 +1479,13 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
+ if (dsa_to_port(ds, port)->lag_dev)
+ /* Hardware is incapable of fast-aging a LAG through a
+		 * regular ATU move operation. Until we have something
+		 * fancier in place, this is a no-op.
+ */
+ return;
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
mv88e6xxx_reg_unlock(chip);
@@ -1472,13 +1502,54 @@ static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_vtu_flush(chip);
}
-static int mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
+ struct mv88e6xxx_vtu_entry *entry)
{
+ int err;
+
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
- return chip->info->ops->vtu_getnext(chip, entry);
+ entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
+ entry->valid = false;
+
+ err = chip->info->ops->vtu_getnext(chip, entry);
+
+ if (entry->vid != vid)
+ entry->valid = false;
+
+ return err;
+}
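+
+/* Editor's note: vtu_getnext() returns the first valid entry after
+ * entry->vid, so seeding it with vid - 1 (wrapping to max_vid for vid 0)
+ * turns it into an exact lookup; if the entry that comes back carries a
+ * different VID, it is marked invalid above.
+ */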
+
+static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv)
+{
+ struct mv88e6xxx_vtu_entry entry = {
+ .vid = mv88e6xxx_max_vid(chip),
+ .valid = false,
+ };
+ int err;
+
+ if (!chip->info->ops->vtu_getnext)
+ return -EOPNOTSUPP;
+
+ do {
+ err = chip->info->ops->vtu_getnext(chip, &entry);
+ if (err)
+ return err;
+
+ if (!entry.valid)
+ break;
+
+ err = cb(chip, &entry, priv);
+ if (err)
+ return err;
+ } while (entry.vid < mv88e6xxx_max_vid(chip));
+
+ return 0;
}
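/* Editor's note: a minimal sketch (not part of the patch) showing how
 * the two VTU helpers above compose. mv88e6xxx_vtu_get() seeds the
 * GetNext operation with vid - 1 (or max_vid for vid 0, as GetNext
 * wraps around), so one call either returns the entry for vid with
 * .valid set, or .valid cleared when no such VLAN is programmed.
 * mv88e6xxx_vtu_walk() hands every valid entry to a callback; the
 * hypothetical counter below only illustrates the callback contract.
 */
static int mv88e6xxx_vtu_count_cb(struct mv88e6xxx_chip *chip,
				  const struct mv88e6xxx_vtu_entry *entry,
				  void *_count)
{
	unsigned int *count = _count;

	(*count)++;
	return 0;
}

static int mv88e6xxx_vtu_count(struct mv88e6xxx_chip *chip,
			       unsigned int *count)
{
	*count = 0;
	return mv88e6xxx_vtu_walk(chip, mv88e6xxx_vtu_count_cb, count);
}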
static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
@@ -1490,9 +1561,18 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
return chip->info->ops->vtu_loadpurge(chip, entry);
}
+static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *_fid_bitmap)
+{
+ unsigned long *fid_bitmap = _fid_bitmap;
+
+ set_bit(entry->fid, fid_bitmap);
+ return 0;
+}
+
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- struct mv88e6xxx_vtu_entry vlan;
int i, err;
u16 fid;
@@ -1508,21 +1588,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
}
/* Set every FID bit used by the VLAN entries */
- vlan.vid = mv88e6xxx_max_vid(chip);
- vlan.valid = false;
-
- do {
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
- if (err)
- return err;
-
- if (!vlan.valid)
- break;
-
- set_bit(vlan.fid, fid_bitmap);
- } while (vlan.vid < mv88e6xxx_max_vid(chip));
-
- return 0;
+ return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
}
static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
@@ -1559,19 +1625,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
return 0;
- vlan.vid = vid - 1;
- vlan.valid = false;
-
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
if (!vlan.valid)
return 0;
- if (vlan.vid != vid)
- return 0;
-
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
@@ -1653,15 +1713,12 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
} else {
- vlan.vid = vid - 1;
- vlan.valid = false;
-
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
/* switchdev expects -EOPNOTSUPP to honor software VLANs */
- if (vlan.vid != vid || !vlan.valid)
+ if (!vlan.valid)
return -EOPNOTSUPP;
fid = vlan.fid;
@@ -1911,8 +1968,10 @@ static int mv88e6xxx_set_rxnfc(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port,
u16 vid)
{
- const char broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u8 state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
+ u8 broadcast[ETH_ALEN];
+
+ eth_broadcast_addr(broadcast);
return mv88e6xxx_port_db_load_purge(chip, port, broadcast, vid, state);
}
@@ -1923,6 +1982,19 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
int err;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct net_device *brport;
+
+ if (dsa_is_unused_port(chip->ds, port))
+ continue;
+
+ brport = dsa_port_to_bridge_port(dp);
+ if (brport && !br_port_flag_is_set(brport, BR_BCAST_FLOOD))
+ /* Skip bridged user ports where broadcast
+ * flooding is disabled.
+ */
+ continue;
+
err = mv88e6xxx_port_add_broadcast(chip, port, vid);
if (err)
return err;
@@ -1931,6 +2003,53 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
return 0;
}
+struct mv88e6xxx_port_broadcast_sync_ctx {
+ int port;
+ bool flood;
+};
+
+static int
+mv88e6xxx_port_broadcast_sync_vlan(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *vlan,
+ void *_ctx)
+{
+ struct mv88e6xxx_port_broadcast_sync_ctx *ctx = _ctx;
+ u8 broadcast[ETH_ALEN];
+ u8 state;
+
+ if (ctx->flood)
+ state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
+ else
+ state = MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED;
+
+ eth_broadcast_addr(broadcast);
+
+ return mv88e6xxx_port_db_load_purge(chip, ctx->port, broadcast,
+ vlan->vid, state);
+}
+
+static int mv88e6xxx_port_broadcast_sync(struct mv88e6xxx_chip *chip, int port,
+ bool flood)
+{
+ struct mv88e6xxx_port_broadcast_sync_ctx ctx = {
+ .port = port,
+ .flood = flood,
+ };
+ struct mv88e6xxx_vtu_entry vid0 = {
+ .vid = 0,
+ };
+ int err;
+
+ /* Update the port's private database... */
+ err = mv88e6xxx_port_broadcast_sync_vlan(chip, &vid0, &ctx);
+ if (err)
+ return err;
+
+ /* ...and the database for all VLANs. */
+ return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_broadcast_sync_vlan,
+ &ctx);
+}
+
static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
u16 vid, u8 member, bool warn)
{
@@ -1938,14 +2057,11 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- vlan.vid = vid - 1;
- vlan.valid = false;
-
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
- if (vlan.vid != vid || !vlan.valid) {
+ if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
err = mv88e6xxx_atu_new(chip, &vlan.fid);
@@ -2041,17 +2157,14 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
if (!vid)
return -EOPNOTSUPP;
- vlan.vid = vid - 1;
- vlan.valid = false;
-
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
/* If the VLAN doesn't exist in hardware or the port isn't a member,
* tell switchdev that this VLAN is likely handled in software.
*/
- if (vlan.vid != vid || !vlan.valid ||
+ if (!vlan.valid ||
vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
@@ -2168,10 +2281,30 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
return err;
}
+struct mv88e6xxx_port_db_dump_vlan_ctx {
+ int port;
+ dsa_fdb_dump_cb_t *cb;
+ void *data;
+};
+
+static int mv88e6xxx_port_db_dump_vlan(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *_data)
+{
+ struct mv88e6xxx_port_db_dump_vlan_ctx *ctx = _data;
+
+ return mv88e6xxx_port_db_dump_fid(chip, entry->fid, entry->vid,
+ ctx->port, ctx->cb, ctx->data);
+}
+
static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct mv88e6xxx_vtu_entry vlan;
+ struct mv88e6xxx_port_db_dump_vlan_ctx ctx = {
+ .port = port,
+ .cb = cb,
+ .data = data,
+ };
u16 fid;
int err;
@@ -2184,25 +2317,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- /* Dump VLANs' Filtering Information Databases */
- vlan.vid = mv88e6xxx_max_vid(chip);
- vlan.valid = false;
-
- do {
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
- if (err)
- return err;
-
- if (!vlan.valid)
- break;
-
- err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port,
- cb, data);
- if (err)
- return err;
- } while (vlan.vid < mv88e6xxx_max_vid(chip));
-
- return err;
+ return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_db_dump_vlan, &ctx);
}
static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
@@ -2416,10 +2531,10 @@ static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
return mv88e6xxx_set_port_mode_normal(chip, port);
/* Setup CPU port mode depending on its supported tag format */
- if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA)
+ if (chip->tag_protocol == DSA_TAG_PROTO_DSA)
return mv88e6xxx_set_port_mode_dsa(chip, port);
- if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA)
+ if (chip->tag_protocol == DSA_TAG_PROTO_EDSA)
return mv88e6xxx_set_port_mode_edsa(chip, port);
return -EINVAL;
@@ -2434,19 +2549,15 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
{
- struct dsa_switch *ds = chip->ds;
- bool flood;
int err;
- /* Upstream ports flood frames with unknown unicast or multicast DA */
- flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
if (chip->info->ops->port_set_ucast_flood) {
- err = chip->info->ops->port_set_ucast_flood(chip, port, flood);
+ err = chip->info->ops->port_set_ucast_flood(chip, port, true);
if (err)
return err;
}
if (chip->info->ops->port_set_mcast_flood) {
- err = chip->info->ops->port_set_mcast_flood(chip, port, flood);
+ err = chip->info->ops->port_set_mcast_flood(chip, port, true);
if (err)
return err;
}
@@ -2460,11 +2571,11 @@ static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id)
struct mv88e6xxx_chip *chip = mvp->chip;
irqreturn_t ret = IRQ_NONE;
int port = mvp->port;
- u8 lane;
+ int lane;
mv88e6xxx_reg_lock(chip);
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane)
+ if (lane >= 0)
ret = mv88e6xxx_serdes_irq_status(chip, port, lane);
mv88e6xxx_reg_unlock(chip);
@@ -2472,7 +2583,7 @@ static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id)
}
static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
struct mv88e6xxx_port *dev_id = &chip->ports[port];
unsigned int irq;
@@ -2501,7 +2612,7 @@ static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
}
static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
struct mv88e6xxx_port *dev_id = &chip->ports[port];
unsigned int irq = dev_id->serdes_irq;
@@ -2526,11 +2637,11 @@ static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port,
static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
bool on)
{
- u8 lane;
+ int lane;
int err;
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (!lane)
+ if (lane < 0)
return 0;
if (on) {
@@ -2550,6 +2661,27 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
return err;
}
+static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
+{
+ int err;
+
+ if (!chip->info->ops->set_egress_port)
+ return -EOPNOTSUPP;
+
+ err = chip->info->ops->set_egress_port(chip, direction, port);
+ if (err)
+ return err;
+
+ if (direction == MV88E6XXX_EGRESS_DIR_INGRESS)
+ chip->ingress_dest_port = port;
+ else
+ chip->egress_dest_port = port;
+
+ return 0;
+}
+
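/* Editor's note: a hedged sketch of why this wrapper exists, based only
 * on code visible in this patch. Routing all updates through
 * mv88e6xxx_set_egress_port() keeps the chip-level shadow of the monitor
 * destinations (ingress_dest_port / egress_dest_port) in one place, so
 * later callers such as mv88e6xxx_port_mirror_add() can compare against
 * the shadow instead of re-reading hardware:
 *
 *	if (chip->ingress_dest_port != mirror->to_local_port)
 *		err = mv88e6xxx_set_egress_port(chip,
 *						MV88E6XXX_EGRESS_DIR_INGRESS,
 *						mirror->to_local_port);
 */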
static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
@@ -2572,19 +2704,17 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- if (chip->info->ops->set_egress_port) {
- err = chip->info->ops->set_egress_port(chip,
+ err = mv88e6xxx_set_egress_port(chip,
MV88E6XXX_EGRESS_DIR_INGRESS,
upstream_port);
- if (err)
- return err;
+ if (err && err != -EOPNOTSUPP)
+ return err;
- err = chip->info->ops->set_egress_port(chip,
+ err = mv88e6xxx_set_egress_port(chip,
MV88E6XXX_EGRESS_DIR_EGRESS,
upstream_port);
- if (err)
- return err;
- }
+ if (err && err != -EOPNOTSUPP)
+ return err;
}
return 0;
@@ -2670,15 +2800,20 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
+ /* Port Association Vector: disable automatic address learning
+ * on all user ports since they start out in standalone
+ * mode. When joining a bridge, learning will be configured to
+ * match the bridge port settings. Enable learning on all
+ * DSA/CPU ports. NOTE: FROM_CPU frames always bypass the
+ * learning process.
+ *
+ * Disable HoldAt1, IntOnAgeOut, LockedPort, IgnoreWrongData,
+ * and RefreshLocked, i.e. set up standard automatic learning.
*/
- reg = 1 << port;
- /* Disable learning for CPU port */
- if (dsa_is_cpu_port(ds, port))
+ if (dsa_is_user_port(ds, port))
reg = 0;
+ else
+ reg = 1 << port;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
reg);
@@ -3030,6 +3165,7 @@ out_resources:
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
[MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+ [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
};
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
@@ -4566,6 +4702,70 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.phylink_validate = mv88e6390x_phylink_validate,
};
+static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ /* MV88E6XXX_FAMILY_6393 */
+ .setup_errata = mv88e6393x_serdes_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6393x_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6393x_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6393x_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6393x_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6393x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .port_set_upstream_port = mv88e6393x_port_set_upstream_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ /* .set_cpu_port is missing because this family does not support a global
+ * CPU port, only a per-port CPU port, which is set via the
+ * .port_set_upstream_port method.
+ */
+ .set_egress_port = mv88e6393x_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6393x_serdes_power,
+ .serdes_get_lane = mv88e6393x_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6393x_serdes_irq_enable,
+ .serdes_irq_status = mv88e6393x_serdes_irq_status,
+ /* TODO: serdes stats */
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6393x_phylink_validate,
+};
+
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085,
@@ -4586,7 +4786,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ops = &mv88e6085_ops,
},
@@ -4607,7 +4806,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g1_irqs = 8,
.atu_move_port_mask = 0xf,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ops = &mv88e6095_ops,
},
@@ -4630,7 +4828,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6097_ops,
},
@@ -4653,7 +4851,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6123_ops,
},
@@ -4674,7 +4872,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g1_irqs = 9,
.atu_move_port_mask = 0xf,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ops = &mv88e6131_ops,
},
@@ -4698,7 +4895,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6141_ops,
},
@@ -4721,7 +4918,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6161_ops,
},
@@ -4745,7 +4942,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ptp_support = true,
.ops = &mv88e6165_ops,
},
@@ -4769,7 +4965,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6171_ops,
},
@@ -4793,7 +4989,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6172_ops,
},
@@ -4816,7 +5012,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6175_ops,
},
@@ -4840,7 +5036,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6176_ops,
},
@@ -4861,7 +5057,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g1_irqs = 8,
.atu_move_port_mask = 0xf,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6185_ops,
},
@@ -4879,7 +5075,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
@@ -4909,7 +5104,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ops = &mv88e6190x_ops,
},
@@ -4932,11 +5126,54 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ptp_support = true,
.ops = &mv88e6191_ops,
},
+ [MV88E6191X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191X,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6191X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
+
+ [MV88E6193X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6193X,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6193X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
+
[MV88E6220] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220,
.family = MV88E6XXX_FAMILY_6250,
@@ -4959,7 +5196,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.dual_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ptp_support = true,
.ops = &mv88e6250_ops,
},
@@ -4984,7 +5220,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6240_ops,
},
@@ -5006,7 +5242,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.dual_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ptp_support = true,
.ops = &mv88e6250_ops,
},
@@ -5030,7 +5265,6 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
.ptp_support = true,
.ops = &mv88e6290_ops,
},
@@ -5055,7 +5289,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6320_ops,
},
@@ -5079,7 +5313,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6321_ops,
},
@@ -5104,7 +5338,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6341_ops,
},
@@ -5128,7 +5362,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6350_ops,
},
@@ -5151,7 +5385,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6351_ops,
},
@@ -5175,7 +5409,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6352_ops,
},
@@ -5199,7 +5433,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
+ .edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED,
.ptp_support = true,
.ops = &mv88e6390_ops,
},
@@ -5223,10 +5457,32 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
+ .edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED,
.ptp_support = true,
.ops = &mv88e6390x_ops,
},
+
+ [MV88E6393X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6393X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
};
static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
@@ -5292,7 +5548,45 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
{
struct mv88e6xxx_chip *chip = ds->priv;
- return chip->info->tag_protocol;
+ return chip->tag_protocol;
+}
+
+static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol proto)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ enum dsa_tag_protocol old_protocol;
+ int err;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_EDSA:
+ switch (chip->info->edsa_support) {
+ case MV88E6XXX_EDSA_UNSUPPORTED:
+ return -EPROTONOSUPPORT;
+ case MV88E6XXX_EDSA_UNDOCUMENTED:
+ dev_warn(chip->dev, "Relying on undocumented EDSA tagging behavior\n");
+ fallthrough;
+ case MV88E6XXX_EDSA_SUPPORTED:
+ break;
+ }
+ break;
+ case DSA_TAG_PROTO_DSA:
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ old_protocol = chip->tag_protocol;
+ chip->tag_protocol = proto;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_setup_port_mode(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ chip->tag_protocol = old_protocol;
+
+ return err;
}
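/* Editor's note: a hedged usage sketch, not part of the patch. The new
 * .change_tag_protocol op is normally exercised from user space via the
 * DSA master's "tagging" sysfs attribute (the user ports must be down
 * while changing it); "eth0" is an assumed master interface name:
 *
 *	# cat /sys/class/net/eth0/dsa/tagging
 *	edsa
 *	# echo dsa > /sys/class/net/eth0/dsa/tagging
 */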
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
@@ -5334,9 +5628,6 @@ static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
int i;
int err;
- if (!chip->info->ops->set_egress_port)
- return -EOPNOTSUPP;
-
mutex_lock(&chip->reg_lock);
if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) !=
mirror->to_local_port) {
@@ -5351,9 +5642,8 @@ static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
goto out;
}
- err = chip->info->ops->set_egress_port(chip,
- direction,
- mirror->to_local_port);
+ err = mv88e6xxx_set_egress_port(chip, direction,
+ mirror->to_local_port);
if (err)
goto out;
}
@@ -5386,10 +5676,8 @@ static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
/* Reset egress port when no other mirror is active */
if (!other_mirrors) {
- if (chip->info->ops->set_egress_port(chip,
- direction,
- dsa_upstream_port(ds,
- port)))
+ if (mv88e6xxx_set_egress_port(chip, direction,
+ dsa_upstream_port(ds, port)))
dev_err(ds->dev, "failed to set egress port\n");
}
@@ -5403,7 +5691,8 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
const struct mv88e6xxx_ops *ops;
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
return -EINVAL;
ops = chip->info->ops;
@@ -5422,10 +5711,23 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ bool do_fast_age = false;
int err = -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
+ if (flags.mask & BR_LEARNING) {
+ bool learning = !!(flags.val & BR_LEARNING);
+ u16 pav = learning ? (1 << port) : 0;
+
+ err = mv88e6xxx_port_set_assoc_vector(chip, port, pav);
+ if (err)
+ goto out;
+
+ if (!learning)
+ do_fast_age = true;
+ }
+
if (flags.mask & BR_FLOOD) {
bool unicast = !!(flags.val & BR_FLOOD);
@@ -5444,9 +5746,20 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_BCAST_FLOOD) {
+ bool broadcast = !!(flags.val & BR_BCAST_FLOOD);
+
+ err = mv88e6xxx_port_broadcast_sync(chip, port, broadcast);
+ if (err)
+ goto out;
+ }
+
out:
mv88e6xxx_reg_unlock(chip);
+ if (do_fast_age)
+ mv88e6xxx_port_fast_age(ds, port);
+
return err;
}
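/* Editor's note: a hedged example (not part of the patch) of toggling
 * the bridge port flags handled above with iproute2:
 *
 *	bridge link set dev lan1 learning off
 *	bridge link set dev lan1 flood off mcast_flood off bcast_flood off
 *
 * Turning learning off takes the BR_LEARNING branch, which clears the
 * port association vector and then fast-ages the port's dynamic ATU
 * entries once the register lock has been dropped.
 */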
@@ -5738,6 +6051,7 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
+ .change_tag_protocol = mv88e6xxx_change_tag_protocol,
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
.phylink_validate = mv88e6xxx_validate,
@@ -5918,6 +6232,11 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
+ if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
+ chip->tag_protocol = DSA_TAG_PROTO_EDSA;
+ else
+ chip->tag_protocol = DSA_TAG_PROTO_DSA;
+
mv88e6xxx_phy_init(chip);
if (chip->info->ops->get_eeprom) {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index a57c8886f3ac..675b1f3e43b7 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -23,6 +23,8 @@
/* PVT limits for 4-bit port and 5-bit switch */
#define MV88E6XXX_MAX_PVT_SWITCHES 32
#define MV88E6XXX_MAX_PVT_PORTS 16
+#define MV88E6XXX_MAX_PVT_ENTRIES \
+ (MV88E6XXX_MAX_PVT_SWITCHES * MV88E6XXX_MAX_PVT_PORTS)
#define MV88E6XXX_MAX_GPIO 16
@@ -63,6 +65,8 @@ enum mv88e6xxx_model {
MV88E6190,
MV88E6190X,
MV88E6191,
+ MV88E6191X,
+ MV88E6193X,
MV88E6220,
MV88E6240,
MV88E6250,
@@ -75,6 +79,7 @@ enum mv88e6xxx_model {
MV88E6352,
MV88E6390,
MV88E6390X,
+ MV88E6393X,
};
enum mv88e6xxx_family {
@@ -90,6 +95,23 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
+ MV88E6XXX_FAMILY_6393, /* 6191X 6193X 6393X */
+};
+
+/**
+ * enum mv88e6xxx_edsa_support - Ethertype DSA tag support level
+ * @MV88E6XXX_EDSA_UNSUPPORTED: Device has no support for EDSA tags
+ * @MV88E6XXX_EDSA_UNDOCUMENTED: Documentation indicates that
+ * egressing FORWARD frames with an EDSA
+ * tag is reserved for future use, but
+ * empirical data shows that this mode
+ * is supported.
+ * @MV88E6XXX_EDSA_SUPPORTED: EDSA tags are fully supported.
+ */
+enum mv88e6xxx_edsa_support {
+ MV88E6XXX_EDSA_UNSUPPORTED = 0,
+ MV88E6XXX_EDSA_UNDOCUMENTED,
+ MV88E6XXX_EDSA_SUPPORTED,
};
struct mv88e6xxx_ops;
@@ -129,7 +151,7 @@ struct mv88e6xxx_info {
*/
bool dual_chip;
- enum dsa_tag_protocol tag_protocol;
+ enum mv88e6xxx_edsa_support edsa_support;
/* Mask for FromPort and ToPort value of PortVec used in ATU Move
* operation. 0 means that the ATU Move operation is not supported.
@@ -246,6 +268,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_PVT,
_MV88E6XXX_REGION_MAX,
};
@@ -257,6 +280,9 @@ struct mv88e6xxx_region_priv {
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
+ /* Currently configured tagging protocol */
+ enum dsa_tag_protocol tag_protocol;
+
/* The dsa_switch this private structure is related to */
struct dsa_switch *ds;
@@ -513,30 +539,30 @@ struct mv88e6xxx_ops {
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
/* Power on/off a SERDES interface */
- int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane,
bool up);
/* SERDES lane mapping */
- u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
+ int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state);
+ int lane, struct phylink_link_state *state);
int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port,
- u8 lane, unsigned int mode,
+ int lane, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertise);
int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port,
- u8 lane, int speed, int duplex);
+ int lane, int speed, int duplex);
/* SERDES interrupt handling */
unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
int port);
- int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable);
irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 21953d6d484c..0c0f5ea6680c 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -503,6 +503,44 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
return 0;
}
+static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int dev, port, err;
+ u16 *pvt, *cur;
+
+ pvt = kcalloc(MV88E6XXX_MAX_PVT_ENTRIES, sizeof(*pvt), GFP_KERNEL);
+ if (!pvt)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+
+ cur = pvt;
+ for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; dev++) {
+ for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; port++) {
+ err = mv88e6xxx_g2_pvt_read(chip, dev, port, cur);
+ if (err)
+ break;
+
+ cur++;
+ }
+ }
+
+ if (err)
+ break;
+ }
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(pvt);
+ return err;
+ }
+
+ *data = (u8 *)pvt;
+ return 0;
+}
+
static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
const struct devlink_port_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -567,6 +605,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
+ .name = "pvt",
+ .snapshot = mv88e6xxx_region_pvt_snapshot,
+ .destructor = kfree,
+};
+
static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
.name = "port",
.snapshot = mv88e6xxx_region_port_snapshot,
@@ -576,6 +620,8 @@ static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
struct mv88e6xxx_region {
struct devlink_region_ops *ops;
u64 size;
+
+ bool (*cond)(struct mv88e6xxx_chip *chip);
};
static struct mv88e6xxx_region mv88e6xxx_regions[] = {
@@ -594,6 +640,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_PVT] = {
+ .ops = &mv88e6xxx_region_pvt_ops,
+ .size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
+ .cond = mv88e6xxx_has_pvt,
+ },
};
static void
@@ -663,6 +714,7 @@ out:
static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
struct mv88e6xxx_chip *chip)
{
+ bool (*cond)(struct mv88e6xxx_chip *chip);
struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
@@ -671,6 +723,10 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) {
ops = mv88e6xxx_regions[i].ops;
size = mv88e6xxx_regions[i].size;
+ cond = mv88e6xxx_regions[i].cond;
+
+ if (cond && !cond(chip))
+ continue;
switch (i) {
case MV88E6XXX_REGION_ATU:
@@ -678,7 +734,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
sizeof(struct mv88e6xxx_devlink_atu_entry);
break;
case MV88E6XXX_REGION_VTU:
- size = mv88e6xxx_max_vid(chip) *
+ size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 33d443a37efc..815b0f681d69 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -315,7 +315,6 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
{
- int *dest_port_chip;
u16 reg;
int err;
@@ -325,13 +324,11 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
- dest_port_chip = &chip->ingress_dest_port;
reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
- dest_port_chip = &chip->egress_dest_port;
reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
@@ -340,11 +337,7 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
return -EINVAL;
}
- err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
- if (!err)
- *dest_port_chip = port;
-
- return err;
+ return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
}
/* Older generations also call this the ARP destination. It has been
@@ -380,28 +373,20 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
{
- int *dest_port_chip;
u16 ptr;
- int err;
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
- dest_port_chip = &chip->ingress_dest_port;
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
- dest_port_chip = &chip->egress_dest_port;
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
break;
default:
return -EINVAL;
}
- err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (!err)
- *dest_port_chip = port;
-
- return err;
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
}
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7c396964d0b2..4f3dbb015f77 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -22,6 +22,7 @@
#define MV88E6185_G1_STS_PPU_STATE_DISABLED 0x8000
#define MV88E6185_G1_STS_PPU_STATE_POLLING 0xc000
#define MV88E6XXX_G1_STS_INIT_READY 0x0800
+#define MV88E6393X_G1_STS_IRQ_DEVICE_2 9
#define MV88E6XXX_G1_STS_IRQ_AVB 8
#define MV88E6XXX_G1_STS_IRQ_DEVICE 7
#define MV88E6XXX_G1_STS_IRQ_STATS 6
@@ -59,6 +60,7 @@
#define MV88E6185_G1_CTL1_SCHED_PRIO 0x0800
#define MV88E6185_G1_CTL1_MAX_FRAME_1632 0x0400
#define MV88E6185_G1_CTL1_RELOAD_EEPROM 0x0200
+#define MV88E6393X_G1_CTL1_DEVICE2_EN 0x0200
#define MV88E6XXX_G1_CTL1_DEVICE_EN 0x0080
#define MV88E6XXX_G1_CTL1_STATS_DONE_EN 0x0040
#define MV88E6XXX_G1_CTL1_VTU_PROBLEM_EN 0x0020
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index da8bac8813e1..fa65ecd9cb85 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -239,6 +239,23 @@ static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
return mv88e6xxx_g2_pvt_op_wait(chip);
}
+int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 *data)
+{
+ int err;
+
+ err = mv88e6xxx_g2_pvt_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
+ MV88E6XXX_G2_PVT_ADDR_OP_READ);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_PVT_DATA, data);
+}
+
int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
int src_port, u16 data)
{
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 4127f82275ad..f3e27573a386 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -38,9 +38,15 @@
/* Offset 0x02: MGMT Enable Register 2x */
#define MV88E6XXX_G2_MGMT_EN_2X 0x02
+/* Offset 0x02: MAC LINK change IRQ Register for MV88E6393X */
+#define MV88E6393X_G2_MACLINK_INT_SRC 0x02
+
/* Offset 0x03: MGMT Enable Register 0x */
#define MV88E6XXX_G2_MGMT_EN_0X 0x03
+/* Offset 0x03: MAC LINK change IRQ Mask Register for MV88E6393X */
+#define MV88E6393X_G2_MACLINK_INT_MASK 0x03
+
/* Offset 0x04: Flow Control Delay Register */
#define MV88E6XXX_G2_FLOW_CTL 0x04
@@ -52,6 +58,8 @@
#define MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI 0x0080
#define MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU 0x0008
+#define MV88E6393X_G2_EGRESS_MONITOR_DEST 0x05
+
/* Offset 0x06: Device Mapping Table Register */
#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
@@ -101,7 +109,7 @@
#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
-#define MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK 0x1f
+#define MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK 0x1f
/* Offset 0x0C: Cross-chip Port VLAN Data Register */
#define MV88E6XXX_G2_PVT_DATA 0x0c
@@ -322,6 +330,8 @@ int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 *data);
int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
int src_port, u16 data);
int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index 7c2c67405322..eda710062933 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -42,7 +42,7 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
}
/**
- * mv88e6xxx_g2_scratch_gpio_get_bit - get a bit
+ * mv88e6xxx_g2_scratch_get_bit - get a bit
* @chip: chip private data
* @base_reg: base of scratch bits
* @offset: index of bit within the register
@@ -67,7 +67,7 @@ static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
}
/**
- * mv88e6xxx_g2_scratch_gpio_set_bit - set (or clear) a bit
+ * mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit
* @chip: chip private data
* @base_reg: base of scratch bits
* @offset: index of bit within the register
@@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
};
/**
- * mv88e6xxx_g2_gpio_set_smi - set gpio muxing for external smi
+ * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 094d17a1d037..8f74ffc7a279 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -468,30 +468,38 @@ long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
return restart ? 1 : -1;
}
-bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type)
+void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
struct ptp_header *hdr;
+ struct sk_buff *clone;
+ unsigned int type;
- if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
- return false;
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
- hdr = mv88e6xxx_should_tstamp(chip, port, clone, type);
+ hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
if (!hdr)
- return false;
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
- &ps->state))
- return false;
+ &ps->state)) {
+ kfree_skb(clone);
+ return;
+ }
ps->tx_skb = clone;
ps->tx_tstamp_start = jiffies;
ps->tx_seq_id = be16_to_cpu(hdr->sequence_id);
ptp_schedule_worker(chip->ptp_clock, 0);
- return true;
}
int mv88e6165_global_disable(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index 9da9f197ba02..cf7fb6d660b1 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -117,8 +117,8 @@ int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
-bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type);
+void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info);
@@ -151,11 +151,9 @@ static inline bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
return false;
}
-static inline bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *clone,
- unsigned int type)
+static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
{
- return false;
}
static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 4561f289ab76..f77e2ee64a60 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -14,6 +14,7 @@
#include <linux/phylink.h>
#include "chip.h"
+#include "global2.h"
#include "port.h"
#include "serdes.h"
@@ -25,6 +26,14 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
return mv88e6xxx_read(chip, addr, reg, val);
}
+int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
+ int bit, int val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_wait_bit(chip, addr, reg, bit, val);
+}
+
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
u16 val)
{
@@ -426,11 +435,111 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
return PHY_INTERFACE_MODE_NA;
}
+/* Support 10, 100, 200, 1000, 2500, 5000 and 10000 Mbps (e.g. 88E6393X).
+ * Function mv88e6xxx_port_set_speed_duplex() can't be used here because the
+ * register values for speeds 2500 and 5000 conflict.
+ */
+int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ u16 reg, ctrl;
+ int err;
+
+ if (speed == SPEED_MAX)
+ speed = (port > 0 && port < 9) ? 1000 : 10000;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed >= 2500 && port > 0 && port < 9)
+ return -EOPNOTSUPP;
+
+ switch (speed) {
+ case 10:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10;
+ break;
+ case 100:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100;
+ break;
+ case 200:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ break;
+ case 1000:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
+ break;
+ case 2500:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ break;
+ case 5000:
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ break;
+ case 10000:
+ case SPEED_UNFORCED:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (duplex) {
+ case DUPLEX_HALF:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
+ break;
+ case DUPLEX_FULL:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
+ break;
+ case DUPLEX_UNFORCED:
+ /* normal duplex detection */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED |
+ MV88E6390_PORT_MAC_CTL_FORCE_SPEED);
+
+ if (speed != SPEED_UNFORCED)
+ reg |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
+
+ reg |= ctrl;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ if (speed)
+ dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
+ else
+ dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
+ dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
+
+ return 0;
+}
+
+phy_interface_t mv88e6393x_port_max_speed_mode(int port)
+{
+ if (port == 0 || port == 9 || port == 10)
+ return PHY_INTERFACE_MODE_10GBASER;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode, bool force)
{
- u8 lane;
u16 cmode;
+ int lane;
u16 reg;
int err;
@@ -450,6 +559,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_2500BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ cmode = MV88E6393X_PORT_STS_CMODE_5GBASER;
+ break;
case PHY_INTERFACE_MODE_XGMII:
case PHY_INTERFACE_MODE_XAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
@@ -457,6 +569,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_RXAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_RXAUI;
break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ cmode = MV88E6393X_PORT_STS_CMODE_10GBASER;
+ break;
default:
cmode = 0;
}
@@ -466,7 +581,7 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane) {
+ if (lane >= 0) {
if (chip->ports[port].serdes_irq) {
err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
if (err)
@@ -495,8 +610,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
chip->ports[port].cmode = cmode;
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (!lane)
- return -ENODEV;
+ if (lane < 0)
+ return lane;
err = mv88e6xxx_serdes_power_up(chip, port, lane);
if (err)
@@ -541,6 +656,29 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
+int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ int err;
+ u16 reg;
+
+ if (port != 0 && port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ /* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_MAC_CTL_EEE;
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_EEE;
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
+}
+
static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
int port)
{
@@ -1171,6 +1309,27 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
0x0001);
}
+/* Offset 0x0B: Port Association Vector */
+
+int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port,
+ u16 pav)
+{
+ u16 reg, mask;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
+ &reg);
+ if (err)
+ return err;
+
+ mask = mv88e6xxx_port_mask(chip);
+ reg &= ~mask;
+ reg |= pav & mask;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
+ reg);
+}
+
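/* Editor's note: a minimal sketch of the PAV encoding, assuming only the
 * helper above. The association vector is a port bitmap: learned source
 * addresses are attributed to the ports whose bits are set, and an empty
 * vector disables learning on the port entirely:
 *
 *	mv88e6xxx_port_set_assoc_vector(chip, port, BIT(port));
 *	mv88e6xxx_port_set_assoc_vector(chip, port, 0);
 */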
/* Offset 0x0C: Port ATU Control */
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
@@ -1185,6 +1344,156 @@ int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_PRI_OVERRIDE, 0);
}
+/* Offset 0x0E: Policy & MGMT Control Register for FAMILY 6191X 6193X 6393X */
+
+static int mv88e6393x_port_policy_read(struct mv88e6xxx_chip *chip, int port,
+ u16 pointer, u8 *data)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
+ pointer);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
+ &reg);
+ if (err)
+ return err;
+
+ *data = reg;
+
+ return 0;
+}
+
+static int mv88e6393x_port_policy_write(struct mv88e6xxx_chip *chip, int port,
+ u16 pointer, u8 data)
+{
+ u16 reg;
+
+ reg = MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE | pointer | data;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
+ reg);
+}
+
+static int mv88e6393x_port_policy_write_all(struct mv88e6xxx_chip *chip,
+ u16 pointer, u8 data)
+{
+ int err, port;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ if (dsa_is_unused_port(chip->ds, port))
+ continue;
+
+ err = mv88e6393x_port_policy_write(chip, port, pointer, data);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
+{
+ u16 ptr;
+ int err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, port);
+ if (err)
+ return err;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ ptr = MV88E6393X_G2_EGRESS_MONITOR_DEST;
+ err = mv88e6xxx_g2_write(chip, ptr, port);
+ if (err)
+ return err;
+ break;
+ }
+
+ return 0;
+}
+
+int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port)
+{
+ u16 ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST;
+ u8 data = MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI |
+ upstream_port;
+
+ return mv88e6393x_port_policy_write(chip, port, ptr, data);
+}
+
+int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ u16 ptr;
+ int err;
+
+ /* Consider frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:0x and
+ * 01:80:c2:00:00:2x as MGMT.
+ */
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Offset 0x10 & 0x11: EPC */
+
+static int mv88e6393x_port_epc_wait_ready(struct mv88e6xxx_chip *chip, int port)
+{
+ int bit = __bf_shf(MV88E6393X_PORT_EPC_CMD_BUSY);
+
+ return mv88e6xxx_port_wait_bit(chip, port, MV88E6393X_PORT_EPC_CMD, bit, 0);
+}
+
+/* Port Ether type for the 6393X family */
+
+int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6393x_port_epc_wait_ready(chip, port);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_DATA, etype);
+ if (err)
+ return err;
+
+ val = MV88E6393X_PORT_EPC_CMD_BUSY |
+ MV88E6393X_PORT_EPC_CMD_WRITE |
+ MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_CMD, val);
+}
+
/* Offset 0x0f: Port Ether type */
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
@@ -1259,46 +1568,43 @@ int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
/* Offset 0x0E: Policy Control Register */
-int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
- enum mv88e6xxx_policy_mapping mapping,
- enum mv88e6xxx_policy_action action)
+static int
+mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action,
+ u16 *mask, u16 *val, int *shift)
{
- u16 reg, mask, val;
- int shift;
- int err;
-
switch (mapping) {
case MV88E6XXX_POLICY_MAPPING_DA:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_SA:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_VTU:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_ETYPE:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_PPPOE:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_VBAS:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_OPT82:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_UDP:
- shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK);
- mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK;
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK;
break;
default:
return -EOPNOTSUPP;
@@ -1306,21 +1612,37 @@ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
switch (action) {
case MV88E6XXX_POLICY_ACTION_NORMAL:
- val = MV88E6XXX_PORT_POLICY_CTL_NORMAL;
+ *val = MV88E6XXX_PORT_POLICY_CTL_NORMAL;
break;
case MV88E6XXX_POLICY_ACTION_MIRROR:
- val = MV88E6XXX_PORT_POLICY_CTL_MIRROR;
+ *val = MV88E6XXX_PORT_POLICY_CTL_MIRROR;
break;
case MV88E6XXX_POLICY_ACTION_TRAP:
- val = MV88E6XXX_PORT_POLICY_CTL_TRAP;
+ *val = MV88E6XXX_PORT_POLICY_CTL_TRAP;
break;
case MV88E6XXX_POLICY_ACTION_DISCARD:
- val = MV88E6XXX_PORT_POLICY_CTL_DISCARD;
+ *val = MV88E6XXX_PORT_POLICY_CTL_DISCARD;
break;
default:
return -EOPNOTSUPP;
}
+ return 0;
+}
+
+int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action)
+{
+ u16 reg, mask, val;
+ int shift;
+ int err;
+
+ err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
+ &val, &shift);
+ if (err)
+ return err;
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_POLICY_CTL, &reg);
if (err)
return err;
@@ -1330,3 +1652,37 @@ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_POLICY_CTL, reg);
}
+
+int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action)
+{
+ u16 mask, val;
+ int shift;
+ int err;
+ u16 ptr;
+ u8 reg;
+
+ err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
+ &val, &shift);
+ if (err)
+ return err;
+
+ /* On the 6393x, the 16-bit Port Policy CTL register of older chips is
+ * replaced by the Port Policy MGMT CTL register, which can access more
+ * data, but only indirectly. The original 16-bit value is split across
+ * two 8-bit registers.
+ */
+ ptr = shift / 8;
+ shift %= 8;
+ mask >>= ptr * 8;
+
+ err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
+ if (err)
+ return err;
+
+ reg &= ~mask;
+ reg |= (val << shift) & mask;
+
+ return mv88e6393x_port_policy_write(chip, port, ptr, reg);
+}
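
To make the byte-splitting arithmetic concrete, here is a standalone sketch with an illustrative 2-bit field at bits 11:10 (mask 0x0c00); the real field masks are defined in port.h and may differ.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mask = 0x0c00;	/* illustrative field at bits 11:10 */
	int shift = 10;		/* __bf_shf(mask) */
	uint16_t val = 0x2;	/* e.g. the TRAP action value */
	uint8_t reg = 0x55;	/* pretend current register contents */

	int ptr = shift / 8;	/* 1: field lives in the high byte */
	shift %= 8;		/* 2: bit offset within that byte */
	mask >>= ptr * 8;	/* 0x0c: mask narrowed to 8 bits */

	reg &= ~mask;
	reg |= (val << shift) & mask;

	printf("ptr=%d shift=%d mask=0x%02x reg=0x%02x\n",
	       ptr, shift, mask, reg);	/* ptr=1 shift=2 mask=0x0c reg=0x59 */
	return 0;
}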
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index e6d0eaa6aa1d..b10e5aebacf6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -49,6 +49,9 @@
#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d
+#define MV88E6393X_PORT_STS_CMODE_5GBASER 0x000c
+#define MV88E6393X_PORT_STS_CMODE_10GBASER 0x000d
+#define MV88E6393X_PORT_STS_CMODE_USXGMII 0x000e
#define MV88E6185_PORT_STS_CDUPLEX 0x0008
#define MV88E6185_PORT_STS_CMODE_MASK 0x0007
#define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000
@@ -68,6 +71,8 @@
#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000
#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000
#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000
+#define MV88E6XXX_PORT_MAC_CTL_EEE 0x0200
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_EEE 0x0100
#define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400
#define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200
#define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100
@@ -117,6 +122,8 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6176 0x1760
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191X 0x1920
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6193X 0x1930
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
@@ -129,6 +136,7 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6350 0x3710
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6351 0x3750
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6390 0x3900
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6393X 0x3930
#define MV88E6XXX_PORT_SWITCH_ID_REV_MASK 0x000f
/* Offset 0x04: Port Control Register */
@@ -236,6 +244,19 @@
#define MV88E6XXX_PORT_POLICY_CTL_TRAP 0x0002
#define MV88E6XXX_PORT_POLICY_CTL_DISCARD 0x0003
+/* Offset 0x0E: Policy & MGMT Control Register (FAMILY_6393X) */
+#define MV88E6393X_PORT_POLICY_MGMT_CTL 0x0e
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE 0x8000
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_MASK 0x3f00
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_DATA_MASK 0x00ff
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO 0x2000
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI 0x2100
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO 0x2400
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI 0x2500
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST 0x3000
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST 0x3800
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI 0x00e0
+
/* Offset 0x0F: Port Special Ether Type */
#define MV88E6XXX_PORT_ETH_TYPE 0x0f
#define MV88E6XXX_PORT_ETH_TYPE_DEFAULT 0x9100
@@ -243,6 +264,15 @@
/* Offset 0x10: InDiscards Low Counter */
#define MV88E6XXX_PORT_IN_DISCARD_LO 0x10
+/* Offset 0x10: Extended Port Control Command */
+#define MV88E6393X_PORT_EPC_CMD 0x10
+#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000
+#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300
+#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02
+
+/* Offset 0x11: Extended Port Control Data */
+#define MV88E6393X_PORT_EPC_DATA 0x11
+
/* Offset 0x11: InDiscards High Counter */
#define MV88E6XXX_PORT_IN_DISCARD_HI 0x11
@@ -288,6 +318,8 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val);
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
u16 val);
+int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
+ int bit, int val);
int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
int pause);
@@ -315,10 +347,13 @@ int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
+int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
phy_interface_t mv88e6341_port_max_speed_mode(int port);
phy_interface_t mv88e6390_port_max_speed_mode(int port);
phy_interface_t mv88e6390x_port_max_speed_mode(int port);
+phy_interface_t mv88e6393x_port_max_speed_mode(int port);
int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
@@ -351,8 +386,19 @@ int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_policy_mapping mapping,
enum mv88e6xxx_policy_action action);
+int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action);
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype);
+int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
+int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
bool message_port);
int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
@@ -361,6 +407,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
size_t size);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port,
+ u16 pav);
int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
@@ -371,6 +419,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
+int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 3195936dc5be..e4fbef81bc52 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -95,7 +95,7 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
return 0;
}
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool up)
{
u16 val, new_val;
@@ -117,7 +117,7 @@ int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
}
int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- u8 lane, unsigned int mode,
+ int lane, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertise)
{
@@ -166,7 +166,7 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state)
+ int lane, struct phylink_link_state *state)
{
u16 lpa, status;
int err;
@@ -187,7 +187,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
u16 bmcr;
int err;
@@ -200,7 +200,7 @@ int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- u8 lane, int speed, int duplex)
+ int lane, int speed, int duplex)
{
u16 val, bmcr;
int err;
@@ -230,10 +230,10 @@ int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
}
-u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
- u8 lane = 0;
+ int lane = -ENODEV;
if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) ||
(cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) ||
@@ -245,7 +245,7 @@ u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6xxx_serdes_get_lane(chip, port))
+ if (mv88e6xxx_serdes_get_lane(chip, port) >= 0)
return true;
return false;
@@ -354,7 +354,7 @@ static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
}
irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
irqreturn_t ret = IRQ_NONE;
u16 status;
@@ -372,7 +372,7 @@ irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
return ret;
}
-int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable)
{
u16 val = 0;
@@ -413,10 +413,10 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
-u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
- u8 lane = 0;
+ int lane = -ENODEV;
switch (port) {
case 5:
@@ -430,7 +430,7 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool up)
{
/* The serdes power can't be controlled on this switch chip but we need
@@ -440,23 +440,23 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
return 0;
}
-u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
/* There are no configurable serdes lanes on this switch chip but we
- * need to return non-zero so that callers of
+ * need to return a non-negative lane number so that callers of
* mv88e6xxx_serdes_get_lane() know this is a serdes port.
*/
switch (chip->ports[port].cmode) {
case MV88E6185_PORT_STS_CMODE_SERDES:
case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- return 0xff;
- default:
return 0;
+ default:
+ return -ENODEV;
}
}
int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state)
+ int lane, struct phylink_link_state *state)
{
int err;
u16 status;
@@ -492,7 +492,7 @@ int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable)
{
u8 cmode = chip->ports[port].cmode;
@@ -525,7 +525,7 @@ static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
}
irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
u8 cmode = chip->ports[port].cmode;
@@ -539,10 +539,10 @@ irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
return IRQ_NONE;
}
-u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
- u8 lane = 0;
+ int lane = -ENODEV;
switch (port) {
case 9:
@@ -562,12 +562,12 @@ u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode_port = chip->ports[port].cmode;
u8 cmode_port10 = chip->ports[10].cmode;
u8 cmode_port9 = chip->ports[9].cmode;
- u8 lane = 0;
+ int lane = -ENODEV;
switch (port) {
case 2:
@@ -637,8 +637,29 @@ u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
+/* Only ports 0, 9 and 10 have SERDES lanes. Return the SERDES lane address
+ * a port is using, or -ENODEV otherwise.
+ */
+int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int lane = -ENODEV;
+
+ if (port != 0 && port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode == MV88E6393X_PORT_STS_CMODE_5GBASER ||
+ cmode == MV88E6393X_PORT_STS_CMODE_10GBASER)
+ lane = port;
+
+ return lane;
+}
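
Note why the u8-to-int conversion running through this file matters here: on the 6393x the lane address equals the port number, so lane 0 is a valid value and can no longer double as the "no serdes" sentinel. A hedged sketch of the caller-side convention, with get_lane() standing in for the chip->info->ops->serdes_get_lane hook:

#include <stdbool.h>

/* Stand-in for chip->info->ops->serdes_get_lane: returns a lane
 * number >= 0, or a negative errno such as -ENODEV.
 */
int get_lane(int port);

static bool port_has_serdes(int port)
{
	/* mirrors the mv88e6352_port_has_serdes() change above */
	return get_lane(port) >= 0;
}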
+
/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane,
+static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
bool up)
{
u16 val, new_val;
@@ -665,7 +686,7 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane,
}
/* Set power up/down for SGMII and 1000Base-X */
-static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
+static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
bool up)
{
u16 val, new_val;
@@ -701,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ if (mv88e6390_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
@@ -713,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
struct mv88e6390_serdes_hw_stat *stat;
int i;
- if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ if (mv88e6390_serdes_get_lane(chip, port) < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@@ -750,7 +771,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
int i;
lane = mv88e6390_serdes_get_lane(chip, port);
- if (lane == 0)
+ if (lane < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@@ -761,7 +782,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
-static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane)
+static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane)
{
u16 reg;
int err;
@@ -776,7 +797,7 @@ static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane)
MV88E6390_PG_CONTROL, reg);
}
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool up)
{
u8 cmode = chip->ports[port].cmode;
@@ -801,7 +822,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
}
int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- u8 lane, unsigned int mode,
+ int lane, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertise)
{
@@ -860,7 +881,7 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
}
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
- int port, u8 lane, struct phylink_link_state *state)
+ int port, int lane, struct phylink_link_state *state)
{
u16 lpa, status;
int err;
@@ -883,7 +904,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
- int port, u8 lane, struct phylink_link_state *state)
+ int port, int lane, struct phylink_link_state *state)
{
u16 status;
int err;
@@ -902,8 +923,32 @@ static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
return 0;
}
+static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
+ int port, int lane,
+ struct phylink_link_state *state)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ if (state->interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ }
+
+ return 0;
+}
+
int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state)
+ int lane, struct phylink_link_state *state)
{
switch (state->interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -921,8 +966,27 @@ int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
}
}
+int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state)
+{
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
+ state);
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane,
+ state);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
u16 bmcr;
int err;
@@ -938,7 +1002,7 @@ int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- u8 lane, int speed, int duplex)
+ int lane, int speed, int duplex)
{
u16 val, bmcr;
int err;
@@ -972,7 +1036,7 @@ int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
}
static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+ int port, int lane)
{
u16 bmsr;
int err;
@@ -988,8 +1052,25 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
}
+static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ u16 status;
+ int err;
+
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS));
+}
+
static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
- u8 lane, bool enable)
+ int lane, bool enable)
{
u16 val = 0;
@@ -1001,7 +1082,7 @@ static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
MV88E6390_SGMII_INT_ENABLE, val);
}
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable)
{
u8 cmode = chip->ports[port].cmode;
@@ -1017,7 +1098,7 @@ int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
}
static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
- u8 lane, u16 *status)
+ int lane, u16 *status)
{
int err;
@@ -1027,8 +1108,85 @@ static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
+static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip,
+ u8 lane, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val |= MV88E6393X_10G_INT_LINK_CHANGE;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_10G_INT_ENABLE, val);
+}
+
+int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane, bool enable)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
+ case MV88E6393X_PORT_STS_CMODE_5GBASER:
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable);
+ }
+
+ return 0;
+}
+
+static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip,
+ u8 lane, u16 *status)
+{
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_10G_INT_STATUS, status);
+
+ return err;
+}
+
+irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+ irqreturn_t ret = IRQ_NONE;
+ u16 status;
+ int err;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
+ if (err)
+ return ret;
+ if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ ret = IRQ_HANDLED;
+ mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
+ }
+ break;
+ case MV88E6393X_PORT_STS_CMODE_5GBASER:
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status);
+ if (err)
+ return err;
+ if (status & MV88E6393X_10G_INT_LINK_CHANGE) {
+ ret = IRQ_HANDLED;
+ mv88e6393x_serdes_irq_link_10g(chip, port, lane);
+ }
+ break;
+ }
+
+ return ret;
+}
+
irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- u8 lane)
+ int lane)
{
u8 cmode = chip->ports[port].cmode;
irqreturn_t ret = IRQ_NONE;
@@ -1087,7 +1245,7 @@ static const u16 mv88e6390_serdes_regs[] = {
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6xxx_serdes_get_lane(chip, port) == 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16);
@@ -1102,7 +1260,7 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
int i;
lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane == 0)
+ if (lane < 0)
return;
for (i = 0 ; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) {
@@ -1112,3 +1270,101 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
p[i] = reg;
}
}
+
+static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg, pcs;
+ int err;
+
+ /* mv88e6393x family errata 4.6:
+ * The PwrDn bit on the port 0 SERDES cannot be cleared if the device
+ * is configured in CPU_MGD mode or P0_mode is configured for [x]MII.
+ * Workaround: set the port 0 SERDES register 4.F002 bit 5 to 0 and
+ * bit 15 to 1.
+ *
+ * It seems that after this workaround the SERDES is automatically
+ * powered up (the bit is cleared), so power it down.
+ */
+ if (lane == MV88E6393X_PORT0_LANE) {
+ err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE,
+ MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
+ if (err)
+ return err;
+
+ err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+ if (err)
+ return err;
+ }
+
+ /* mv88e6393x family errata 4.8:
+ * When a SERDES port is operating in 1000BASE-X or SGMII mode, the
+ * link may not come up after a hardware or software reset of the
+ * SERDES core. The workaround is to set SERDES register 4.F074 bit 14
+ * to 1 for those modes only, and to 0 in all other modes.
+ */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &pcs);
+ if (err)
+ return err;
+
+ pcs &= MV88E6393X_SERDES_POC_PCS_MASK;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_ERRATA_4_8_REG, &reg);
+ if (err)
+ return err;
+
+ if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
+ pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
+ pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
+ reg |= MV88E6393X_ERRATA_4_8_BIT;
+ else
+ reg &= ~MV88E6393X_ERRATA_4_8_BIT;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_ERRATA_4_8_REG, reg);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
+}
+
+int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool on)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ if (port != 0 && port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ return mv88e6390_serdes_power_sgmii(chip, lane, on);
+ case MV88E6393X_PORT_STS_CMODE_5GBASER:
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ return mv88e6390_serdes_power_10g(chip, lane, on);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 93822ef9bab8..cbb3ba30caea 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -42,6 +42,9 @@
/* 10GBASE-R and 10GBASE-X4/X2 */
#define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1)
#define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1)
+#define MV88E6393X_10G_INT_ENABLE 0x9000
+#define MV88E6393X_10G_INT_LINK_CHANGE BIT(2)
+#define MV88E6393X_10G_INT_STATUS 0x9001
/* 1000BASE-X and SGMII */
#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
@@ -73,55 +76,86 @@
#define MV88E6390_PG_CONTROL 0xf010
#define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0)
-u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+#define MV88E6393X_PORT0_LANE 0x00
+#define MV88E6393X_PORT9_LANE 0x09
+#define MV88E6393X_PORT10_LANE 0x0a
+
+/* Port Operational Configuration */
+#define MV88E6393X_SERDES_POC 0xf002
+#define MV88E6393X_SERDES_POC_PCS_1000BASEX 0x0000
+#define MV88E6393X_SERDES_POC_PCS_2500BASEX 0x0001
+#define MV88E6393X_SERDES_POC_PCS_SGMII_PHY 0x0002
+#define MV88E6393X_SERDES_POC_PCS_SGMII_MAC 0x0003
+#define MV88E6393X_SERDES_POC_PCS_5GBASER 0x0004
+#define MV88E6393X_SERDES_POC_PCS_10GBASER 0x0005
+#define MV88E6393X_SERDES_POC_PCS_USXGMII_PHY 0x0006
+#define MV88E6393X_SERDES_POC_PCS_USXGMII_MAC 0x0007
+#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007
+#define MV88E6393X_SERDES_POC_RESET BIT(15)
+#define MV88E6393X_SERDES_POC_PDOWN BIT(5)
+
+#define MV88E6393X_ERRATA_4_8_REG 0xF074
+#define MV88E6393X_ERRATA_4_8_BIT BIT(14)
+
+int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- u8 lane, unsigned int mode,
+ int lane, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertise);
int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- u8 lane, unsigned int mode,
+ int lane, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertise);
int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state);
+ int lane, struct phylink_link_state *state);
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state);
+ int lane, struct phylink_link_state *state);
int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state);
+ int lane, struct phylink_link_state *state);
+int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state);
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- u8 lane, int speed, int duplex);
+ int lane, int speed, int duplex);
int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- u8 lane, int speed, int duplex);
+ int lane, int speed, int duplex);
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
-int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool up);
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool on);
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool on);
-int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool on);
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip);
+int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable);
-int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable);
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
bool enable);
+int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane, bool enable);
irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- u8 lane);
+ int lane);
+irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
@@ -138,18 +172,18 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
-/* Return the (first) SERDES lane address a port is using, 0 otherwise. */
-static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
- int port)
+/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
+static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
+ int port)
{
if (!chip->info->ops->serdes_get_lane)
- return 0;
+ return -EOPNOTSUPP;
return chip->info->ops->serdes_get_lane(chip, port);
}
static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+ int port, int lane)
{
if (!chip->info->ops->serdes_power)
return -EOPNOTSUPP;
@@ -158,7 +192,7 @@ static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip,
}
static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+ int port, int lane)
{
if (!chip->info->ops->serdes_power)
return -EOPNOTSUPP;
@@ -176,7 +210,7 @@ mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
}
static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+ int port, int lane)
{
if (!chip->info->ops->serdes_irq_enable)
return -EOPNOTSUPP;
@@ -185,7 +219,7 @@ static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip,
}
static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+ int port, int lane)
{
if (!chip->info->ops->serdes_irq_enable)
return -EOPNOTSUPP;
@@ -194,7 +228,7 @@ static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip,
}
static inline irqreturn_t
-mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, u8 lane)
+mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane)
{
if (!chip->info->ops->serdes_irq_status)
return IRQ_NONE;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 628afb47b579..ce607fbaaa3a 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -719,7 +719,9 @@ static int felix_bridge_join(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_bridge_join(ocelot, port, br);
+ ocelot_port_bridge_join(ocelot, port, br);
+
+ return 0;
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
@@ -1393,19 +1395,20 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
return false;
}
-static bool felix_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *clone, unsigned int type)
+static void felix_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
{
struct ocelot *ocelot = ds->priv;
- struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct sk_buff *clone = NULL;
- if (ocelot->ptp && (skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP) &&
- ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- ocelot_port_add_txtstamp_skb(ocelot, port, clone);
- return true;
- }
+ if (!ocelot->ptp)
+ return;
- return false;
+ if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
+ return;
+
+ if (clone)
+ OCELOT_SKB_CB(skb)->clone = clone;
}
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 5ff623ee76a6..2473bebe48e6 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1057,10 +1057,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
res.end += felix->imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imdio_regs)) {
- dev_err(dev, "failed to map internal MDIO registers\n");
+ if (IS_ERR(imdio_regs))
return PTR_ERR(imdio_regs);
- }
hw = enetc_hw_alloc(dev, imdio_regs);
if (IS_ERR(hw)) {
@@ -1229,8 +1227,12 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
return -ERANGE;
- ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
- QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ /* Set the port number and clear ALWAYS_GUARD_BAND_SCH_Q, so that the
+ * guard band is applied only to transitions from non-scheduled queues
+ * to scheduled queues.
+ */
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
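
The rewritten call leans on read-modify-write semantics in which any bit named in the mask but absent from the value is cleared; that is how ALWAYS_GUARD_BAND_SCH_Q ends up disabled without a separate write. A standalone sketch of that shape (the bit positions are illustrative, not the QSYS register layout):

#include <stdint.h>
#include <stdio.h>

static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffff;	/* pretend current contents */
	uint32_t port_num = 0x3;	/* illustrative field value */
	uint32_t guard_band = 0x40;	/* illustrative flag bit */

	/* guard_band is in the mask but not the value: it gets cleared. */
	reg = rmw(reg, port_num, port_num | guard_band);
	printf("reg=0x%08x\n", reg);	/* 0xffffffbf */
	return 0;
}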
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 12e76020bea3..973761132fc3 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -317,11 +317,16 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = -EOPNOTSUPP;
-
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
act->police.rate_bytes_ps,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 51ea104c63bb..405024b637d6 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3049,21 +3049,6 @@ static void sja1105_teardown(struct dsa_switch *ds)
}
}
-static int sja1105_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct net_device *slave;
-
- if (!dsa_is_user_port(ds, port))
- return 0;
-
- slave = dsa_to_port(ds, port)->slave;
-
- slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
-
- return 0;
-}
-
static void sja1105_port_disable(struct dsa_switch *ds, int port)
{
struct sja1105_private *priv = ds->priv;
@@ -3152,7 +3137,7 @@ static void sja1105_port_deferred_xmit(struct kthread_work *work)
struct sk_buff *skb;
while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
- struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
+ struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
mutex_lock(&priv->mgmt_lock);
@@ -3491,7 +3476,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
- .port_enable = sja1105_port_enable,
.port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 1b90570b257b..0bc566b9e958 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -431,20 +431,24 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return true;
}
-/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
- * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
+/* Called from dsa_skb_tx_timestamp. This callback is just to clone
+ * the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
* callback, where we will timestamp it synchronously.
*/
-bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
+ struct sk_buff *clone;
if (!sp->hwts_tx_en)
- return false;
+ return;
- return true;
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ SJA1105_SKB_CB(skb)->clone = clone;
}
static int sja1105_ptp_reset(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 3daa33e98e77..34f97f58a355 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -104,8 +104,8 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
-bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type);
+void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 53e1f7e07959..96cc5fc36eb5 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1051,6 +1051,7 @@ el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd)
break;
case 3:
cmd->base.port = PORT_BNC;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index ad04660b97b8..1cdff1dca790 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -19,6 +19,7 @@ config SUNGEM_PHY
tristate
source "drivers/net/ethernet/3com/Kconfig"
+source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
@@ -81,6 +82,7 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -97,7 +99,8 @@ config JME
config KORINA
tristate "Korina (IDT RC32434) Ethernet support"
- depends on MIKROTIK_RB532
+ depends on MIKROTIK_RB532 || COMPILE_TEST
+ select MII
help
If you have a Mikrotik RouterBoard 500 or IDT RC32434
based system say Y. Otherwise say N.
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1e7dc8a7762d..cb3f9084a21b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
obj-$(CONFIG_NET_VENDOR_8390) += 8390/
+obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
@@ -44,6 +45,7 @@ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
+obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/
obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
diff --git a/drivers/net/ethernet/actions/Kconfig b/drivers/net/ethernet/actions/Kconfig
new file mode 100644
index 000000000000..ccad6a3f4d6f
--- /dev/null
+++ b/drivers/net/ethernet/actions/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NET_VENDOR_ACTIONS
+ bool "Actions Semi devices"
+ default y
+ depends on ARCH_ACTIONS
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about Actions Semi devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_ACTIONS
+
+config OWL_EMAC
+ tristate "Actions Semi Owl Ethernet MAC support"
+ select PHYLIB
+ help
+ This driver supports the Actions Semi Ethernet Media Access
+ Controller (EMAC) found on the S500 and S900 SoCs. The controller
+ is compliant with the IEEE 802.3 CSMA/CD standard and supports
+ both half-duplex and full-duplex operation modes at 10/100 Mb/s.
+
+endif # NET_VENDOR_ACTIONS
diff --git a/drivers/net/ethernet/actions/Makefile b/drivers/net/ethernet/actions/Makefile
new file mode 100644
index 000000000000..fde8001d538a
--- /dev/null
+++ b/drivers/net/ethernet/actions/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Actions Semi Owl SoCs built-in Ethernet MACs
+#
+
+obj-$(CONFIG_OWL_EMAC) += owl-emac.o
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
new file mode 100644
index 000000000000..b8e771c2bc40
--- /dev/null
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -0,0 +1,1625 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semi Owl SoCs Ethernet MAC driver
+ *
+ * Copyright (c) 2012 Actions Semi Inc.
+ * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reset.h>
+
+#include "owl-emac.h"
+
+#define OWL_EMAC_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg)
+{
+ return readl(priv->base + reg);
+}
+
+static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data)
+{
+ writel(data, priv->base + reg);
+}
+
+static u32 owl_emac_reg_update(struct owl_emac_priv *priv,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 data, old_val;
+
+ data = owl_emac_reg_read(priv, reg);
+ old_val = data & mask;
+
+ data &= ~mask;
+ data |= val & mask;
+
+ owl_emac_reg_write(priv, reg, data);
+
+ return old_val;
+}
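
Returning the field's previous contents is what later enables the stop/reprogram/restore idiom in owl_emac_update_link_state() and owl_emac_setup_frame_xmit(). A standalone sketch of that shape, with a static variable standing in for the MMIO register:

#include <stdint.h>

static uint32_t hw_reg;	/* stand-in for the MMIO register */

/* Same contract as owl_emac_reg_update(): install the new field
 * value, hand back the old one.
 */
static uint32_t reg_update(uint32_t mask, uint32_t val)
{
	uint32_t old = hw_reg & mask;

	hw_reg = (hw_reg & ~mask) | (val & mask);
	return old;
}

int main(void)
{
	uint32_t saved = reg_update(0x3, 0);	/* stop: clear both bits */
	/* ... reprogram other fields while DMA is parked ... */
	reg_update(0x3, saved);			/* restore as found */
	return 0;
}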
+
+static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits)
+{
+ owl_emac_reg_update(priv, reg, bits, bits);
+}
+
+static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits)
+{
+ owl_emac_reg_update(priv, reg, bits, 0);
+}
+
+static struct device *owl_emac_get_dev(struct owl_emac_priv *priv)
+{
+ return priv->netdev->dev.parent;
+}
+
+static void owl_emac_irq_enable(struct owl_emac_priv *priv)
+{
+ /* Enable all interrupts except TU.
+ *
+ * Note that the NIE and AIE bits must also be set in order to
+ * actually enable the selected interrupts.
+ */
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
+ OWL_EMAC_BIT_MAC_CSR7_NIE |
+ OWL_EMAC_BIT_MAC_CSR7_AIE |
+ OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE);
+}
+
+static void owl_emac_irq_disable(struct owl_emac_priv *priv)
+{
+ /* Disable all interrupts.
+ *
+ * WARNING: Unset only the NIE and AIE bits in CSR7 to work around an
+ * unexpected side effect (MAC hardware bug?!) where some bits in the
+ * status register (CSR5) would otherwise be cleared automatically
+ * before they can be read via owl_emac_irq_clear().
+ */
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
+ OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE);
+}
+
+static u32 owl_emac_irq_status(struct owl_emac_priv *priv)
+{
+ return owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR5);
+}
+
+static u32 owl_emac_irq_clear(struct owl_emac_priv *priv)
+{
+ u32 val = owl_emac_irq_status(priv);
+
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR5, val);
+
+ return val;
+}
+
+static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = owl_emac_get_dev(priv);
+
+ /* Buffer pointer for the RX DMA descriptor must be word aligned. */
+ return dma_map_single(dev, skb_tail_pointer(skb),
+ skb_tailroom(skb), DMA_FROM_DEVICE);
+}
+
+static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv,
+ struct sk_buff *skb, dma_addr_t dma_addr)
+{
+ struct device *dev = owl_emac_get_dev(priv);
+
+ dma_unmap_single(dev, dma_addr, skb_tailroom(skb), DMA_FROM_DEVICE);
+}
+
+static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = owl_emac_get_dev(priv);
+
+ return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+}
+
+static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv,
+ struct sk_buff *skb, dma_addr_t dma_addr)
+{
+ struct device *dev = owl_emac_get_dev(priv);
+
+ dma_unmap_single(dev, dma_addr, skb_headlen(skb), DMA_TO_DEVICE);
+}
+
+static unsigned int owl_emac_ring_num_unused(struct owl_emac_ring *ring)
+{
+ return CIRC_SPACE(ring->head, ring->tail, ring->size);
+}
+
+static unsigned int owl_emac_ring_get_next(struct owl_emac_ring *ring,
+ unsigned int cur)
+{
+ return (cur + 1) & (ring->size - 1);
+}
+
+static void owl_emac_ring_push_head(struct owl_emac_ring *ring)
+{
+ ring->head = owl_emac_ring_get_next(ring, ring->head);
+}
+
+static void owl_emac_ring_pop_tail(struct owl_emac_ring *ring)
+{
+ ring->tail = owl_emac_ring_get_next(ring, ring->tail);
+}
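
These helpers assume the ring size is a power of two, so the wrap-around is a mask and CIRC_SPACE() from <linux/circ_buf.h> reduces to simple arithmetic. A standalone sketch of the same invariants:

#include <stdio.h>

#define RING_SIZE 8	/* must be a power of two */

static unsigned int ring_next(unsigned int cur)
{
	return (cur + 1) & (RING_SIZE - 1);
}

static unsigned int ring_space(unsigned int head, unsigned int tail)
{
	/* what CIRC_SPACE(head, tail, size) expands to */
	return (tail - head - 1) & (RING_SIZE - 1);
}

int main(void)
{
	unsigned int head = 7, tail = 3;

	printf("next=%u space=%u\n", ring_next(head),
	       ring_space(head, tail));	/* next=0 space=3 */
	return 0;
}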
+
+static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev)
+{
+ struct sk_buff *skb;
+ int offset;
+
+ skb = netdev_alloc_skb(netdev, OWL_EMAC_RX_FRAME_MAX_LEN +
+ OWL_EMAC_SKB_RESERVE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Ensure 4-byte DMA alignment. */
+ offset = ((uintptr_t)skb->data) & (OWL_EMAC_SKB_ALIGN - 1);
+ if (unlikely(offset))
+ skb_reserve(skb, OWL_EMAC_SKB_ALIGN - offset);
+
+ return skb;
+}
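
The reservation math rounds skb->data up to the next aligned boundary. A standalone sketch with OWL_EMAC_SKB_ALIGN assumed to be 4:

#include <stdint.h>
#include <stdio.h>

#define SKB_ALIGN 4	/* assumed value of OWL_EMAC_SKB_ALIGN */

int main(void)
{
	uintptr_t data = 0x1002;	/* illustrative unaligned pointer */
	unsigned int offset = data & (SKB_ALIGN - 1);

	if (offset)
		data += SKB_ALIGN - offset;	/* what skb_reserve() does */

	printf("aligned data = 0x%lx\n",
	       (unsigned long)data);	/* 0x1004 */
	return 0;
}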
+
+static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->rx_ring;
+ struct device *dev = owl_emac_get_dev(priv);
+ struct net_device *netdev = priv->netdev;
+ struct owl_emac_ring_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ skb = owl_emac_alloc_skb(netdev);
+ if (!skb)
+ return -ENOMEM;
+
+ dma_addr = owl_emac_dma_map_rx(priv, skb);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ desc = &ring->descs[i];
+ desc->status = OWL_EMAC_BIT_RDES0_OWN;
+ desc->control = skb_tailroom(skb) & OWL_EMAC_MSK_RDES1_RBS1;
+ desc->buf_addr = dma_addr;
+ desc->reserved = 0;
+
+ ring->skbs[i] = skb;
+ ring->skbs_dma[i] = dma_addr;
+ }
+
+ desc->control |= OWL_EMAC_BIT_RDES1_RER;
+
+ ring->head = 0;
+ ring->tail = 0;
+
+ return 0;
+}
+
+static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->tx_ring;
+ struct owl_emac_ring_desc *desc;
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ desc = &ring->descs[i];
+
+ desc->status = 0;
+ desc->control = OWL_EMAC_BIT_TDES1_IC;
+ desc->buf_addr = 0;
+ desc->reserved = 0;
+ }
+
+ desc->control |= OWL_EMAC_BIT_TDES1_TER;
+
+ memset(ring->skbs_dma, 0, sizeof(dma_addr_t) * ring->size);
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->rx_ring;
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ ring->descs[i].status = 0;
+
+ if (!ring->skbs_dma[i])
+ continue;
+
+ owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]);
+ ring->skbs_dma[i] = 0;
+
+ dev_kfree_skb(ring->skbs[i]);
+ ring->skbs[i] = NULL;
+ }
+}
+
+static void owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->tx_ring;
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ ring->descs[i].status = 0;
+
+ if (!ring->skbs_dma[i])
+ continue;
+
+ owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]);
+ ring->skbs_dma[i] = 0;
+
+ dev_kfree_skb(ring->skbs[i]);
+ ring->skbs[i] = NULL;
+ }
+}
+
+static int owl_emac_ring_alloc(struct device *dev, struct owl_emac_ring *ring,
+ unsigned int size)
+{
+ ring->descs = dmam_alloc_coherent(dev,
+ sizeof(struct owl_emac_ring_desc) * size,
+ &ring->descs_dma, GFP_KERNEL);
+ if (!ring->descs)
+ return -ENOMEM;
+
+ ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!ring->skbs)
+ return -ENOMEM;
+
+ ring->skbs_dma = devm_kcalloc(dev, size, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!ring->skbs_dma)
+ return -ENOMEM;
+
+ ring->size = size;
+
+ return 0;
+}
+
+static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv)
+{
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR2,
+ OWL_EMAC_VAL_MAC_CSR2_RPD);
+}
+
+static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv)
+{
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR1,
+ OWL_EMAC_VAL_MAC_CSR1_TPD);
+}
+
+static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status)
+{
+ return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
+ OWL_EMAC_BIT_MAC_CSR6_ST, status);
+}
+
+static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv)
+{
+ return owl_emac_dma_cmd_set_tx(priv, ~0);
+}
+
+static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status)
+{
+ return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
+ OWL_EMAC_MSK_MAC_CSR6_STSR, status);
+}
+
+static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv)
+{
+ return owl_emac_dma_cmd_set(priv, ~0);
+}
+
+static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
+{
+ return owl_emac_dma_cmd_set(priv, 0);
+}
+
+static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ u8 *mac_addr = netdev->dev_addr;
+ u32 addr_high, addr_low;
+
+ addr_high = mac_addr[0] << 8 | mac_addr[1];
+ addr_low = mac_addr[2] << 24 | mac_addr[3] << 16 |
+ mac_addr[4] << 8 | mac_addr[5];
+
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR17, addr_high);
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR16, addr_low);
+}
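
The packing puts the first two address bytes in the "high" register and the remaining four in the "low" one. A standalone check with an arbitrary sample address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t addr_high = mac[0] << 8 | mac[1];
	uint32_t addr_low = mac[2] << 24 | mac[3] << 16 |
			    mac[4] << 8 | mac[5];

	printf("high=0x%04x low=0x%08x\n", addr_high, addr_low);
	/* high=0x0011 low=0x22334455 */
	return 0;
}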
+
+static void owl_emac_update_link_state(struct owl_emac_priv *priv)
+{
+ u32 val, status;
+
+ if (priv->pause) {
+ val = OWL_EMAC_BIT_MAC_CSR20_FCE | OWL_EMAC_BIT_MAC_CSR20_TUE;
+ val |= OWL_EMAC_BIT_MAC_CSR20_TPE | OWL_EMAC_BIT_MAC_CSR20_RPE;
+ val |= OWL_EMAC_BIT_MAC_CSR20_BPE;
+ } else {
+ val = 0;
+ }
+
+ /* Update flow control. */
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR20, val);
+
+ val = (priv->speed == SPEED_100) ? OWL_EMAC_VAL_MAC_CSR6_SPEED_100M :
+ OWL_EMAC_VAL_MAC_CSR6_SPEED_10M;
+ val <<= OWL_EMAC_OFF_MAC_CSR6_SPEED;
+
+ if (priv->duplex == DUPLEX_FULL)
+ val |= OWL_EMAC_BIT_MAC_CSR6_FD;
+
+ spin_lock_bh(&priv->lock);
+
+ /* Temporarily stop DMA TX & RX. */
+ status = owl_emac_dma_cmd_stop(priv);
+
+ /* Update operation modes. */
+ owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
+ OWL_EMAC_MSK_MAC_CSR6_SPEED |
+ OWL_EMAC_BIT_MAC_CSR6_FD, val);
+
+ /* Restore DMA TX & RX status. */
+ owl_emac_dma_cmd_set(priv, status);
+
+ spin_unlock_bh(&priv->lock);
+}
+
+static void owl_emac_adjust_link(struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool state_changed = false;
+
+ if (phydev->link) {
+ if (!priv->link) {
+ priv->link = phydev->link;
+ state_changed = true;
+ }
+
+ if (priv->speed != phydev->speed) {
+ priv->speed = phydev->speed;
+ state_changed = true;
+ }
+
+ if (priv->duplex != phydev->duplex) {
+ priv->duplex = phydev->duplex;
+ state_changed = true;
+ }
+
+ if (priv->pause != phydev->pause) {
+ priv->pause = phydev->pause;
+ state_changed = true;
+ }
+ } else {
+ if (priv->link) {
+ priv->link = phydev->link;
+ state_changed = true;
+ }
+ }
+
+ if (state_changed) {
+ if (phydev->link)
+ owl_emac_update_link_state(priv);
+
+ if (netif_msg_link(priv))
+ phy_print_status(phydev);
+ }
+}
+
+static irqreturn_t owl_emac_handle_irq(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ owl_emac_irq_disable(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
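
This is the standard NAPI hand-off: mask the device in hard-IRQ context and defer the work. The matching poll callback sits outside this hunk; a hedged sketch of its expected shape follows, where owl_emac_rx() and owl_emac_tx_complete() are hypothetical stand-ins for the driver's completion routines.

/* Hedged sketch only: the real poll function is not part of this hunk. */
static int owl_emac_poll_sketch(struct napi_struct *napi, int budget)
{
	struct owl_emac_priv *priv = container_of(napi, struct owl_emac_priv,
						  napi);
	int work;

	owl_emac_irq_clear(priv);		/* read + ack CSR5 */

	work = owl_emac_rx(priv, budget);	/* hypothetical RX routine */
	owl_emac_tx_complete(priv);		/* hypothetical TX routine */

	if (work < budget && napi_complete_done(napi, work))
		owl_emac_irq_enable(priv);	/* unmask the MAC again */

	return work;
}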
+
+static void owl_emac_ether_addr_push(u8 **dst, const u8 *src)
+{
+ u32 *a = (u32 *)(*dst);
+ const u16 *b = (const u16 *)src;
+
+ a[0] = b[0];
+ a[1] = b[1];
+ a[2] = b[2];
+
+ *dst += 12;
+}
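
Each 16-bit slice of the 6-byte address lands in the low half of a 32-bit slot, so one address consumes 12 bytes of the setup frame. A standalone sketch of the resulting layout (byte order shown for a little-endian machine, as the Owl SoCs are little-endian ARM):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	union {
		uint8_t bytes[12];
		uint32_t words[3];
	} frame = { { 0 } };
	uint16_t b[3];

	memcpy(b, mac, sizeof(b));	/* avoids alignment pitfalls */
	frame.words[0] = b[0];
	frame.words[1] = b[1];
	frame.words[2] = b[2];

	for (int i = 0; i < 12; i++)
		printf("%02x ", frame.bytes[i]);
	printf("\n");	/* aa bb 00 00 cc dd 00 00 ee ff 00 00 */
	return 0;
}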
+
+static void
+owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb)
+{
+ const u8 bcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ const u8 *mac_addr = priv->netdev->dev_addr;
+ u8 *frame;
+ int i;
+
+ skb_put(skb, OWL_EMAC_SETUP_FRAME_LEN);
+
+ frame = skb->data;
+ memset(frame, 0, skb->len);
+
+ owl_emac_ether_addr_push(&frame, mac_addr);
+ owl_emac_ether_addr_push(&frame, bcast_addr);
+
+ /* Fill multicast addresses. */
+ WARN_ON(priv->mcaddr_list.count >= OWL_EMAC_MAX_MULTICAST_ADDRS);
+ for (i = 0; i < priv->mcaddr_list.count; i++) {
+ mac_addr = priv->mcaddr_list.addrs[i];
+ owl_emac_ether_addr_push(&frame, mac_addr);
+ }
+}
+
+/* The setup frame is a special descriptor which is used to provide physical
+ * addresses (i.e. MAC, broadcast and multicast) to the MAC hardware for
+ * filtering purposes. To be recognized as a setup frame, the TDES1_SET bit
+ * must be set in the TX descriptor control field.
+ */
+static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->tx_ring;
+ struct net_device *netdev = priv->netdev;
+ struct owl_emac_ring_desc *desc;
+ struct sk_buff *skb;
+ unsigned int tx_head;
+ u32 status, control;
+ dma_addr_t dma_addr;
+ int ret;
+
+ skb = owl_emac_alloc_skb(netdev);
+ if (!skb)
+ return -ENOMEM;
+
+ owl_emac_setup_frame_prepare(priv, skb);
+
+ dma_addr = owl_emac_dma_map_tx(priv, skb);
+ if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) {
+ ret = -ENOMEM;
+ goto err_free_skb;
+ }
+
+ spin_lock_bh(&priv->lock);
+
+ tx_head = ring->head;
+ desc = &ring->descs[tx_head];
+
+ status = READ_ONCE(desc->status);
+ control = READ_ONCE(desc->control);
+ dma_rmb(); /* Ensure data has been read before used. */
+
+ if (unlikely(status & OWL_EMAC_BIT_TDES0_OWN) ||
+ !owl_emac_ring_num_unused(ring)) {
+ spin_unlock_bh(&priv->lock);
+ owl_emac_dma_unmap_tx(priv, skb, dma_addr);
+ ret = -EBUSY;
+ goto err_free_skb;
+ }
+
+ ring->skbs[tx_head] = skb;
+ ring->skbs_dma[tx_head] = dma_addr;
+
+ control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */
+ control |= OWL_EMAC_BIT_TDES1_SET;
+ control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len;
+
+ WRITE_ONCE(desc->control, control);
+ WRITE_ONCE(desc->buf_addr, dma_addr);
+ dma_wmb(); /* Flush descriptor before changing ownership. */
+ WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN);
+
+ owl_emac_ring_push_head(ring);
+
+ /* Temporarily enable DMA TX. */
+ status = owl_emac_dma_cmd_start_tx(priv);
+
+ /* Trigger setup frame processing. */
+ owl_emac_dma_cmd_resume_tx(priv);
+
+ /* Restore DMA TX status. */
+ owl_emac_dma_cmd_set_tx(priv, status);
+
+ /* Stop regular TX until setup frame is processed. */
+ netif_stop_queue(netdev);
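+	/* The queue is woken up again from owl_emac_tx_complete_tail()
+	 * once the hardware has processed the setup frame descriptor.
+	 */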
+
+ spin_unlock_bh(&priv->lock);
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static netdev_tx_t owl_emac_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ struct device *dev = owl_emac_get_dev(priv);
+ struct owl_emac_ring *ring = &priv->tx_ring;
+ struct owl_emac_ring_desc *desc;
+ unsigned int tx_head;
+ u32 status, control;
+ dma_addr_t dma_addr;
+
+ dma_addr = owl_emac_dma_map_tx(priv, skb);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err_ratelimited(&netdev->dev, "TX DMA mapping failed\n");
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_bh(&priv->lock);
+
+ tx_head = ring->head;
+ desc = &ring->descs[tx_head];
+
+ status = READ_ONCE(desc->status);
+ control = READ_ONCE(desc->control);
+ dma_rmb(); /* Ensure data has been read before used. */
+
+ if (!owl_emac_ring_num_unused(ring) ||
+ unlikely(status & OWL_EMAC_BIT_TDES0_OWN)) {
+ netif_stop_queue(netdev);
+ spin_unlock_bh(&priv->lock);
+
+		dev_dbg_ratelimited(&netdev->dev, "TX buffer full, status: 0x%08x\n",
+ owl_emac_irq_status(priv));
+ owl_emac_dma_unmap_tx(priv, skb, dma_addr);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_BUSY;
+ }
+
+ ring->skbs[tx_head] = skb;
+ ring->skbs_dma[tx_head] = dma_addr;
+
+ control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */
+ control |= OWL_EMAC_BIT_TDES1_FS | OWL_EMAC_BIT_TDES1_LS;
+ control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len;
+
+ WRITE_ONCE(desc->control, control);
+ WRITE_ONCE(desc->buf_addr, dma_addr);
+ dma_wmb(); /* Flush descriptor before changing ownership. */
+ WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN);
+
+ owl_emac_dma_cmd_resume_tx(priv);
+ owl_emac_ring_push_head(ring);
+
+ /* FIXME: The transmission is currently restricted to a single frame
+ * at a time as a workaround for a MAC hardware bug that causes random
+ * freeze of the TX queue processor.
+ */
+ netif_stop_queue(netdev);
+
+ spin_unlock_bh(&priv->lock);
+
+ return NETDEV_TX_OK;
+}
+
+static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->tx_ring;
+ struct net_device *netdev = priv->netdev;
+ struct owl_emac_ring_desc *desc;
+ struct sk_buff *skb;
+ unsigned int tx_tail;
+ u32 status;
+
+ tx_tail = ring->tail;
+ desc = &ring->descs[tx_tail];
+
+ status = READ_ONCE(desc->status);
+ dma_rmb(); /* Ensure data has been read before used. */
+
+ if (status & OWL_EMAC_BIT_TDES0_OWN)
+ return false;
+
+ /* Check for errors. */
+ if (status & OWL_EMAC_BIT_TDES0_ES) {
+ dev_dbg_ratelimited(&netdev->dev,
+ "TX complete error status: 0x%08x\n",
+ status);
+
+ netdev->stats.tx_errors++;
+
+ if (status & OWL_EMAC_BIT_TDES0_UF)
+ netdev->stats.tx_fifo_errors++;
+
+ if (status & OWL_EMAC_BIT_TDES0_EC)
+ netdev->stats.tx_aborted_errors++;
+
+ if (status & OWL_EMAC_BIT_TDES0_LC)
+ netdev->stats.tx_window_errors++;
+
+ if (status & OWL_EMAC_BIT_TDES0_NC)
+ netdev->stats.tx_heartbeat_errors++;
+
+ if (status & OWL_EMAC_BIT_TDES0_LO)
+ netdev->stats.tx_carrier_errors++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += ring->skbs[tx_tail]->len;
+ }
+
+	/* Some collisions occurred, but the packet has been transmitted. */
+ if (status & OWL_EMAC_BIT_TDES0_DE)
+ netdev->stats.collisions++;
+
+ skb = ring->skbs[tx_tail];
+ owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]);
+ dev_kfree_skb(skb);
+
+ ring->skbs[tx_tail] = NULL;
+ ring->skbs_dma[tx_tail] = 0;
+
+ owl_emac_ring_pop_tail(ring);
+
+ if (unlikely(netif_queue_stopped(netdev)))
+ netif_wake_queue(netdev);
+
+ return true;
+}
+
+static void owl_emac_tx_complete(struct owl_emac_priv *priv)
+{
+ struct owl_emac_ring *ring = &priv->tx_ring;
+ struct net_device *netdev = priv->netdev;
+ unsigned int tx_next;
+ u32 status;
+
+ spin_lock(&priv->lock);
+
+ while (ring->tail != ring->head) {
+ if (!owl_emac_tx_complete_tail(priv))
+ break;
+ }
+
+ /* FIXME: This is a workaround for a MAC hardware bug not clearing
+ * (sometimes) the OWN bit for a transmitted frame descriptor.
+ *
+ * At this point, when TX queue is full, the tail descriptor has the
+ * OWN bit set, which normally means the frame has not been processed
+ * or transmitted yet. But if there is at least one descriptor in the
+ * queue having the OWN bit cleared, we can safely assume the tail
+ * frame has also been processed by the MAC hardware.
+ *
+ * If that's the case, let's force the frame completion by manually
+ * clearing the OWN bit.
+ */
+ if (unlikely(!owl_emac_ring_num_unused(ring))) {
+ tx_next = ring->tail;
+
+ while ((tx_next = owl_emac_ring_get_next(ring, tx_next)) != ring->head) {
+ status = READ_ONCE(ring->descs[tx_next].status);
+ dma_rmb(); /* Ensure data has been read before used. */
+
+ if (status & OWL_EMAC_BIT_TDES0_OWN)
+ continue;
+
+ netdev_dbg(netdev, "Found uncleared TX desc OWN bit\n");
+
+ status = READ_ONCE(ring->descs[ring->tail].status);
+ dma_rmb(); /* Ensure data has been read before used. */
+ status &= ~OWL_EMAC_BIT_TDES0_OWN;
+ WRITE_ONCE(ring->descs[ring->tail].status, status);
+
+ owl_emac_tx_complete_tail(priv);
+ break;
+ }
+ }
+
+ spin_unlock(&priv->lock);
+}
+
+static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget)
+{
+ struct owl_emac_ring *ring = &priv->rx_ring;
+ struct device *dev = owl_emac_get_dev(priv);
+ struct net_device *netdev = priv->netdev;
+ struct owl_emac_ring_desc *desc;
+ struct sk_buff *curr_skb, *new_skb;
+ dma_addr_t curr_dma, new_dma;
+ unsigned int rx_tail, len;
+ u32 status;
+ int recv = 0;
+
+ while (recv < budget) {
+ spin_lock(&priv->lock);
+
+ rx_tail = ring->tail;
+ desc = &ring->descs[rx_tail];
+
+ status = READ_ONCE(desc->status);
+ dma_rmb(); /* Ensure data has been read before used. */
+
+ if (status & OWL_EMAC_BIT_RDES0_OWN) {
+ spin_unlock(&priv->lock);
+ break;
+ }
+
+ curr_skb = ring->skbs[rx_tail];
+ curr_dma = ring->skbs_dma[rx_tail];
+ owl_emac_ring_pop_tail(ring);
+
+ spin_unlock(&priv->lock);
+
+ if (status & (OWL_EMAC_BIT_RDES0_DE | OWL_EMAC_BIT_RDES0_RF |
+ OWL_EMAC_BIT_RDES0_TL | OWL_EMAC_BIT_RDES0_CS |
+ OWL_EMAC_BIT_RDES0_DB | OWL_EMAC_BIT_RDES0_CE |
+ OWL_EMAC_BIT_RDES0_ZERO)) {
+ dev_dbg_ratelimited(&netdev->dev,
+ "RX desc error status: 0x%08x\n",
+ status);
+
+ if (status & OWL_EMAC_BIT_RDES0_DE)
+ netdev->stats.rx_over_errors++;
+
+ if (status & (OWL_EMAC_BIT_RDES0_RF | OWL_EMAC_BIT_RDES0_DB))
+ netdev->stats.rx_frame_errors++;
+
+ if (status & OWL_EMAC_BIT_RDES0_TL)
+ netdev->stats.rx_length_errors++;
+
+ if (status & OWL_EMAC_BIT_RDES0_CS)
+ netdev->stats.collisions++;
+
+ if (status & OWL_EMAC_BIT_RDES0_CE)
+ netdev->stats.rx_crc_errors++;
+
+ if (status & OWL_EMAC_BIT_RDES0_ZERO)
+ netdev->stats.rx_fifo_errors++;
+
+ goto drop_skb;
+ }
+
+ len = (status & OWL_EMAC_MSK_RDES0_FL) >> OWL_EMAC_OFF_RDES0_FL;
+ if (unlikely(len > OWL_EMAC_RX_FRAME_MAX_LEN)) {
+ netdev->stats.rx_length_errors++;
+ netdev_err(netdev, "invalid RX frame len: %u\n", len);
+ goto drop_skb;
+ }
+
+ /* Prepare new skb before receiving the current one. */
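+		/* On allocation or mapping failure the current skb is
+		 * recycled back into the ring (drop_skb path below), so RX
+		 * never runs out of buffers.
+		 */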
+ new_skb = owl_emac_alloc_skb(netdev);
+ if (unlikely(!new_skb))
+ goto drop_skb;
+
+ new_dma = owl_emac_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma)) {
+ dev_kfree_skb(new_skb);
+ netdev_err(netdev, "RX DMA mapping failed\n");
+ goto drop_skb;
+ }
+
+ owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma);
+
+ skb_put(curr_skb, len - ETH_FCS_LEN);
+ curr_skb->ip_summed = CHECKSUM_NONE;
+ curr_skb->protocol = eth_type_trans(curr_skb, netdev);
+ curr_skb->dev = netdev;
+
+ netif_receive_skb(curr_skb);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += len;
+ recv++;
+ goto push_skb;
+
+drop_skb:
+ netdev->stats.rx_dropped++;
+ netdev->stats.rx_errors++;
+ /* Reuse the current skb. */
+ new_skb = curr_skb;
+ new_dma = curr_dma;
+
+push_skb:
+ spin_lock(&priv->lock);
+
+ ring->skbs[ring->head] = new_skb;
+ ring->skbs_dma[ring->head] = new_dma;
+
+ WRITE_ONCE(desc->buf_addr, new_dma);
+ dma_wmb(); /* Flush descriptor before changing ownership. */
+ WRITE_ONCE(desc->status, OWL_EMAC_BIT_RDES0_OWN);
+
+ owl_emac_ring_push_head(ring);
+
+ spin_unlock(&priv->lock);
+ }
+
+ return recv;
+}
+
+static int owl_emac_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0, ru_cnt = 0, recv;
+ static int tx_err_cnt, rx_err_cnt;
+ struct owl_emac_priv *priv;
+ u32 status, proc_status;
+
+ priv = container_of(napi, struct owl_emac_priv, napi);
+
+ while ((status = owl_emac_irq_clear(priv)) &
+ (OWL_EMAC_BIT_MAC_CSR5_NIS | OWL_EMAC_BIT_MAC_CSR5_AIS)) {
+ recv = 0;
+
+ /* TX setup frame raises ETI instead of TI. */
+ if (status & (OWL_EMAC_BIT_MAC_CSR5_TI | OWL_EMAC_BIT_MAC_CSR5_ETI)) {
+ owl_emac_tx_complete(priv);
+ tx_err_cnt = 0;
+
+ /* Count MAC internal RX errors. */
+ proc_status = status & OWL_EMAC_MSK_MAC_CSR5_RS;
+ proc_status >>= OWL_EMAC_OFF_MAC_CSR5_RS;
+ if (proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_DATA ||
+ proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_CDES ||
+ proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_FDES)
+ rx_err_cnt++;
+ }
+
+ if (status & OWL_EMAC_BIT_MAC_CSR5_RI) {
+ recv = owl_emac_rx_process(priv, budget - work_done);
+ rx_err_cnt = 0;
+
+ /* Count MAC internal TX errors. */
+ proc_status = status & OWL_EMAC_MSK_MAC_CSR5_TS;
+ proc_status >>= OWL_EMAC_OFF_MAC_CSR5_TS;
+ if (proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_DATA ||
+ proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_CDES)
+ tx_err_cnt++;
+ } else if (status & OWL_EMAC_BIT_MAC_CSR5_RU) {
+ /* MAC AHB is in suspended state, will return to RX
+ * descriptor processing when the host changes ownership
+ * of the descriptor and either an RX poll demand CMD is
+ * issued or a new frame is recognized by the MAC AHB.
+ */
+ if (++ru_cnt == 2)
+ owl_emac_dma_cmd_resume_rx(priv);
+
+ recv = owl_emac_rx_process(priv, budget - work_done);
+
+ /* Guard against too many RU interrupts. */
+ if (ru_cnt > 3)
+ break;
+ }
+
+ work_done += recv;
+ if (work_done >= budget)
+ break;
+ }
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ owl_emac_irq_enable(priv);
+ }
+
+ /* Reset MAC when getting too many internal TX or RX errors. */
+ if (tx_err_cnt > 10 || rx_err_cnt > 10) {
+ netdev_dbg(priv->netdev, "%s error status: 0x%08x\n",
+ tx_err_cnt > 10 ? "TX" : "RX", status);
+ rx_err_cnt = 0;
+ tx_err_cnt = 0;
+ schedule_work(&priv->mac_reset_task);
+ }
+
+ return work_done;
+}
+
+static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv)
+{
+ u32 val;
+
+ /* Enable MDC clock generation by adjusting CLKDIV according to
+ * the vendor implementation of the original driver.
+ */
+ val = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
+ val &= OWL_EMAC_MSK_MAC_CSR10_CLKDIV;
+ val |= OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 << OWL_EMAC_OFF_MAC_CSR10_CLKDIV;
+
+ val |= OWL_EMAC_BIT_MAC_CSR10_SB;
+ val |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, val);
+}
+
+static void owl_emac_core_hw_reset(struct owl_emac_priv *priv)
+{
+ /* Trigger hardware reset. */
+ reset_control_assert(priv->reset);
+ usleep_range(10, 20);
+ reset_control_deassert(priv->reset);
+ usleep_range(100, 200);
+}
+
+static int owl_emac_core_sw_reset(struct owl_emac_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ /* Trigger software reset. */
+ owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR);
+ ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0,
+ val, !(val & OWL_EMAC_BIT_MAC_CSR0_SWR),
+ OWL_EMAC_POLL_DELAY_USEC,
+ OWL_EMAC_RESET_POLL_TIMEOUT_USEC);
+ if (ret)
+ return ret;
+
+ if (priv->phy_mode == PHY_INTERFACE_MODE_RMII) {
+ /* Enable RMII and use the 50MHz rmii clk as output to PHY. */
+ val = 0;
+ } else {
+ /* Enable SMII and use the 125MHz rmii clk as output to PHY.
+		 * Additionally set SMII SYNC delay to 4 half cycles.
+ */
+ val = 0x04 << OWL_EMAC_OFF_MAC_CTRL_SSDC;
+ val |= OWL_EMAC_BIT_MAC_CTRL_RSIS;
+ }
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CTRL, val);
+
+ /* MDC is disabled after reset. */
+ owl_emac_mdio_clock_enable(priv);
+
+ /* Set FIFO pause & restart threshold levels. */
+ val = 0x40 << OWL_EMAC_OFF_MAC_CSR19_FPTL;
+ val |= 0x10 << OWL_EMAC_OFF_MAC_CSR19_FRTL;
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR19, val);
+
+ /* Set flow control pause quanta time to ~100 ms. */
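+	/* 0x4FFF quanta x 512 bit times per quantum = ~10.5M bit times,
+	 * i.e. ~105 ms on a 100 Mbps link (~1 s at 10 Mbps).
+	 */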
+ val = 0x4FFF << OWL_EMAC_OFF_MAC_CSR18_PQT;
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR18, val);
+
+ /* Setup interrupt mitigation. */
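+	/* Coalesce receive interrupts: up to 7 received frames or a 4-unit
+	 * receive timer per interrupt. The timer granularity is hardware
+	 * defined; the values are presumably inherited from the vendor
+	 * driver.
+	 */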
+ val = 7 << OWL_EMAC_OFF_MAC_CSR11_NRP;
+ val |= 4 << OWL_EMAC_OFF_MAC_CSR11_RT;
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR11, val);
+
+ /* Set RX/TX rings base addresses. */
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR3,
+ (u32)(priv->rx_ring.descs_dma));
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR4,
+ (u32)(priv->tx_ring.descs_dma));
+
+ /* Setup initial operation mode. */
+ val = OWL_EMAC_VAL_MAC_CSR6_SPEED_100M << OWL_EMAC_OFF_MAC_CSR6_SPEED;
+ val |= OWL_EMAC_BIT_MAC_CSR6_FD;
+ owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
+ OWL_EMAC_MSK_MAC_CSR6_SPEED |
+ OWL_EMAC_BIT_MAC_CSR6_FD, val);
+ owl_emac_reg_clear(priv, OWL_EMAC_REG_MAC_CSR6,
+ OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM);
+
+ priv->link = 0;
+ priv->speed = SPEED_UNKNOWN;
+ priv->duplex = DUPLEX_UNKNOWN;
+ priv->pause = 0;
+ priv->mcaddr_list.count = 0;
+
+ return 0;
+}
+
+static int owl_emac_enable(struct net_device *netdev, bool start_phy)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ owl_emac_dma_cmd_stop(priv);
+ owl_emac_irq_disable(priv);
+ owl_emac_irq_clear(priv);
+
+ owl_emac_ring_prepare_tx(priv);
+ ret = owl_emac_ring_prepare_rx(priv);
+ if (ret)
+ goto err_unprep;
+
+ ret = owl_emac_core_sw_reset(priv);
+ if (ret) {
+ netdev_err(netdev, "failed to soft reset MAC core: %d\n", ret);
+ goto err_unprep;
+ }
+
+ owl_emac_set_hw_mac_addr(netdev);
+ owl_emac_setup_frame_xmit(priv);
+
+ netdev_reset_queue(netdev);
+ napi_enable(&priv->napi);
+
+ owl_emac_irq_enable(priv);
+ owl_emac_dma_cmd_start(priv);
+
+ if (start_phy)
+ phy_start(netdev->phydev);
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_unprep:
+ owl_emac_ring_unprepare_rx(priv);
+ owl_emac_ring_unprepare_tx(priv);
+
+ return ret;
+}
+
+static void owl_emac_disable(struct net_device *netdev, bool stop_phy)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+
+ owl_emac_dma_cmd_stop(priv);
+ owl_emac_irq_disable(priv);
+
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+
+ if (stop_phy)
+ phy_stop(netdev->phydev);
+
+ owl_emac_ring_unprepare_rx(priv);
+ owl_emac_ring_unprepare_tx(priv);
+}
+
+static int owl_emac_ndo_open(struct net_device *netdev)
+{
+ return owl_emac_enable(netdev, true);
+}
+
+static int owl_emac_ndo_stop(struct net_device *netdev)
+{
+ owl_emac_disable(netdev, true);
+
+ return 0;
+}
+
+static void owl_emac_set_multicast(struct net_device *netdev, int count)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ int index = 0;
+
+ if (count <= 0) {
+ priv->mcaddr_list.count = 0;
+ return;
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (!is_multicast_ether_addr(ha->addr))
+ continue;
+
+ WARN_ON(index >= OWL_EMAC_MAX_MULTICAST_ADDRS);
+ ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr);
+ }
+
+ priv->mcaddr_list.count = index;
+
+ owl_emac_setup_frame_xmit(priv);
+}
+
+static void owl_emac_ndo_set_rx_mode(struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ u32 status, val = 0;
+ int mcast_count = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ val = OWL_EMAC_BIT_MAC_CSR6_PR;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ val = OWL_EMAC_BIT_MAC_CSR6_PM;
+ } else if (netdev->flags & IFF_MULTICAST) {
+ mcast_count = netdev_mc_count(netdev);
+
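+		/* Fall back to "pass all multicast" mode when the list does
+		 * not fit into the 14 setup frame slots.
+		 */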
+ if (mcast_count > OWL_EMAC_MAX_MULTICAST_ADDRS) {
+ val = OWL_EMAC_BIT_MAC_CSR6_PM;
+ mcast_count = 0;
+ }
+ }
+
+ spin_lock_bh(&priv->lock);
+
+ /* Temporarily stop DMA TX & RX. */
+ status = owl_emac_dma_cmd_stop(priv);
+
+ /* Update operation modes. */
+ owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
+ OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM,
+ val);
+
+ /* Restore DMA TX & RX status. */
+ owl_emac_dma_cmd_set(priv, status);
+
+ spin_unlock_bh(&priv->lock);
+
+ /* Set/reset multicast addr list. */
+ owl_emac_set_multicast(netdev, mcast_count);
+}
+
+static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *skaddr = addr;
+
+ if (!is_valid_ether_addr(skaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ owl_emac_set_hw_mac_addr(netdev);
+
+ return owl_emac_setup_frame_xmit(netdev_priv(netdev));
+}
+
+static int owl_emac_ndo_do_ioctl(struct net_device *netdev,
+ struct ifreq *req, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(netdev->phydev, req, cmd);
+}
+
+static void owl_emac_ndo_tx_timeout(struct net_device *netdev,
+ unsigned int txqueue)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+
+ schedule_work(&priv->mac_reset_task);
+}
+
+static void owl_emac_reset_task(struct work_struct *work)
+{
+ struct owl_emac_priv *priv;
+
+ priv = container_of(work, struct owl_emac_priv, mac_reset_task);
+
+ netdev_dbg(priv->netdev, "resetting MAC\n");
+ owl_emac_disable(priv->netdev, false);
+ owl_emac_enable(priv->netdev, false);
+}
+
+static struct net_device_stats *
+owl_emac_ndo_get_stats(struct net_device *netdev)
+{
+ /* FIXME: If possible, try to get stats from MAC hardware registers
+ * instead of tracking them manually in the driver.
+ */
+
+ return &netdev->stats;
+}
+
+static const struct net_device_ops owl_emac_netdev_ops = {
+ .ndo_open = owl_emac_ndo_open,
+ .ndo_stop = owl_emac_ndo_stop,
+ .ndo_start_xmit = owl_emac_ndo_start_xmit,
+ .ndo_set_rx_mode = owl_emac_ndo_set_rx_mode,
+ .ndo_set_mac_address = owl_emac_ndo_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = owl_emac_ndo_do_ioctl,
+ .ndo_tx_timeout = owl_emac_ndo_tx_timeout,
+ .ndo_get_stats = owl_emac_ndo_get_stats,
+};
+
+static void owl_emac_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, OWL_EMAC_DRVNAME, sizeof(info->driver));
+}
+
+static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void owl_emac_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+ struct owl_emac_priv *priv = netdev_priv(ndev);
+
+ priv->msg_enable = val;
+}
+
+static const struct ethtool_ops owl_emac_ethtool_ops = {
+ .get_drvinfo = owl_emac_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = owl_emac_ethtool_get_msglevel,
+ .set_msglevel = owl_emac_ethtool_set_msglevel,
+};
+
+static int owl_emac_mdio_wait(struct owl_emac_priv *priv)
+{
+ u32 val;
+
+ /* Wait while data transfer is in progress. */
+ return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10,
+ val, !(val & OWL_EMAC_BIT_MAC_CSR10_SB),
+ OWL_EMAC_POLL_DELAY_USEC,
+ OWL_EMAC_MDIO_POLL_TIMEOUT_USEC);
+}
+
+static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct owl_emac_priv *priv = bus->priv;
+ u32 data, tmp;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ data = OWL_EMAC_BIT_MAC_CSR10_SB;
+ data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
+
+ tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD;
+ data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD;
+
+ tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD;
+ data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD;
+
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);
+
+ ret = owl_emac_mdio_wait(priv);
+ if (ret)
+ return ret;
+
+ data = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
+ data &= OWL_EMAC_MSK_MAC_CSR10_DATA;
+
+ return data;
+}
+
+static int
+owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct owl_emac_priv *priv = bus->priv;
+ u32 data, tmp;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ data = OWL_EMAC_BIT_MAC_CSR10_SB;
+ data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
+
+ tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD;
+ data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD;
+
+ tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD;
+ data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD;
+
+ data |= val & OWL_EMAC_MSK_MAC_CSR10_DATA;
+
+ owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);
+
+ return owl_emac_mdio_wait(priv);
+}
+
+static int owl_emac_mdio_init(struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ struct device *dev = owl_emac_get_dev(priv);
+ struct device_node *mdio_node;
+ int ret;
+
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node)
+ return -ENODEV;
+
+ if (!of_device_is_available(mdio_node)) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ priv->mii = devm_mdiobus_alloc(dev);
+ if (!priv->mii) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ priv->mii->name = "owl-emac-mdio";
+ priv->mii->parent = dev;
+ priv->mii->read = owl_emac_mdio_read;
+ priv->mii->write = owl_emac_mdio_write;
+ priv->mii->phy_mask = ~0; /* Mask out all PHYs from auto probing. */
+ priv->mii->priv = priv;
+
+ ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
+
+err_put_node:
+ of_node_put(mdio_node);
+ return ret;
+}
+
+static int owl_emac_phy_init(struct net_device *netdev)
+{
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ struct device *dev = owl_emac_get_dev(priv);
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(netdev, dev->of_node,
+ owl_emac_adjust_link);
+ if (!phy)
+ return -ENODEV;
+
+ phy_set_sym_pause(phy, true, true, true);
+
+ if (netif_msg_link(priv))
+ phy_attached_info(phy);
+
+ return 0;
+}
+
+static void owl_emac_get_mac_addr(struct net_device *netdev)
+{
+ struct device *dev = netdev->dev.parent;
+ int ret;
+
+ ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
+ if (!ret && is_valid_ether_addr(netdev->dev_addr))
+ return;
+
+ eth_hw_addr_random(netdev);
+ dev_warn(dev, "using random MAC address %pM\n", netdev->dev_addr);
+}
+
+static __maybe_unused int owl_emac_suspend(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+
+ disable_irq(netdev->irq);
+
+ if (netif_running(netdev)) {
+ owl_emac_disable(netdev, true);
+ netif_device_detach(netdev);
+ }
+
+ clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
+
+ return 0;
+}
+
+static __maybe_unused int owl_emac_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct owl_emac_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ if (netif_running(netdev)) {
+ owl_emac_core_hw_reset(priv);
+ owl_emac_core_sw_reset(priv);
+
+ ret = owl_emac_enable(netdev, true);
+ if (ret) {
+ clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
+ return ret;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+ enable_irq(netdev->irq);
+
+ return 0;
+}
+
+static void owl_emac_clk_disable_unprepare(void *data)
+{
+ struct owl_emac_priv *priv = data;
+
+ clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
+}
+
+static int owl_emac_clk_set_rate(struct owl_emac_priv *priv)
+{
+ struct device *dev = owl_emac_get_dev(priv);
+ unsigned long rate;
+ int ret;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ rate = 50000000;
+ break;
+
+ case PHY_INTERFACE_MODE_SMII:
+ rate = 125000000;
+ break;
+
+ default:
+ dev_err(dev, "unsupported phy interface mode %d\n",
+ priv->phy_mode);
+ return -EOPNOTSUPP;
+ }
+
+ ret = clk_set_rate(priv->clks[OWL_EMAC_CLK_RMII].clk, rate);
+ if (ret)
+ dev_err(dev, "failed to set RMII clock rate: %d\n", ret);
+
+ return ret;
+}
+
+static int owl_emac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct owl_emac_priv *priv;
+ int ret, i;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(*priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->msg_enable = netif_msg_init(-1, OWL_EMAC_DEFAULT_MSG_ENABLE);
+
+ ret = of_get_phy_mode(dev->of_node, &priv->phy_mode);
+ if (ret) {
+ dev_err(dev, "failed to get phy mode: %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_init(&priv->lock);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "unsupported DMA mask\n");
+ return ret;
+ }
+
+ ret = owl_emac_ring_alloc(dev, &priv->rx_ring, OWL_EMAC_RX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = owl_emac_ring_alloc(dev, &priv->tx_ring, OWL_EMAC_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ netdev->irq = platform_get_irq(pdev, 0);
+ if (netdev->irq < 0)
+ return netdev->irq;
+
+ ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq,
+ IRQF_SHARED, netdev->name, netdev);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", netdev->irq);
+ return ret;
+ }
+
+ for (i = 0; i < OWL_EMAC_NCLKS; i++)
+ priv->clks[i].id = owl_emac_clk_names[i];
+
+ ret = devm_clk_bulk_get(dev, OWL_EMAC_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv);
+ if (ret)
+ return ret;
+
+ ret = owl_emac_clk_set_rate(priv);
+ if (ret)
+ return ret;
+
+ priv->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(dev, PTR_ERR(priv->reset),
+ "failed to get reset control");
+
+ owl_emac_get_mac_addr(netdev);
+
+ owl_emac_core_hw_reset(priv);
+ owl_emac_mdio_clock_enable(priv);
+
+ ret = owl_emac_mdio_init(netdev);
+ if (ret) {
+ dev_err(dev, "failed to initialize MDIO bus\n");
+ return ret;
+ }
+
+ ret = owl_emac_phy_init(netdev);
+ if (ret) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return ret;
+ }
+
+ INIT_WORK(&priv->mac_reset_task, owl_emac_reset_task);
+
+ netdev->min_mtu = OWL_EMAC_MTU_MIN;
+ netdev->max_mtu = OWL_EMAC_MTU_MAX;
+ netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
+ netdev->netdev_ops = &owl_emac_netdev_ops;
+ netdev->ethtool_ops = &owl_emac_ethtool_ops;
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+
+ ret = devm_register_netdev(dev, netdev);
+ if (ret) {
+ netif_napi_del(&priv->napi);
+ phy_disconnect(netdev->phydev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int owl_emac_remove(struct platform_device *pdev)
+{
+ struct owl_emac_priv *priv = platform_get_drvdata(pdev);
+
+ netif_napi_del(&priv->napi);
+ phy_disconnect(priv->netdev->phydev);
+ cancel_work_sync(&priv->mac_reset_task);
+
+ return 0;
+}
+
+static const struct of_device_id owl_emac_of_match[] = {
+ { .compatible = "actions,owl-emac", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, owl_emac_of_match);
+
+static SIMPLE_DEV_PM_OPS(owl_emac_pm_ops,
+ owl_emac_suspend, owl_emac_resume);
+
+static struct platform_driver owl_emac_driver = {
+ .driver = {
+ .name = OWL_EMAC_DRVNAME,
+ .of_match_table = owl_emac_of_match,
+ .pm = &owl_emac_pm_ops,
+ },
+ .probe = owl_emac_probe,
+ .remove = owl_emac_remove,
+};
+module_platform_driver(owl_emac_driver);
+
+MODULE_DESCRIPTION("Actions Semi Owl SoCs Ethernet MAC Driver");
+MODULE_AUTHOR("Actions Semi Inc.");
+MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/actions/owl-emac.h b/drivers/net/ethernet/actions/owl-emac.h
new file mode 100644
index 000000000000..9eb0d1a30242
--- /dev/null
+++ b/drivers/net/ethernet/actions/owl-emac.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Actions Semi Owl SoCs Ethernet MAC driver
+ *
+ * Copyright (c) 2012 Actions Semi Inc.
+ * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __OWL_EMAC_H__
+#define __OWL_EMAC_H__
+
+#define OWL_EMAC_DRVNAME "owl-emac"
+
+#define OWL_EMAC_POLL_DELAY_USEC 5
+#define OWL_EMAC_MDIO_POLL_TIMEOUT_USEC 1000
+#define OWL_EMAC_RESET_POLL_TIMEOUT_USEC 2000
+#define OWL_EMAC_TX_TIMEOUT (2 * HZ)
+
+#define OWL_EMAC_MTU_MIN ETH_MIN_MTU
+#define OWL_EMAC_MTU_MAX ETH_DATA_LEN
+#define OWL_EMAC_RX_FRAME_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
+#define OWL_EMAC_SKB_ALIGN 4
+#define OWL_EMAC_SKB_RESERVE 18
+
+#define OWL_EMAC_MAX_MULTICAST_ADDRS 14
+#define OWL_EMAC_SETUP_FRAME_LEN 192
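+/* 192-byte setup frame = 16 slots x 12 bytes:
+ * 1 unicast + 1 broadcast + 14 multicast addresses.
+ */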
+
+#define OWL_EMAC_RX_RING_SIZE 64
+#define OWL_EMAC_TX_RING_SIZE 32
+
+/* Bus mode register */
+#define OWL_EMAC_REG_MAC_CSR0 0x0000
+#define OWL_EMAC_BIT_MAC_CSR0_SWR BIT(0) /* Software reset */
+
+/* Transmit/receive poll demand registers */
+#define OWL_EMAC_REG_MAC_CSR1 0x0008
+#define OWL_EMAC_VAL_MAC_CSR1_TPD 0x01
+#define OWL_EMAC_REG_MAC_CSR2 0x0010
+#define OWL_EMAC_VAL_MAC_CSR2_RPD 0x01
+
+/* Receive/transmit descriptor list base address registers */
+#define OWL_EMAC_REG_MAC_CSR3 0x0018
+#define OWL_EMAC_REG_MAC_CSR4 0x0020
+
+/* Status register */
+#define OWL_EMAC_REG_MAC_CSR5 0x0028
+#define OWL_EMAC_MSK_MAC_CSR5_TS GENMASK(22, 20) /* Transmit process state */
+#define OWL_EMAC_OFF_MAC_CSR5_TS 20
+#define OWL_EMAC_VAL_MAC_CSR5_TS_DATA 0x03 /* Transferring data HOST -> FIFO */
+#define OWL_EMAC_VAL_MAC_CSR5_TS_CDES 0x07 /* Closing transmit descriptor */
+#define OWL_EMAC_MSK_MAC_CSR5_RS GENMASK(19, 17) /* Receive process state */
+#define OWL_EMAC_OFF_MAC_CSR5_RS 17
+#define OWL_EMAC_VAL_MAC_CSR5_RS_FDES 0x01 /* Fetching receive descriptor */
+#define OWL_EMAC_VAL_MAC_CSR5_RS_CDES 0x05 /* Closing receive descriptor */
+#define OWL_EMAC_VAL_MAC_CSR5_RS_DATA 0x07 /* Transferring data FIFO -> HOST */
+#define OWL_EMAC_BIT_MAC_CSR5_NIS BIT(16) /* Normal interrupt summary */
+#define OWL_EMAC_BIT_MAC_CSR5_AIS BIT(15) /* Abnormal interrupt summary */
+#define OWL_EMAC_BIT_MAC_CSR5_ERI BIT(14) /* Early receive interrupt */
+#define OWL_EMAC_BIT_MAC_CSR5_GTE BIT(11) /* General-purpose timer expiration */
+#define OWL_EMAC_BIT_MAC_CSR5_ETI BIT(10) /* Early transmit interrupt */
+#define OWL_EMAC_BIT_MAC_CSR5_RPS BIT(8) /* Receive process stopped */
+#define OWL_EMAC_BIT_MAC_CSR5_RU BIT(7) /* Receive buffer unavailable */
+#define OWL_EMAC_BIT_MAC_CSR5_RI BIT(6) /* Receive interrupt */
+#define OWL_EMAC_BIT_MAC_CSR5_UNF BIT(5) /* Transmit underflow */
+#define OWL_EMAC_BIT_MAC_CSR5_LCIS BIT(4) /* Link change status */
+#define OWL_EMAC_BIT_MAC_CSR5_LCIQ BIT(3) /* Link change interrupt */
+#define OWL_EMAC_BIT_MAC_CSR5_TU BIT(2) /* Transmit buffer unavailable */
+#define OWL_EMAC_BIT_MAC_CSR5_TPS BIT(1) /* Transmit process stopped */
+#define OWL_EMAC_BIT_MAC_CSR5_TI BIT(0) /* Transmit interrupt */
+
+/* Operation mode register */
+#define OWL_EMAC_REG_MAC_CSR6 0x0030
+#define OWL_EMAC_BIT_MAC_CSR6_RA BIT(30) /* Receive all */
+#define OWL_EMAC_BIT_MAC_CSR6_TTM BIT(22) /* Transmit threshold mode */
+#define OWL_EMAC_BIT_MAC_CSR6_SF BIT(21) /* Store and forward */
+#define OWL_EMAC_MSK_MAC_CSR6_SPEED GENMASK(17, 16) /* Eth speed selection */
+#define OWL_EMAC_OFF_MAC_CSR6_SPEED 16
+#define OWL_EMAC_VAL_MAC_CSR6_SPEED_100M 0x00
+#define OWL_EMAC_VAL_MAC_CSR6_SPEED_10M 0x02
+#define OWL_EMAC_BIT_MAC_CSR6_ST BIT(13) /* Start/stop transmit command */
+#define OWL_EMAC_BIT_MAC_CSR6_LP BIT(10) /* Loopback mode */
+#define OWL_EMAC_BIT_MAC_CSR6_FD BIT(9) /* Full duplex mode */
+#define OWL_EMAC_BIT_MAC_CSR6_PM BIT(7) /* Pass all multicast */
+#define OWL_EMAC_BIT_MAC_CSR6_PR BIT(6) /* Promiscuous mode */
+#define OWL_EMAC_BIT_MAC_CSR6_IF BIT(4) /* Inverse filtering */
+#define OWL_EMAC_BIT_MAC_CSR6_PB BIT(3) /* Pass bad frames */
+#define OWL_EMAC_BIT_MAC_CSR6_HO BIT(2) /* Hash only filtering mode */
+#define OWL_EMAC_BIT_MAC_CSR6_SR BIT(1) /* Start/stop receive command */
+#define OWL_EMAC_BIT_MAC_CSR6_HP BIT(0) /* Hash/perfect receive filtering mode */
+#define OWL_EMAC_MSK_MAC_CSR6_STSR (OWL_EMAC_BIT_MAC_CSR6_ST | \
+ OWL_EMAC_BIT_MAC_CSR6_SR)
+
+/* Interrupt enable register */
+#define OWL_EMAC_REG_MAC_CSR7 0x0038
+#define OWL_EMAC_BIT_MAC_CSR7_NIE BIT(16) /* Normal interrupt summary enable */
+#define OWL_EMAC_BIT_MAC_CSR7_AIE BIT(15) /* Abnormal interrupt summary enable */
+#define OWL_EMAC_BIT_MAC_CSR7_ERE BIT(14) /* Early receive interrupt enable */
+#define OWL_EMAC_BIT_MAC_CSR7_GTE BIT(11) /* General-purpose timer overflow */
+#define OWL_EMAC_BIT_MAC_CSR7_ETE BIT(10) /* Early transmit interrupt enable */
+#define OWL_EMAC_BIT_MAC_CSR7_RSE BIT(8) /* Receive stopped enable */
+#define OWL_EMAC_BIT_MAC_CSR7_RUE BIT(7) /* Receive buffer unavailable enable */
+#define OWL_EMAC_BIT_MAC_CSR7_RIE BIT(6) /* Receive interrupt enable */
+#define OWL_EMAC_BIT_MAC_CSR7_UNE BIT(5) /* Underflow interrupt enable */
+#define OWL_EMAC_BIT_MAC_CSR7_TUE BIT(2) /* Transmit buffer unavailable enable */
+#define OWL_EMAC_BIT_MAC_CSR7_TSE BIT(1) /* Transmit stopped enable */
+#define OWL_EMAC_BIT_MAC_CSR7_TIE BIT(0) /* Transmit interrupt enable */
+#define OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE (OWL_EMAC_BIT_MAC_CSR7_ERE | \
+ OWL_EMAC_BIT_MAC_CSR7_GTE | \
+ OWL_EMAC_BIT_MAC_CSR7_ETE | \
+ OWL_EMAC_BIT_MAC_CSR7_RSE | \
+ OWL_EMAC_BIT_MAC_CSR7_RUE | \
+ OWL_EMAC_BIT_MAC_CSR7_RIE | \
+ OWL_EMAC_BIT_MAC_CSR7_UNE | \
+ OWL_EMAC_BIT_MAC_CSR7_TSE | \
+ OWL_EMAC_BIT_MAC_CSR7_TIE)
+
+/* Missed frames and overflow counter register */
+#define OWL_EMAC_REG_MAC_CSR8 0x0040
+/* MII management and serial ROM register */
+#define OWL_EMAC_REG_MAC_CSR9 0x0048
+
+/* MII serial management register */
+#define OWL_EMAC_REG_MAC_CSR10 0x0050
+#define OWL_EMAC_BIT_MAC_CSR10_SB BIT(31) /* Start transfer or busy */
+#define OWL_EMAC_MSK_MAC_CSR10_CLKDIV GENMASK(30, 28) /* Clock divider */
+#define OWL_EMAC_OFF_MAC_CSR10_CLKDIV 28
+#define OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 0x04
+
+#define OWL_EMAC_OFF_MAC_CSR10_OPCODE 26 /* Operation mode */
+#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_DCG 0x00 /* Disable clock generation */
+#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR 0x01 /* Register write command */
+#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD 0x02 /* Register read command */
+#define OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS 0x03 /* Clock divider set */
+#define OWL_EMAC_MSK_MAC_CSR10_PHYADD GENMASK(25, 21) /* Physical layer address */
+#define OWL_EMAC_OFF_MAC_CSR10_PHYADD 21
+#define OWL_EMAC_MSK_MAC_CSR10_REGADD GENMASK(20, 16) /* Register address */
+#define OWL_EMAC_OFF_MAC_CSR10_REGADD 16
+#define OWL_EMAC_MSK_MAC_CSR10_DATA GENMASK(15, 0) /* Register data */
+
+/* General-purpose timer and interrupt mitigation control register */
+#define OWL_EMAC_REG_MAC_CSR11 0x0058
+#define OWL_EMAC_OFF_MAC_CSR11_TT 27 /* Transmit timer */
+#define OWL_EMAC_OFF_MAC_CSR11_NTP 24 /* No. of transmit packets */
+#define OWL_EMAC_OFF_MAC_CSR11_RT 20 /* Receive timer */
+#define OWL_EMAC_OFF_MAC_CSR11_NRP 17 /* No. of receive packets */
+
+/* MAC address low/high registers */
+#define OWL_EMAC_REG_MAC_CSR16 0x0080
+#define OWL_EMAC_REG_MAC_CSR17 0x0088
+
+/* Pause time & cache thresholds register */
+#define OWL_EMAC_REG_MAC_CSR18 0x0090
+#define OWL_EMAC_OFF_MAC_CSR18_CPTL 24 /* Cache pause threshold level */
+#define OWL_EMAC_OFF_MAC_CSR18_CRTL 16 /* Cache restart threshold level */
+#define OWL_EMAC_OFF_MAC_CSR18_PQT 0 /* Flow control pause quanta time */
+
+/* FIFO pause & restart threshold register */
+#define OWL_EMAC_REG_MAC_CSR19 0x0098
+#define OWL_EMAC_OFF_MAC_CSR19_FPTL 16 /* FIFO pause threshold level */
+#define OWL_EMAC_OFF_MAC_CSR19_FRTL 0 /* FIFO restart threshold level */
+
+/* Flow control setup & status register */
+#define OWL_EMAC_REG_MAC_CSR20 0x00A0
+#define OWL_EMAC_BIT_MAC_CSR20_FCE BIT(31) /* Flow Control Enable */
+#define OWL_EMAC_BIT_MAC_CSR20_TUE BIT(30) /* Transmit Un-pause frames Enable */
+#define OWL_EMAC_BIT_MAC_CSR20_TPE BIT(29) /* Transmit Pause frames Enable */
+#define OWL_EMAC_BIT_MAC_CSR20_RPE BIT(28) /* Receive Pause frames Enable */
+#define OWL_EMAC_BIT_MAC_CSR20_BPE BIT(27) /* Back pressure (half-duplex) Enable */
+
+/* MII control register */
+#define OWL_EMAC_REG_MAC_CTRL 0x00B0
+#define OWL_EMAC_BIT_MAC_CTRL_RRSB BIT(8) /* RMII_REFCLK select bit */
+#define OWL_EMAC_OFF_MAC_CTRL_SSDC 4 /* SMII SYNC delay cycle */
+#define OWL_EMAC_BIT_MAC_CTRL_RCPS BIT(1) /* REF_CLK phase select */
+#define OWL_EMAC_BIT_MAC_CTRL_RSIS BIT(0) /* RMII/SMII interface select */
+
+/* Receive descriptor status field */
+#define OWL_EMAC_BIT_RDES0_OWN BIT(31) /* Ownership bit */
+#define OWL_EMAC_BIT_RDES0_FF BIT(30) /* Filtering fail */
+#define OWL_EMAC_MSK_RDES0_FL GENMASK(29, 16) /* Frame length */
+#define OWL_EMAC_OFF_RDES0_FL 16
+#define OWL_EMAC_BIT_RDES0_ES BIT(15) /* Error summary */
+#define OWL_EMAC_BIT_RDES0_DE BIT(14) /* Descriptor error */
+#define OWL_EMAC_BIT_RDES0_RF BIT(11) /* Runt frame */
+#define OWL_EMAC_BIT_RDES0_MF BIT(10) /* Multicast frame */
+#define OWL_EMAC_BIT_RDES0_FS BIT(9) /* First descriptor */
+#define OWL_EMAC_BIT_RDES0_LS BIT(8) /* Last descriptor */
+#define OWL_EMAC_BIT_RDES0_TL BIT(7) /* Frame too long */
+#define OWL_EMAC_BIT_RDES0_CS BIT(6) /* Collision seen */
+#define OWL_EMAC_BIT_RDES0_FT BIT(5) /* Frame type */
+#define OWL_EMAC_BIT_RDES0_RE BIT(3) /* Report on MII error */
+#define OWL_EMAC_BIT_RDES0_DB BIT(2) /* Dribbling bit */
+#define OWL_EMAC_BIT_RDES0_CE BIT(1) /* CRC error */
+#define OWL_EMAC_BIT_RDES0_ZERO BIT(0) /* Legal frame length indicator */
+
+/* Receive descriptor control and count field */
+#define OWL_EMAC_BIT_RDES1_RER BIT(25) /* Receive end of ring */
+#define OWL_EMAC_MSK_RDES1_RBS1 GENMASK(10, 0) /* Buffer 1 size */
+
+/* Transmit descriptor status field */
+#define OWL_EMAC_BIT_TDES0_OWN BIT(31) /* Ownership bit */
+#define OWL_EMAC_BIT_TDES0_ES BIT(15) /* Error summary */
+#define OWL_EMAC_BIT_TDES0_LO BIT(11) /* Loss of carrier */
+#define OWL_EMAC_BIT_TDES0_NC BIT(10) /* No carrier */
+#define OWL_EMAC_BIT_TDES0_LC BIT(9) /* Late collision */
+#define OWL_EMAC_BIT_TDES0_EC BIT(8) /* Excessive collisions */
+#define OWL_EMAC_MSK_TDES0_CC GENMASK(6, 3) /* Collision count */
+#define OWL_EMAC_BIT_TDES0_UF BIT(1) /* Underflow error */
+#define OWL_EMAC_BIT_TDES0_DE BIT(0) /* Deferred */
+
+/* Transmit descriptor control and count field */
+#define OWL_EMAC_BIT_TDES1_IC BIT(31) /* Interrupt on completion */
+#define OWL_EMAC_BIT_TDES1_LS BIT(30) /* Last descriptor */
+#define OWL_EMAC_BIT_TDES1_FS BIT(29) /* First descriptor */
+#define OWL_EMAC_BIT_TDES1_FT1 BIT(28) /* Filtering type */
+#define OWL_EMAC_BIT_TDES1_SET BIT(27) /* Setup packet */
+#define OWL_EMAC_BIT_TDES1_AC BIT(26) /* Add CRC disable */
+#define OWL_EMAC_BIT_TDES1_TER BIT(25) /* Transmit end of ring */
+#define OWL_EMAC_BIT_TDES1_DPD BIT(23) /* Disabled padding */
+#define OWL_EMAC_BIT_TDES1_FT0 BIT(22) /* Filtering type */
+#define OWL_EMAC_MSK_TDES1_TBS1 GENMASK(10, 0) /* Buffer 1 size */
+
+static const char *const owl_emac_clk_names[] = { "eth", "rmii" };
+#define OWL_EMAC_NCLKS ARRAY_SIZE(owl_emac_clk_names)
+
+enum owl_emac_clk_map {
+ OWL_EMAC_CLK_ETH = 0,
+ OWL_EMAC_CLK_RMII
+};
+
+struct owl_emac_addr_list {
+ u8 addrs[OWL_EMAC_MAX_MULTICAST_ADDRS][ETH_ALEN];
+ int count;
+};
+
+/* TX/RX descriptors */
+struct owl_emac_ring_desc {
+ u32 status;
+ u32 control;
+ u32 buf_addr;
+ u32 reserved; /* 2nd buffer address is not used */
+};
+
+struct owl_emac_ring {
+ struct owl_emac_ring_desc *descs;
+ dma_addr_t descs_dma;
+ struct sk_buff **skbs;
+ dma_addr_t *skbs_dma;
+ unsigned int size;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct owl_emac_priv {
+ struct net_device *netdev;
+ void __iomem *base;
+
+ struct clk_bulk_data clks[OWL_EMAC_NCLKS];
+ struct reset_control *reset;
+
+ struct owl_emac_ring rx_ring;
+ struct owl_emac_ring tx_ring;
+
+ struct mii_bus *mii;
+ struct napi_struct napi;
+
+ phy_interface_t phy_mode;
+ unsigned int link;
+ int speed;
+ int duplex;
+ int pause;
+ struct owl_emac_addr_list mcaddr_list;
+
+ struct work_struct mac_reset_task;
+
+ u32 msg_enable; /* Debug message level */
+ spinlock_t lock; /* Sync concurrent ring access */
+};
+
+#endif /* __OWL_EMAC_H__ */
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 555299737b51..7965e5e3c985 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -2070,11 +2070,3 @@ static void __exit starfire_cleanup (void)
module_init(starfire_init);
module_exit(starfire_cleanup);
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 9c5891bbfe61..d77fafbc1530 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1449,10 +1449,10 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- const u8 *addr;
+ u8 addr[ETH_ALEN];
- addr = of_get_mac_address(ofdev->dev.of_node);
- if (!IS_ERR(addr)) {
+ err = of_get_mac_address(ofdev->dev.of_node, addr);
+ if (!err) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
} else {
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 5ed80d9a6b9f..f99ae317c188 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -790,7 +790,6 @@ static int emac_probe(struct platform_device *pdev)
struct emac_board_info *db;
struct net_device *ndev;
int ret = 0;
- const char *mac_addr;
ndev = alloc_etherdev(sizeof(struct emac_board_info));
if (!ndev) {
@@ -853,12 +852,9 @@ static int emac_probe(struct platform_device *pdev)
}
/* Read MAC-address from DT */
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
-
- /* Check if the MAC address is valid, if not get a random one */
- if (!is_valid_ether_addr(ndev->dev_addr)) {
+ ret = of_get_mac_address(np, ndev->dev_addr);
+ if (ret) {
+ /* if the MAC address is invalid get a random one */
eth_hw_addr_random(ndev);
dev_warn(&pdev->dev, "using random MAC address %pM\n",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 907125abef2c..1c00d719e5d7 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1351,7 +1351,6 @@ static int altera_tse_probe(struct platform_device *pdev)
struct resource *control_port;
struct resource *dma_res;
struct altera_tse_private *priv;
- const unsigned char *macaddr;
void __iomem *descmap;
const struct of_device_id *of_id = NULL;
@@ -1525,10 +1524,8 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
/* get default MAC address from device tree */
- macaddr = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(macaddr))
- ether_addr_copy(ndev->dev_addr, macaddr);
- else
+ ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr);
+ if (ret)
eth_hw_addr_random(ndev);
/* get phy addr and create mdio */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 02087d443e73..764852ead1d6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -863,7 +863,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device,
- "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
@@ -2396,7 +2396,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (key) {
if (key_len != sizeof(hash_key->key)) {
netdev_err(ena_dev->net_device,
- "key len (%hu) doesn't equal the supported size (%zu)\n",
+ "key len (%u) doesn't equal the supported size (%zu)\n",
key_len, sizeof(hash_key->key));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 343caf41e709..73b03ce59412 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -124,7 +124,7 @@ struct ena_com_io_cq {
/* holds the number of cdesc of the current packet */
u16 cur_rx_pkt_cdesc_count;
- /* save the firt cdesc idx of the current packet */
+ /* save the first cdesc idx of the current packet */
u16 cur_rx_pkt_cdesc_start_idx;
u16 q_depth;
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index d6cc7aa612b7..2fe7ccee55b2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -251,10 +251,10 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
- snprintf(*data, ETH_GSTRING_LEN,
- "queue_%u_%s_%s", i,
- is_xdp ? "xdp_tx" : "tx", ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ ethtool_sprintf(data,
+ "queue_%u_%s_%s", i,
+ is_xdp ? "xdp_tx" : "tx",
+ ena_stats->name);
}
if (!is_xdp) {
@@ -264,9 +264,9 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
ena_stats = &ena_stats_rx_strings[j];
- snprintf(*data, ETH_GSTRING_LEN,
- "queue_%u_rx_%s", i, ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ ethtool_sprintf(data,
+ "queue_%u_rx_%s", i,
+ ena_stats->name);
}
}
}
@@ -280,9 +280,8 @@ static void ena_com_dev_strings(u8 **data)
for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
ena_stats = &ena_stats_ena_com_strings[i];
- snprintf(*data, ETH_GSTRING_LEN,
- "ena_admin_q_%s", ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ ethtool_sprintf(data,
+ "ena_admin_q_%s", ena_stats->name);
}
}
@@ -295,15 +294,13 @@ static void ena_get_strings(struct ena_adapter *adapter,
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
- memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, ena_stats->name);
}
if (eni_stats_needed) {
for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
ena_stats = &ena_stats_eni_strings[i];
- memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, ena_stats->name);
}
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 102f2c91fdb8..881f88754bf6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -300,7 +300,7 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
if (unlikely(rc))
- goto error_drop_packet;
+ return rc;
ena_tx_ctx.ena_bufs = tx_info->bufs;
ena_tx_ctx.push_header = push_hdr;
@@ -330,8 +330,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL;
-error_drop_packet:
- xdp_return_frame(xdpf);
return rc;
}
@@ -339,8 +337,8 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
struct ena_adapter *adapter = netdev_priv(dev);
- int qid, i, err, drops = 0;
struct ena_ring *xdp_ring;
+ int qid, i, nxmit = 0;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -360,12 +358,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
spin_lock(&xdp_ring->xdp_tx_lock);
for (i = 0; i < n; i++) {
- err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
- /* The descriptor is freed by ena_xdp_xmit_frame in case
- * of an error.
- */
- if (err)
- drops++;
+ if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
+ break;
+ nxmit++;
}
/* Ring doorbell to make device aware of the packets */
@@ -378,7 +373,7 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
spin_unlock(&xdp_ring->xdp_tx_lock);
/* Return number of packets sent */
- return n - drops;
+ return nxmit;
}
static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
@@ -415,7 +410,9 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
spin_lock(&xdp_ring->xdp_tx_lock);
- ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);
+ if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
+ XDP_XMIT_FLUSH))
+ xdp_return_frame(xdpf);
spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx;
@@ -3978,7 +3975,7 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
- /* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */
+	/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
if (unlikely(!max_num_io_queues)) {
dev_err(&pdev->dev, "The device doesn't have io queues\n");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 960d483e8997..4a1220cc6f10 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -100,19 +100,19 @@ static int amd8111e_read_phy(struct amd8111e_priv *lp,
{
void __iomem *mmio = lp->mmio;
unsigned int reg_val;
- unsigned int repeat= REPEAT_CNT;
+ unsigned int repeat = REPEAT_CNT;
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
- reg_val = readl( mmio + PHY_ACCESS );
+ reg_val = readl(mmio + PHY_ACCESS);
- writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
- ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
- do{
+ writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
+ do {
reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
- if(reg_val & PHY_RD_ERR)
+ if (reg_val & PHY_RD_ERR)
goto err_phy_read;
*val = reg_val & 0xffff;
@@ -133,17 +133,17 @@ static int amd8111e_write_phy(struct amd8111e_priv *lp,
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
- reg_val = readl( mmio + PHY_ACCESS );
+ reg_val = readl(mmio + PHY_ACCESS);
- writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
+ writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
- do{
+ do {
reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write the data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
- if(reg_val & PHY_RD_ERR)
+ if (reg_val & PHY_RD_ERR)
goto err_phy_write;
return 0;
@@ -159,7 +159,7 @@ static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
struct amd8111e_priv *lp = netdev_priv(dev);
unsigned int reg_val;
- amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
+ amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
return reg_val;
}
@@ -179,17 +179,17 @@ static void amd8111e_mdio_write(struct net_device *dev,
static void amd8111e_set_ext_phy(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- u32 bmcr,advert,tmp;
+ u32 bmcr, advert, tmp;
/* Determine mii register values to set the speed */
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
- switch (lp->ext_phy_option){
+ switch (lp->ext_phy_option) {
default:
case SPEED_AUTONEG: /* advertise all values */
- tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
- ADVERTISE_100HALF|ADVERTISE_100FULL) ;
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
break;
case SPEED10_HALF:
tmp |= ADVERTISE_10HALF;
@@ -224,20 +224,20 @@ static int amd8111e_free_skbs(struct net_device *dev)
int i;
/* Freeing transmit skbs */
- for(i = 0; i < NUM_TX_BUFFERS; i++){
- if(lp->tx_skbuff[i]){
+ for (i = 0; i < NUM_TX_BUFFERS; i++) {
+ if (lp->tx_skbuff[i]) {
dma_unmap_single(&lp->pci_dev->dev,
lp->tx_dma_addr[i],
lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
- dev_kfree_skb (lp->tx_skbuff[i]);
+ dev_kfree_skb(lp->tx_skbuff[i]);
lp->tx_skbuff[i] = NULL;
lp->tx_dma_addr[i] = 0;
}
}
/* Freeing previously allocated receive buffers */
- for (i = 0; i < NUM_RX_BUFFERS; i++){
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if(rx_skbuff != NULL){
+ if (rx_skbuff != NULL) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -258,13 +258,13 @@ static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
struct amd8111e_priv *lp = netdev_priv(dev);
unsigned int mtu = dev->mtu;
- if (mtu > ETH_DATA_LEN){
+ if (mtu > ETH_DATA_LEN) {
/* MTU + ethernet header + FCS
* + optional VLAN tag + skb reserve space 2
*/
lp->rx_buff_len = mtu + ETH_HLEN + 10;
lp->options |= OPTION_JUMBO_ENABLE;
- } else{
+ } else {
lp->rx_buff_len = PKT_BUFF_SZ;
lp->options &= ~OPTION_JUMBO_ENABLE;
}
@@ -285,11 +285,11 @@ static int amd8111e_init_ring(struct net_device *dev)
lp->tx_ring_idx = 0;
- if(lp->opened)
+ if (lp->opened)
/* Free previously allocated transmit and receive skbs */
amd8111e_free_skbs(dev);
- else{
+ else {
/* allocate the tx and rx descriptors */
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
@@ -312,12 +312,12 @@ static int amd8111e_init_ring(struct net_device *dev)
lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
if (!lp->rx_skbuff[i]) {
- /* Release previos allocated skbs */
- for(--i; i >= 0 ;i--)
- dev_kfree_skb(lp->rx_skbuff[i]);
- goto err_free_rx_ring;
+ /* Release previously allocated skbs */
+ for (--i; i >= 0; i--)
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ goto err_free_rx_ring;
}
- skb_reserve(lp->rx_skbuff[i],2);
+ skb_reserve(lp->rx_skbuff[i], 2);
}
/* Initilaizing receive descriptors */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
@@ -375,40 +375,40 @@ static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
case RX_INTR_COAL :
timeout = coal_conf->rx_timeout;
event_count = coal_conf->rx_event_count;
- if( timeout > MAX_TIMEOUT ||
- event_count > MAX_EVENT_COUNT )
+ if (timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT)
return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN, mmio+INTEN0);
- writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
- mmio+DLY_INT_A);
+ writel((u32)DLY_INT_A_R0 | (event_count << 16) |
+ timeout, mmio + DLY_INT_A);
break;
- case TX_INTR_COAL :
+ case TX_INTR_COAL:
timeout = coal_conf->tx_timeout;
event_count = coal_conf->tx_event_count;
- if( timeout > MAX_TIMEOUT ||
- event_count > MAX_EVENT_COUNT )
+ if (timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT)
return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
- writel(VAL0|STINTEN,mmio+INTEN0);
- writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
- mmio+DLY_INT_B);
+ writel(VAL0 | STINTEN, mmio + INTEN0);
+ writel((u32)DLY_INT_B_T0 | (event_count << 16) |
+ timeout, mmio + DLY_INT_B);
break;
case DISABLE_COAL:
- writel(0,mmio+STVAL);
- writel(STINTEN, mmio+INTEN0);
- writel(0, mmio +DLY_INT_B);
- writel(0, mmio+DLY_INT_A);
+ writel(0, mmio + STVAL);
+ writel(STINTEN, mmio + INTEN0);
+ writel(0, mmio + DLY_INT_B);
+ writel(0, mmio + DLY_INT_A);
break;
case ENABLE_COAL:
/* Start the timer */
- writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
- writel(VAL0|STINTEN, mmio+INTEN0);
+ writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */
+ writel(VAL0 | STINTEN, mmio + INTEN0);
break;
default:
break;
@@ -423,67 +423,67 @@ static int amd8111e_restart(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
- int i,reg_val;
+ int i, reg_val;
/* stop the chip */
writel(RUN, mmio + CMD0);
- if(amd8111e_init_ring(dev))
+ if (amd8111e_init_ring(dev))
return -ENOMEM;
/* enable the port manager and set auto negotiation always */
- writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
- writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
+ writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
+ writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);
amd8111e_set_ext_phy(dev);
/* set control registers */
reg_val = readl(mmio + CTRL1);
reg_val &= ~XMTSP_MASK;
- writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
+ writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);
/* enable interrupt */
- writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
+ writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
/* initialize tx and rx ring base addresses */
- writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
- writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
+ writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
+ writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);
writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
/* set default IPG to 96 */
- writew((u32)DEFAULT_IPG,mmio+IPG);
+ writew((u32)DEFAULT_IPG, mmio + IPG);
writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
- if(lp->options & OPTION_JUMBO_ENABLE){
+ if (lp->options & OPTION_JUMBO_ENABLE) {
writel((u32)VAL2|JUMBO, mmio + CMD3);
/* Reset REX_UFLO */
- writel( REX_UFLO, mmio + CMD2);
+ writel(REX_UFLO, mmio + CMD2);
/* Should not set REX_UFLO for jumbo frames */
- writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
- }else{
- writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
+ writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
+ } else {
+ writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
writel((u32)JUMBO, mmio + CMD3);
}
#if AMD8111E_VLAN_TAG_USED
- writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
+ writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
- writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
+ writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
/* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++)
- writeb( dev->dev_addr[i], mmio + PADR + i );
+ writeb(dev->dev_addr[i], mmio + PADR + i);
/* Enable interrupt coalesce */
- if(lp->options & OPTION_INTR_COAL_ENABLE){
+ if (lp->options & OPTION_INTR_COAL_ENABLE) {
netdev_info(dev, "Interrupt Coalescing Enabled.\n");
- amd8111e_set_coalesce(dev,ENABLE_COAL);
+ amd8111e_set_coalesce(dev, ENABLE_COAL);
}
/* set RUN bit to start the chip */
@@ -499,11 +499,11 @@ static int amd8111e_restart(struct net_device *dev)
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
unsigned int reg_val;
- unsigned int logic_filter[2] ={0,};
+ unsigned int logic_filter[2] = {0,};
void __iomem *mmio = lp->mmio;
- /* stop the chip */
+ /* stop the chip */
writel(RUN, mmio + CMD0);
/* AUTOPOLL0 Register */ /* TBD: default value is 8100 in FPS */
@@ -519,13 +519,13 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
writel(0, mmio + XMT_RING_BASE_ADDR3);
/* Clear CMD0 */
- writel(CMD0_CLEAR,mmio + CMD0);
+ writel(CMD0_CLEAR, mmio + CMD0);
/* Clear CMD2 */
- writel(CMD2_CLEAR, mmio +CMD2);
+ writel(CMD2_CLEAR, mmio + CMD2);
/* Clear CMD7 */
- writel(CMD7_CLEAR , mmio + CMD7);
+ writel(CMD7_CLEAR, mmio + CMD7);
/* Clear DLY_INT_A and DLY_INT_B */
writel(0x0, mmio + DLY_INT_A);
@@ -542,16 +542,16 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
writel(0x0, mmio + STVAL);
/* Clear INTEN0 */
- writel( INTEN0_CLEAR, mmio + INTEN0);
+ writel(INTEN0_CLEAR, mmio + INTEN0);
/* Clear LADRF */
- writel(0x0 , mmio + LADRF);
+ writel(0x0, mmio + LADRF);
/* Set SRAM_SIZE & SRAM_BOUNDARY registers */
- writel( 0x80010,mmio + SRAM_SIZE);
+ writel(0x80010, mmio + SRAM_SIZE);
/* Clear RCV_RING0_LEN */
- writel(0x0, mmio + RCV_RING_LEN0);
+ writel(0x0, mmio + RCV_RING_LEN0);
/* Clear XMT_RING0/1/2/3_LEN */
writel(0x0, mmio + XMT_RING_LEN0);
@@ -571,10 +571,10 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
/* SRAM_SIZE register */
reg_val = readl(mmio + SRAM_SIZE);
- if(lp->options & OPTION_JUMBO_ENABLE)
- writel( VAL2|JUMBO, mmio + CMD3);
+ if (lp->options & OPTION_JUMBO_ENABLE)
+ writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
- writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
+ writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
/* Set default value to CTRL1 Register */
writel(CTRL1_DEFAULT, mmio + CTRL1);
@@ -616,14 +616,14 @@ static void amd8111e_stop_chip(struct amd8111e_priv *lp)
static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
/* Free transmit and receive descriptor rings */
- if(lp->rx_ring){
+ if (lp->rx_ring) {
dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
lp->rx_ring, lp->rx_ring_dma_addr);
lp->rx_ring = NULL;
}
- if(lp->tx_ring){
+ if (lp->tx_ring) {
dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
lp->tx_ring, lp->tx_ring_dma_addr);
@@ -643,11 +643,11 @@ static int amd8111e_tx(struct net_device *dev)
int tx_index;
int status;
/* Complete all the transmit packets */
- while (lp->tx_complete_idx != lp->tx_idx){
+ while (lp->tx_complete_idx != lp->tx_idx) {
tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
- if(status & OWN_BIT)
+ if (status & OWN_BIT)
break; /* It still hasn't been Txed */
lp->tx_ring[tx_index].buff_phy_addr = 0;
@@ -669,10 +669,10 @@ static int amd8111e_tx(struct net_device *dev)
le16_to_cpu(lp->tx_ring[tx_index].buff_count);
if (netif_queue_stopped(dev) &&
- lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
+ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
/* The ring is no longer full, clear tbusy. */
/* lp->tx_full = 0; */
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
}
return 0;
@@ -685,7 +685,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
struct net_device *dev = lp->amd8111e_net_dev;
int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
void __iomem *mmio = lp->mmio;
- struct sk_buff *skb,*new_skb;
+ struct sk_buff *skb, *new_skb;
int min_pkt_len, status;
int num_rx_pkt = 0;
short pkt_len;
@@ -710,7 +710,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
goto err_next_pkt;
}
/* check for STP and ENP */
- if (!((status & STP_BIT) && (status & ENP_BIT))){
+ if (!((status & STP_BIT) && (status & ENP_BIT))) {
/* resetting flags */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
goto err_next_pkt;
@@ -755,7 +755,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
#if AMD8111E_VLAN_TAG_USED
- if (vtag == TT_VLAN_TAGGED){
+ if (vtag == TT_VLAN_TAGGED) {
u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
@@ -793,25 +793,25 @@ err_next_pkt:
static int amd8111e_link_change(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- int status0,speed;
+ int status0, speed;
/* read the link change */
- status0 = readl(lp->mmio + STAT0);
+ status0 = readl(lp->mmio + STAT0);
- if(status0 & LINK_STATS){
- if(status0 & AUTONEG_COMPLETE)
+ if (status0 & LINK_STATS) {
+ if (status0 & AUTONEG_COMPLETE)
lp->link_config.autoneg = AUTONEG_ENABLE;
else
lp->link_config.autoneg = AUTONEG_DISABLE;
- if(status0 & FULL_DPLX)
+ if (status0 & FULL_DPLX)
lp->link_config.duplex = DUPLEX_FULL;
else
lp->link_config.duplex = DUPLEX_HALF;
speed = (status0 & SPEED_MASK) >> 7;
- if(speed == PHY_SPEED_10)
+ if (speed == PHY_SPEED_10)
lp->link_config.speed = SPEED_10;
- else if(speed == PHY_SPEED_100)
+ else if (speed == PHY_SPEED_100)
lp->link_config.speed = SPEED_100;
netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
@@ -821,8 +821,7 @@ static int amd8111e_link_change(struct net_device *dev)
"Full" : "Half");
netif_carrier_on(dev);
- }
- else{
+ } else {
lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_INVALID;
@@ -840,7 +839,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
unsigned int data;
unsigned int repeat = REPEAT_CNT;
- writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
+ writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
do {
status = readw(mmio + MIB_ADDR);
udelay(2); /* controller takes MAX 2 us to get mib data */
@@ -863,7 +862,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
if (!lp->opened)
return new_stats;
- spin_lock_irqsave (&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
/* stats.rx_packets */
new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
@@ -943,7 +942,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
/* Reset the mibs for collecting new statistics */
/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
- spin_unlock_irqrestore (&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
return new_stats;
}
@@ -974,96 +973,90 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
- if(rx_pkt_rate < 800){
- if(coal_conf->rx_coal_type != NO_COALESCE){
+ if (rx_pkt_rate < 800) {
+ if (coal_conf->rx_coal_type != NO_COALESCE) {
coal_conf->rx_timeout = 0x0;
coal_conf->rx_event_count = 0;
- amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = NO_COALESCE;
}
- }
- else{
+ } else {
rx_pkt_size = rx_data_rate/rx_pkt_rate;
- if (rx_pkt_size < 128){
- if(coal_conf->rx_coal_type != NO_COALESCE){
+ if (rx_pkt_size < 128) {
+ if (coal_conf->rx_coal_type != NO_COALESCE) {
coal_conf->rx_timeout = 0;
coal_conf->rx_event_count = 0;
- amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = NO_COALESCE;
}
- }
- else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
+ } else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
- if(coal_conf->rx_coal_type != LOW_COALESCE){
+ if (coal_conf->rx_coal_type != LOW_COALESCE) {
coal_conf->rx_timeout = 1;
coal_conf->rx_event_count = 4;
- amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = LOW_COALESCE;
}
- }
- else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
+ } else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
- if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
+ if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
coal_conf->rx_timeout = 1;
coal_conf->rx_event_count = 4;
- amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = MEDIUM_COALESCE;
}
- }
- else if(rx_pkt_size >= 1024){
- if(coal_conf->rx_coal_type != HIGH_COALESCE){
+ } else if (rx_pkt_size >= 1024) {
+
+ if (coal_conf->rx_coal_type != HIGH_COALESCE) {
coal_conf->rx_timeout = 2;
coal_conf->rx_event_count = 3;
- amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = HIGH_COALESCE;
}
}
}
- /* NOW FOR TX INTR COALESC */
- if(tx_pkt_rate < 800){
- if(coal_conf->tx_coal_type != NO_COALESCE){
+ /* NOW FOR TX INTR COALESCE */
+ if (tx_pkt_rate < 800) {
+ if (coal_conf->tx_coal_type != NO_COALESCE) {
coal_conf->tx_timeout = 0x0;
coal_conf->tx_event_count = 0;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = NO_COALESCE;
}
- }
- else{
+ } else {
tx_pkt_size = tx_data_rate/tx_pkt_rate;
- if (tx_pkt_size < 128){
+ if (tx_pkt_size < 128) {
- if(coal_conf->tx_coal_type != NO_COALESCE){
+ if (coal_conf->tx_coal_type != NO_COALESCE) {
coal_conf->tx_timeout = 0;
coal_conf->tx_event_count = 0;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = NO_COALESCE;
}
- }
- else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
+ } else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
- if(coal_conf->tx_coal_type != LOW_COALESCE){
+ if (coal_conf->tx_coal_type != LOW_COALESCE) {
coal_conf->tx_timeout = 1;
coal_conf->tx_event_count = 2;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = LOW_COALESCE;
}
- }
- else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
+ } else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
- if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
+ if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
coal_conf->tx_timeout = 2;
coal_conf->tx_event_count = 5;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE;
}
} else if (tx_pkt_size >= 1024) {
@@ -1091,7 +1084,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if(unlikely(dev == NULL))
+ if (unlikely(dev == NULL))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1105,7 +1098,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Process all the INT event until INTR bit is clear. */
- if (!(intr0 & INTR)){
+ if (!(intr0 & INTR)) {
handled = 0;
goto err_no_interrupt;
}
@@ -1140,7 +1133,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
amd8111e_calc_coalesce(dev);
err_no_interrupt:
- writel( VAL0 | INTREN,mmio + CMD0);
+ writel(VAL0 | INTREN, mmio + CMD0);
spin_unlock(&lp->lock);
@@ -1180,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev)
netif_carrier_off(lp->amd8111e_net_dev);
/* Delete ipg timer */
- if(lp->options & OPTION_DYN_IPG_ENABLE)
+ if (lp->options & OPTION_DYN_IPG_ENABLE)
del_timer_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock);
@@ -1200,8 +1193,8 @@ static int amd8111e_open(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
- dev->name, dev))
+ if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
+ IRQF_SHARED, dev->name, dev))
return -EAGAIN;
napi_enable(&lp->napi);
@@ -1210,7 +1203,7 @@ static int amd8111e_open(struct net_device *dev)
amd8111e_init_hw_default(lp);
- if(amd8111e_restart(dev)){
+ if (amd8111e_restart(dev)) {
spin_unlock_irq(&lp->lock);
napi_disable(&lp->napi);
if (dev->irq)
@@ -1218,7 +1211,7 @@ static int amd8111e_open(struct net_device *dev)
return -ENOMEM;
}
/* Start ipg timer */
- if(lp->options & OPTION_DYN_IPG_ENABLE){
+ if (lp->options & OPTION_DYN_IPG_ENABLE) {
add_timer(&lp->ipg_data.ipg_timer);
netdev_info(dev, "Dynamic IPG Enabled\n");
}
@@ -1289,10 +1282,10 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
lp->tx_idx++;
/* Trigger an immediate send poll. */
- writel( VAL1 | TDMD0, lp->mmio + CMD0);
- writel( VAL2 | RDMD0,lp->mmio + CMD0);
+ writel(VAL1 | TDMD0, lp->mmio + CMD0);
+ writel(VAL2 | RDMD0, lp->mmio + CMD0);
- if(amd8111e_tx_queue_avail(lp) < 0){
+ if (amd8111e_tx_queue_avail(lp) < 0) {
netif_stop_queue(dev);
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1326,15 +1319,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct amd8111e_priv *lp = netdev_priv(dev);
- u32 mc_filter[2] ;
+ u32 mc_filter[2];
int bit_num;
- if(dev->flags & IFF_PROMISC){
- writel( VAL2 | PROM, lp->mmio + CMD2);
+ if (dev->flags & IFF_PROMISC) {
+ writel(VAL2 | PROM, lp->mmio + CMD2);
return;
}
else
- writel( PROM, lp->mmio + CMD2);
+ writel(PROM, lp->mmio + CMD2);
if (dev->flags & IFF_ALLMULTI ||
netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packets */
@@ -1439,7 +1432,7 @@ static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_
if (wol_info->wolopts & WAKE_MAGIC)
lp->options |=
(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
- else if(wol_info->wolopts & WAKE_PHY)
+ else if (wol_info->wolopts & WAKE_PHY)
lp->options |=
(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
else
@@ -1464,14 +1457,14 @@ static const struct ethtool_ops ops = {
* gets/sets driver speed, gets memory mapped register values, forces
* auto negotiation, sets/gets WOL options for ethtool application.
*/
-static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd)
+static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
struct amd8111e_priv *lp = netdev_priv(dev);
int err;
u32 mii_regval;
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = lp->ext_phy_addr;
@@ -1511,7 +1504,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++)
- writeb( dev->dev_addr[i], lp->mmio + PADR + i );
+ writeb(dev->dev_addr[i], lp->mmio + PADR + i);
spin_unlock_irq(&lp->lock);
@@ -1536,22 +1529,22 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
spin_lock_irq(&lp->lock);
- /* stop the chip */
+ /* stop the chip */
writel(RUN, lp->mmio + CMD0);
dev->mtu = new_mtu;
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
- if(!err)
+ if (!err)
netif_start_queue(dev);
return err;
}
static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
- writel( VAL1|MPPLBA, lp->mmio + CMD3);
- writel( VAL0|MPEN_SW, lp->mmio + CMD7);
+ writel(VAL1 | MPPLBA, lp->mmio + CMD3);
+ writel(VAL0 | MPEN_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
readl(lp->mmio + CMD7);
@@ -1562,7 +1555,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
/* Adapter is already stopped/suspended/interrupt-disabled */
- writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
+ writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
readl(lp->mmio + CMD7);
@@ -1584,7 +1577,7 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
spin_lock_irq(&lp->lock);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
- if(!err)
+ if (!err)
netif_wake_queue(dev);
}
@@ -1605,22 +1598,21 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d)
/* stop chip */
spin_lock_irq(&lp->lock);
- if(lp->options & OPTION_DYN_IPG_ENABLE)
+ if (lp->options & OPTION_DYN_IPG_ENABLE)
del_timer_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock);
- if(lp->options & OPTION_WOL_ENABLE){
+ if (lp->options & OPTION_WOL_ENABLE) {
/* enable wol */
- if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
+ if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
amd8111e_enable_magicpkt(lp);
- if(lp->options & OPTION_WAKE_PHY_ENABLE)
+ if (lp->options & OPTION_WAKE_PHY_ENABLE)
amd8111e_enable_link_change(lp);
device_set_wakeup_enable(dev_d, 1);
- }
- else{
+ } else {
device_set_wakeup_enable(dev_d, 0);
}
@@ -1640,7 +1632,7 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
spin_lock_irq(&lp->lock);
amd8111e_restart(dev);
/* Restart ipg timer */
- if(lp->options & OPTION_DYN_IPG_ENABLE)
+ if (lp->options & OPTION_DYN_IPG_ENABLE)
mod_timer(&lp->ipg_data.ipg_timer,
jiffies + IPG_CONVERGE_JIFFIES);
spin_unlock_irq(&lp->lock);
@@ -1657,14 +1649,14 @@ static void amd8111e_config_ipg(struct timer_list *t)
unsigned int total_col_cnt;
unsigned int tmp_ipg;
- if(lp->link_config.duplex == DUPLEX_FULL){
+ if (lp->link_config.duplex == DUPLEX_FULL) {
ipg_data->ipg = DEFAULT_IPG;
return;
}
- if(ipg_data->ipg_state == SSTATE){
+ if (ipg_data->ipg_state == SSTATE) {
- if(ipg_data->timer_tick == IPG_STABLE_TIME){
+ if (ipg_data->timer_tick == IPG_STABLE_TIME) {
ipg_data->timer_tick = 0;
ipg_data->ipg = MIN_IPG - IPG_STEP;
@@ -1676,7 +1668,7 @@ static void amd8111e_config_ipg(struct timer_list *t)
ipg_data->timer_tick++;
}
- if(ipg_data->ipg_state == CSTATE){
+ if (ipg_data->ipg_state == CSTATE) {
/* Get the current collision count */
@@ -1684,10 +1676,10 @@ static void amd8111e_config_ipg(struct timer_list *t)
amd8111e_read_mib(mmio, xmt_collisions);
if ((total_col_cnt - prev_col_cnt) <
- (ipg_data->diff_col_cnt)){
+ (ipg_data->diff_col_cnt)) {
ipg_data->diff_col_cnt =
- total_col_cnt - prev_col_cnt ;
+ total_col_cnt - prev_col_cnt;
ipg_data->ipg = ipg_data->current_ipg;
}
@@ -1696,7 +1688,7 @@ static void amd8111e_config_ipg(struct timer_list *t)
if (ipg_data->current_ipg <= MAX_IPG)
tmp_ipg = ipg_data->current_ipg;
- else{
+ else {
tmp_ipg = ipg_data->ipg;
ipg_data->ipg_state = SSTATE;
}
@@ -1748,24 +1740,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err, i;
- unsigned long reg_addr,reg_len;
+ unsigned long reg_addr, reg_len;
struct amd8111e_priv *lp;
struct net_device *dev;
err = pci_enable_device(pdev);
- if(err){
+ if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
return err;
}
- if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find PCI base address\n");
err = -ENODEV;
goto err_disable_pdev;
}
err = pci_request_regions(pdev, MODULE_NAME);
- if(err){
+ if (err) {
dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
@@ -1798,7 +1790,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif
lp = netdev_priv(dev);
@@ -1821,16 +1813,16 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Setting user-defined parameters */
lp->ext_phy_option = speed_duplex[card_idx];
- if(coalesce[card_idx])
+ if (coalesce[card_idx])
lp->options |= OPTION_INTR_COAL_ENABLE;
- if(dynamic_ipg[card_idx++])
+ if (dynamic_ipg[card_idx++])
lp->options |= OPTION_DYN_IPG_ENABLE;
/* Initialize driver entry points */
dev->netdev_ops = &amd8111e_netdev_ops;
dev->ethtool_ops = &ops;
- dev->irq =pdev->irq;
+ dev->irq = pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
dev->min_mtu = AMD8111E_MIN_MTU;
dev->max_mtu = AMD8111E_MAX_MTU;
@@ -1861,7 +1853,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
/* Initialize software ipg timer */
- if(lp->options & OPTION_DYN_IPG_ENABLE){
+ if (lp->options & OPTION_DYN_IPG_ENABLE) {
timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
lp->ipg_data.ipg_timer.expires = jiffies +
IPG_CONVERGE_JIFFIES;
@@ -1870,7 +1862,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
}
/* display driver and device information */
- chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
+ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
chip_version, dev->dev_addr);
if (lp->ext_phy_id)
@@ -1879,7 +1871,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
else
dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
- return 0;
+ return 0;
err_free_dev:
free_netdev(dev);
@@ -1919,7 +1911,7 @@ MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);
static struct pci_driver amd8111e_driver = {
- .name = MODULE_NAME,
+ .name = MODULE_NAME,
.id_table = amd8111e_pci_tbl,
.probe = amd8111e_probe_one,
.remove = amd8111e_remove_one,
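
Everything in the amd8111e diff above is mechanical coding-style cleanup: a space after keywords, spaces around binary operators, no padding inside parentheses, and `} else {` cuddling. For reference, the class of warning being silenced can be listed with checkpatch in file mode:

	./scripts/checkpatch.pl -f drivers/net/ethernet/amd/amd8111e.c

(-f treats the argument as a source file rather than a patch.)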
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 961796abab35..c1eab916438f 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -1156,11 +1156,3 @@ static void __exit atarilance_module_exit(void)
module_init(atarilance_module_init);
module_exit(atarilance_module_exit);
#endif /* MODULE */
-
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 4
- * End:
- */
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index e10aceb2b767..6784f8748638 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -170,6 +170,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
static void hplance_writerap(void *priv, unsigned short value)
{
struct lance_private *lp = (struct lance_private *)priv;
+
do {
out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
@@ -178,6 +179,7 @@ static void hplance_writerap(void *priv, unsigned short value)
static void hplance_writerdp(void *priv, unsigned short value)
{
struct lance_private *lp = (struct lance_private *)priv;
+
do {
out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
@@ -187,6 +189,7 @@ static unsigned short hplance_readrdp(void *priv)
{
struct lance_private *lp = (struct lance_private *)priv;
__u16 value;
+
do {
value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f78daba60b35..4100ab07e6b7 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2853,8 +2853,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
netif_info(lp, link, dev, "link down\n");
}
if (lp->phycount > 1) {
- curr_link = pcnet32_check_otherphy(dev);
- prev_link = 0;
+ pcnet32_check_otherphy(dev);
}
} else if (verbose || !prev_link) {
netif_carrier_on(dev);
@@ -3030,10 +3029,3 @@ static void __exit pcnet32_cleanup_module(void)
module_init(pcnet32_init_module);
module_exit(pcnet32_cleanup_module);
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b56a9e2aecd9..67b8113a2b53 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -857,7 +857,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
struct device_node *phy_node;
struct phy_device *phydev = NULL;
struct arc_emac_priv *priv;
- const char *mac_addr;
unsigned int id, clock_frequency, irq;
int err;
@@ -942,11 +941,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
}
/* Get MAC address from device tree */
- mac_addr = of_get_mac_address(dev->of_node);
-
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
- else
+ err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ if (err)
eth_hw_addr_random(ndev);
arc_emac_set_address_internal(ndev);
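
This hunk is one instance of a tree-wide conversion: of_get_mac_address() now takes the destination buffer and returns 0 or a negative errno instead of an ERR_PTR-encoded pointer. A minimal sketch of the new calling convention, using the fallback pattern most of these drivers adopt:

	int err;

	err = of_get_mac_address(dev->of_node, ndev->dev_addr);
	if (err)
		/* no (valid) DT address: fall back to a random MAC */
		eth_hw_addr_random(ndev);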
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index fb803bf92ded..482c58c4c584 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -21,6 +21,7 @@ config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
depends on ATH79
select PHYLINK
+ imply NET_SELFTESTS
help
If you wish to compile a kernel for AR7XXX/91XXX and enable
ethernet support, then you should always answer Y to this.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index a60ce9030581..1ba81b1eb6fd 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -37,6 +37,7 @@
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <net/selftests.h>
/* For our NAPI weight bigger does *NOT* mean better - it means more
* D-cache misses and lots more wasted cycles than we'll ever
@@ -497,12 +498,17 @@ static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
- if (sset == ETH_SS_STATS) {
- int i;
+ int i;
+ switch (sset) {
+ case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
}
}
@@ -519,9 +525,14 @@ static void ag71xx_ethtool_get_stats(struct net_device *ndev,
static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
- if (sset == ETH_SS_STATS)
+ switch (sset) {
+ case ETH_SS_STATS:
return ARRAY_SIZE(ag71xx_statistics);
- return -EOPNOTSUPP;
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct ethtool_ops ag71xx_ethtool_ops = {
@@ -536,6 +547,7 @@ static const struct ethtool_ops ag71xx_ethtool_ops = {
.get_strings = ag71xx_ethtool_get_strings,
.get_ethtool_stats = ag71xx_ethtool_get_stats,
.get_sset_count = ag71xx_ethtool_get_sset_count,
+ .self_test = net_selftest,
};
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
@@ -1658,9 +1670,9 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
struct net_device *ndev = ag->ndev;
int ring_mask, ring_size, done = 0;
unsigned int pktlen_mask, offset;
- struct sk_buff *next, *skb;
struct ag71xx_ring *ring;
struct list_head rx_list;
+ struct sk_buff *skb;
ring = &ag->rx_ring;
pktlen_mask = ag->dcfg->desc_pktlen_mask;
@@ -1725,7 +1737,7 @@ next:
ag71xx_ring_rx_refill(ag);
- list_for_each_entry_safe(skb, next, &rx_list, list)
+ list_for_each_entry(skb, &rx_list, list)
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb_list(&rx_list);
@@ -1856,7 +1868,6 @@ static int ag71xx_probe(struct platform_device *pdev)
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
- const void *mac_addr;
int tx_size, err, i;
struct ag71xx *ag;
@@ -1957,10 +1968,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
- if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
+ err = of_get_mac_address(np, ndev->dev_addr);
+ if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_random_addr(ndev->dev_addr);
}
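
The ag71xx hunks wire the driver into the generic ethtool selftest helpers from <net/selftests.h>. A sketch of the minimal plumbing any driver needs, with hypothetical foo_* names standing in for the driver-specific ones:

	#include <net/selftests.h>

	static int foo_get_sset_count(struct net_device *ndev, int sset)
	{
		switch (sset) {
		case ETH_SS_TEST:
			return net_selftest_get_count();
		default:
			return -EOPNOTSUPP;
		}
	}

	static void foo_get_strings(struct net_device *ndev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_TEST)
			net_selftest_get_strings(data);
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.get_sset_count = foo_get_sset_count,
		.get_strings    = foo_get_strings,
		.self_test      = net_selftest, /* common loopback tests */
	};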
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index a0562a90fb6d..28ae5c16831e 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -367,6 +367,7 @@ struct atl1c_hw {
u16 phy_id1;
u16 phy_id2;
+ spinlock_t intr_mask_lock; /* protect the intr_mask */
u32 intr_mask;
u8 preamble_len;
@@ -506,6 +507,7 @@ struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct napi_struct napi;
+ struct napi_struct tx_napi;
struct page *rx_page;
unsigned int rx_page_offset;
unsigned int rx_frag_size;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 3f65f2b370c5..1d17c24e6d75 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -47,7 +47,7 @@ static void atl1c_down(struct atl1c_adapter *adapter);
static int atl1c_reset_mac(struct atl1c_hw *hw);
static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
static int atl1c_configure(struct atl1c_adapter *adapter);
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode);
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -470,7 +470,7 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
+ head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
adapter->rx_frag_size = roundup_pow_of_two(head_size);
}
@@ -813,6 +813,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
atl1c_set_rxbufsize(adapter, adapter->netdev);
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->hw.intr_mask_lock);
set_bit(__AT_DOWN, &adapter->flags);
return 0;
@@ -1434,7 +1435,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
atl1c_set_multi(netdev);
atl1c_restore_vlan(adapter);
- num = atl1c_alloc_rx_buffer(adapter);
+ num = atl1c_alloc_rx_buffer(adapter, false);
if (unlikely(num == 0))
return -ENOMEM;
@@ -1530,20 +1531,19 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
spin_unlock(&adapter->mdio_lock);
}
-static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
- enum atl1c_trans_queue type)
+static int atl1c_clean_tx(struct napi_struct *napi, int budget)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_adapter *adapter =
+ container_of(napi, struct atl1c_adapter, tx_napi);
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[atl1c_trans_normal];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 hw_next_to_clean;
- u16 reg;
unsigned int total_bytes = 0, total_packets = 0;
+ unsigned long flags;
- reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
-
- AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
+ AT_READ_REGW(&adapter->hw, REG_TPD_PRI0_CIDX, &hw_next_to_clean);
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1564,7 +1564,15 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
netif_wake_queue(adapter->netdev);
}
- return true;
+ if (total_packets < budget) {
+ napi_complete_done(napi, total_packets);
+ spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
+ adapter->hw.intr_mask |= ISR_TX_PKT;
+ AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
+ spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
+ return total_packets;
+ }
+ return budget;
}
/**
@@ -1599,13 +1607,22 @@ static irqreturn_t atl1c_intr(int irq, void *data)
AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
if (status & ISR_RX_PKT) {
if (likely(napi_schedule_prep(&adapter->napi))) {
+ spin_lock(&hw->intr_mask_lock);
hw->intr_mask &= ~ISR_RX_PKT;
AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+ spin_unlock(&hw->intr_mask_lock);
__napi_schedule(&adapter->napi);
}
}
- if (status & ISR_TX_PKT)
- atl1c_clean_tx_irq(adapter, atl1c_trans_normal);
+ if (status & ISR_TX_PKT) {
+ if (napi_schedule_prep(&adapter->tx_napi)) {
+ spin_lock(&hw->intr_mask_lock);
+ hw->intr_mask &= ~ISR_TX_PKT;
+ AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+ spin_unlock(&hw->intr_mask_lock);
+ __napi_schedule(&adapter->tx_napi);
+ }
+ }
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
@@ -1650,14 +1667,20 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
skb_checksum_none_assert(skb);
}
-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
+static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
+ bool napi_mode)
{
struct sk_buff *skb;
struct page *page;
- if (adapter->rx_frag_size > PAGE_SIZE)
- return netdev_alloc_skb(adapter->netdev,
- adapter->rx_buffer_len);
+ if (adapter->rx_frag_size > PAGE_SIZE) {
+ if (likely(napi_mode))
+ return napi_alloc_skb(&adapter->napi,
+ adapter->rx_buffer_len);
+ else
+ return netdev_alloc_skb_ip_align(adapter->netdev,
+ adapter->rx_buffer_len);
+ }
page = adapter->rx_page;
if (!page) {
@@ -1670,7 +1693,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD);
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;
@@ -1680,7 +1703,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
return skb;
}
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
struct pci_dev *pdev = adapter->pdev;
@@ -1701,7 +1724,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = atl1c_alloc_skb(adapter);
+ skb = atl1c_alloc_skb(adapter, napi_mode);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
@@ -1851,13 +1874,13 @@ rrs_checked:
vlan = le16_to_cpu(vlan);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
}
- netif_receive_skb(skb);
+ napi_gro_receive(&adapter->napi, skb);
(*work_done)++;
count++;
}
if (count)
- atl1c_alloc_rx_buffer(adapter);
+ atl1c_alloc_rx_buffer(adapter, true);
}
/**
@@ -1870,6 +1893,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
struct atl1c_adapter *adapter =
container_of(napi, struct atl1c_adapter, napi);
int work_done = 0;
+ unsigned long flags;
/* Keep link state information with original netdev */
if (!netif_carrier_ok(adapter->netdev))
@@ -1880,8 +1904,10 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
if (work_done < budget) {
quit_polling:
napi_complete_done(napi, work_done);
+ spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
adapter->hw.intr_mask |= ISR_RX_PKT;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
+ spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
}
return work_done;
}
@@ -2319,6 +2345,7 @@ static int atl1c_up(struct atl1c_adapter *adapter)
atl1c_check_link_status(adapter);
clear_bit(__AT_DOWN, &adapter->flags);
napi_enable(&adapter->napi);
+ napi_enable(&adapter->tx_napi);
atl1c_irq_enable(adapter);
netif_start_queue(netdev);
return err;
@@ -2339,6 +2366,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
set_bit(__AT_DOWN, &adapter->flags);
netif_carrier_off(netdev);
napi_disable(&adapter->napi);
+ napi_disable(&adapter->tx_napi);
atl1c_irq_disable(adapter);
atl1c_free_irq(adapter);
/* disable ASPM if device inactive */
@@ -2587,7 +2615,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
+ dev_set_threaded(netdev, true);
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
+ netif_napi_add(netdev, &adapter->tx_napi, atl1c_clean_tx, 64);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
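
The new hw->intr_mask_lock exists because the RX and TX NAPI pollers now race on read-modify-write updates of the shared IMR shadow. The pattern, extracted from the hunks above — the interrupt side masks one event class and schedules its poller, the poll side re-enables it on completion:

	/* hard-irq context */
	spin_lock(&hw->intr_mask_lock);
	hw->intr_mask &= ~ISR_TX_PKT;
	AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
	spin_unlock(&hw->intr_mask_lock);
	__napi_schedule(&adapter->tx_napi);

	/* NAPI poll context, budget not exhausted */
	napi_complete_done(napi, total_packets);
	spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
	adapter->hw.intr_mask |= ISR_TX_PKT;
	AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
	spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);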
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index f016f2e12ee7..0cc0db04c27d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1675,29 +1675,7 @@ static struct pci_driver atl2_driver = {
.shutdown = atl2_shutdown,
};
-/**
- * atl2_init_module - Driver Registration Routine
- *
- * atl2_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- */
-static int __init atl2_init_module(void)
-{
- return pci_register_driver(&atl2_driver);
-}
-module_init(atl2_init_module);
-
-/**
- * atl2_exit_module - Driver Exit Cleanup Routine
- *
- * atl2_exit_module is called just before the driver is removed
- * from memory.
- */
-static void __exit atl2_exit_module(void)
-{
- pci_unregister_driver(&atl2_driver);
-}
-module_exit(atl2_exit_module);
+module_pci_driver(atl2_driver);
static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
{
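
module_pci_driver() replaces the hand-rolled init/exit pair with generated boilerplate; conceptually the macro expands to roughly this (simplified from the module_driver() definition in <linux/device/driver.h>):

	static int __init atl2_driver_init(void)
	{
		return pci_register_driver(&atl2_driver);
	}
	module_init(atl2_driver_init);

	static void __exit atl2_driver_exit(void)
	{
		pci_unregister_driver(&atl2_driver);
	}
	module_exit(atl2_driver_exit);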
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 65981931a798..60d908507f51 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -53,6 +54,7 @@ struct bcm4908_enet_dma_ring {
int length;
u16 cfg_block;
u16 st_ram_block;
+ struct napi_struct napi;
union {
void *cpu_addr;
@@ -66,8 +68,8 @@ struct bcm4908_enet_dma_ring {
struct bcm4908_enet {
struct device *dev;
struct net_device *netdev;
- struct napi_struct napi;
void __iomem *base;
+ int irq_tx;
struct bcm4908_enet_dma_ring tx_ring;
struct bcm4908_enet_dma_ring rx_ring;
@@ -122,24 +124,31 @@ static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
* Helpers
*/
-static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
{
- enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+ enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
}
-static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+/***
+ * DMA ring ops
+ */
+
+static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
{
- enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
}
-static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
{
- enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
}
-static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
{
- enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
}
/***
@@ -414,11 +423,14 @@ static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
{
struct bcm4908_enet *enet = dev_id;
+ struct bcm4908_enet_dma_ring *ring;
- bcm4908_enet_intrs_off(enet);
- bcm4908_enet_intrs_ack(enet);
+ ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;
- napi_schedule(&enet->napi);
+ bcm4908_enet_dma_ring_intrs_off(enet, ring);
+ bcm4908_enet_dma_ring_intrs_ack(enet, ring);
+
+ napi_schedule(&ring->napi);
return IRQ_HANDLED;
}
@@ -426,6 +438,8 @@ static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
static int bcm4908_enet_open(struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
struct device *dev = enet->dev;
int err;
@@ -435,6 +449,17 @@ static int bcm4908_enet_open(struct net_device *netdev)
return err;
}
+ if (enet->irq_tx > 0) {
+ err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
+ "tx", enet);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n",
+ enet->irq_tx, err);
+ free_irq(netdev->irq, enet);
+ return err;
+ }
+ }
+
bcm4908_enet_gmac_init(enet);
bcm4908_enet_dma_reset(enet);
bcm4908_enet_dma_init(enet);
@@ -443,14 +468,19 @@ static int bcm4908_enet_open(struct net_device *netdev)
enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
- bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
- napi_enable(&enet->napi);
+ if (enet->irq_tx > 0) {
+ napi_enable(&tx_ring->napi);
+ bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
+ bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+ }
+
+ bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
+ napi_enable(&rx_ring->napi);
netif_carrier_on(netdev);
netif_start_queue(netdev);
-
- bcm4908_enet_intrs_ack(enet);
- bcm4908_enet_intrs_on(enet);
+ bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
+ bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
return 0;
}
@@ -458,16 +488,20 @@ static int bcm4908_enet_open(struct net_device *netdev)
static int bcm4908_enet_stop(struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- napi_disable(&enet->napi);
+ napi_disable(&rx_ring->napi);
+ napi_disable(&tx_ring->napi);
bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
bcm4908_enet_dma_uninit(enet);
+ free_irq(enet->irq_tx, enet);
free_irq(enet->netdev->irq, enet);
return 0;
@@ -484,25 +518,19 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
u32 tmp;
/* Free transmitted skbs */
- while (ring->read_idx != ring->write_idx) {
- buf_desc = &ring->buf_desc[ring->read_idx];
- if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
- break;
- slot = &ring->slots[ring->read_idx];
-
- dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
- dev_kfree_skb(slot->skb);
- if (++ring->read_idx == ring->length)
- ring->read_idx = 0;
- }
+ if (enet->irq_tx < 0 &&
+ !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN))
+ napi_schedule(&enet->tx_ring.napi);
/* Don't use the last empty buf descriptor */
if (ring->read_idx <= ring->write_idx)
free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
else
free_buf_descs = ring->read_idx - ring->write_idx;
- if (free_buf_descs < 2)
+ if (free_buf_descs < 2) {
+ netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
+ }
/* Hardware removes OWN bit after sending data */
buf_desc = &ring->buf_desc[ring->write_idx];
@@ -539,9 +567,10 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
return NETDEV_TX_OK;
}
-static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
{
- struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+ struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+ struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
struct device *dev = enet->dev;
int handled = 0;
@@ -590,7 +619,7 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
if (handled < weight) {
napi_complete_done(napi, handled);
- bcm4908_enet_intrs_on(enet);
+ bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
}
/* Hardware could disable ring if it runs out of descriptors */
@@ -599,6 +628,42 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
return handled;
}
+static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+ struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ unsigned int bytes = 0;
+ int handled = 0;
+
+ while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
+ buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
+ if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+ break;
+ slot = &tx_ring->slots[tx_ring->read_idx];
+
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ bytes += slot->len;
+ if (++tx_ring->read_idx == tx_ring->length)
+ tx_ring->read_idx = 0;
+
+ handled++;
+ }
+
+ if (handled < weight) {
+ napi_complete_done(napi, handled);
+ bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+ }
+
+ if (netif_queue_stopped(enet->netdev))
+ netif_wake_queue(enet->netdev);
+
+ return handled;
+}
+
static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
@@ -641,6 +706,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
if (netdev->irq < 0)
return netdev->irq;
+ enet->irq_tx = platform_get_irq_byname(pdev, "tx");
+
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
err = bcm4908_enet_dma_alloc(enet);
@@ -648,12 +715,15 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
- eth_hw_addr_random(netdev);
+ err = of_get_mac_address(dev->of_node, netdev->dev_addr);
+ if (err)
+ eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
netdev->min_mtu = ETH_ZLEN;
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
- netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+ netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
err = register_netdev(netdev);
if (err) {
@@ -671,7 +741,8 @@ static int bcm4908_enet_remove(struct platform_device *pdev)
struct bcm4908_enet *enet = platform_get_drvdata(pdev);
unregister_netdev(enet->netdev);
- netif_napi_del(&enet->napi);
+ netif_napi_del(&enet->rx_ring.napi);
+ netif_napi_del(&enet->tx_ring.napi);
bcm4908_enet_dma_free(enet);
return 0;
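
With one napi_struct now embedded per DMA ring, each poller recovers its context through two container_of() steps; the idiom, as used by both new pollers:

	struct bcm4908_enet_dma_ring *ring =
		container_of(napi, struct bcm4908_enet_dma_ring, napi);
	struct bcm4908_enet *enet =
		container_of(ring, struct bcm4908_enet, rx_ring);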
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 777bbf6d2586..d9f0f0df8f7b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2457,7 +2457,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
struct bcm_sysport_priv *priv;
struct device_node *dn;
struct net_device *dev;
- const void *macaddr;
u32 txq, rxq;
int ret;
@@ -2552,12 +2551,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
}
/* Initialize netdevice members */
- macaddr = of_get_mac_address(dn);
- if (IS_ERR(macaddr)) {
+ ret = of_get_mac_address(dn, dev->dev_addr);
+ if (ret) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
- } else {
- ether_addr_copy(dev->dev_addr, macaddr);
}
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index a5fd161ab5ee..85fa0ab7201c 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -115,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core)
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
- const u8 *mac = NULL;
+ const u8 *mac;
int err;
bgmac = bgmac_alloc(&core->dev);
@@ -128,11 +128,10 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- if (bgmac->dev->of_node)
- mac = of_get_mac_address(bgmac->dev->of_node);
+ err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
/* If no MAC address assigned via device tree, check SPROM */
- if (IS_ERR_OR_NULL(mac)) {
+ if (err) {
switch (core->core_unit) {
case 0:
mac = sprom->et0mac;
@@ -149,10 +148,9 @@ static int bgmac_probe(struct bcma_device *core)
err = -ENOTSUPP;
goto err;
}
+ ether_addr_copy(bgmac->net_dev->dev_addr, mac);
}
- ether_addr_copy(bgmac->net_dev->dev_addr, mac);
-
/* On BCM4706 we need common core to access PHY */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
!core->bus->drv_gmac_cmn.core) {
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index f37f1c58f368..9834b77cf4b6 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -173,7 +173,7 @@ static int bgmac_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
struct resource *regs;
- const u8 *mac_addr;
+ int ret;
bgmac = bgmac_alloc(&pdev->dev);
if (!bgmac)
@@ -192,11 +192,10 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr);
- else
- dev_warn(&pdev->dev, "MAC address not present in device tree\n");
+ ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "MAC address not present in device tree\n");
bgmac->irq = platform_get_irq(pdev, 0);
if (bgmac->irq < 0)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 3e8a179f39db..c0986096c701 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8057,7 +8057,7 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
data[i + 3] = data[i + BNX2_VPD_LEN];
}
- i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto vpd_done;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b652ed72a621..281b1c2e04a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1395,7 +1395,6 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
- int ret = 0;
if (REG_RD(bp, comp_addr)) {
BNX2X_ERR("Cleanup complete was not 0 before sending\n");
@@ -1420,7 +1419,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
/* Zero completion for next FLR */
REG_WR(bp, comp_addr, 0);
- return ret;
+ return 0;
}
u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
@@ -12207,8 +12206,7 @@ static void bnx2x_read_fwinfo(struct bnx2x *bp)
/* VPD RO tag should be first tag after identifier string, hence
* we should be able to find it in first BNX2X_VPD_LEN chars
*/
- i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
- PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
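
Both VPD hunks above track the same PCI core change: pci_vpd_find_tag() dropped its always-zero 'offset' argument and now searches from the start of the buffer. The conversion at each call site is mechanical (sketch):

	/* before */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	/* after */
	i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);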
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b53a0d87371a..2985844634c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -122,7 +122,10 @@ enum board_idx {
NETXTREME_E_VF,
NETXTREME_C_VF,
NETXTREME_S_VF,
+ NETXTREME_C_VF_HV,
+ NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
+ NETXTREME_E_P5_VF_HV,
};
/* indexed by enum above */
@@ -170,7 +173,10 @@ static const struct {
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
+ [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
+ [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
+ [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -265,7 +281,8 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
- idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -358,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
+ __le32 lflags = 0;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -399,6 +417,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
+ if (unlikely(skb->no_fcs)) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+ goto normal_tx;
+ }
+
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
@@ -500,7 +523,7 @@ normal_tx:
txbd1 = (struct tx_bd_ext *)
&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- txbd1->tx_bd_hsize_lflags = 0;
+ txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
u32 hdr_len;
@@ -512,14 +535,14 @@ normal_tx:
hdr_len = skb_transport_offset(skb) +
tcp_hdrlen(skb);
- txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+ txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
length = skb_shinfo(skb)->gso_size;
txbd1->tx_bd_mss = cpu_to_le32(length);
length += hdr_len;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- txbd1->tx_bd_hsize_lflags =
+ txbd1->tx_bd_hsize_lflags |=
cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
txbd1->tx_bd_mss = 0;
}
@@ -1732,14 +1755,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cons = rxcmp->rx_cmp_opaque;
if (unlikely(cons != rxr->rx_next_cons)) {
- int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+ int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
/* 0xffff is forced error, don't print it */
if (rxr->rx_next_cons != 0xffff)
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
- return rc1;
+ if (rc1)
+ return rc1;
+ goto next_rx_no_prod_no_len;
}
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
@@ -4145,7 +4170,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
- if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
+ if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
@@ -4470,7 +4495,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
writel(1, bp->bar0 + doorbell_offset);
if (!pci_is_enabled(bp->pdev))
- return 0;
+ return -ENODEV;
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
@@ -4500,12 +4525,15 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
return -EBUSY;
/* on first few passes, just barely sleep */
- if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
- else
+ } else {
+ if (HWRM_WAIT_MUST_ABORT(bp, req))
+ break;
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
+ }
}
if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
@@ -4530,15 +4558,19 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (len)
break;
/* on first few passes, just barely sleep */
- if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
- else
+ } else {
+ if (HWRM_WAIT_MUST_ABORT(bp, req))
+ goto timeout_abort;
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
+ }
}
if (i >= tmo_count) {
+timeout_abort:
if (!silent)
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
HWRM_TOTAL_TIMEOUT(i),
@@ -7540,6 +7572,32 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
BNXT_FW_HEALTH_WIN_MAP_OFF);
}
+bool bnxt_is_fw_healthy(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ u32 fw_status;
+
+ fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
+ return false;
+ }
+
+ return true;
+}
+
+static void bnxt_inv_fw_health_reg(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_type;
+
+ if (!fw_health || !fw_health->status_reliable)
+ return;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->status_reliable = false;
+}
+
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
{
void __iomem *hs;
@@ -7547,6 +7605,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
u32 reg_type;
u32 sig;
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+
__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
@@ -7558,11 +7619,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp)) {
- if (bp->fw_health)
- bp->fw_health->status_reliable = false;
+ if (!BNXT_CHIP_P5(bp))
return;
- }
+
status_loc = BNXT_GRC_REG_STATUS_P5 |
BNXT_FW_HEALTH_REG_TYPE_BAR0;
} else {
@@ -7592,6 +7651,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
u32 reg_base = 0xffffffff;
int i;
+ bp->fw_health->status_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
@@ -7604,6 +7664,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return -ERANGE;
fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
+ bp->fw_health->status_reliable = true;
if (reg_base == 0xffffffff)
return 0;
@@ -8304,11 +8365,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#endif
}
-/* Allow PF and VF with default VLAN to be in promiscuous mode */
+/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp) && !bp->vf.vlan)
+ if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
return false;
#endif
return true;
@@ -8405,7 +8466,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
- if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+ if (bp->dev->flags & IFF_PROMISC)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
if (bp->dev->flags & IFF_ALLMULTI) {
@@ -9039,8 +9100,9 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
static void bnxt_report_link(struct bnxt *bp)
{
if (bp->link_info.link_up) {
- const char *duplex;
+ const char *signal = "";
const char *flow_ctrl;
+ const char *duplex;
u32 speed;
u16 fec;
@@ -9062,9 +9124,24 @@ static void bnxt_report_link(struct bnxt *bp)
flow_ctrl = "ON - receive";
else
flow_ctrl = "none";
- netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
- speed, duplex, flow_ctrl);
- if (bp->flags & BNXT_FLAG_EEE_CAP)
+ if (bp->link_info.phy_qcfg_resp.option_flags &
+ PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
+ u8 sig_mode = bp->link_info.active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
+ switch (sig_mode) {
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
+ signal = "(NRZ) ";
+ break;
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
+ signal = "(PAM4) ";
+ break;
+ default:
+ break;
+ }
+ }
+ netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
+ speed, signal, duplex, flow_ctrl);
+ if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
netdev_info(bp->dev, "EEE is %s\n",
bp->eee.eee_active ? "active" :
"not active");
@@ -9096,10 +9173,6 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_link_info *link_info = &bp->link_info;
- bp->flags &= ~BNXT_FLAG_EEE_CAP;
- if (bp->test_info)
- bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
- BNXT_TEST_FL_AN_PHY_LPBK);
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -9110,31 +9183,17 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
+ bp->phy_flags = resp->flags;
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- bp->flags |= BNXT_FLAG_EEE_CAP;
eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
- if (bp->test_info)
- bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
- }
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
- if (bp->test_info)
- bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
- }
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
- if (BNXT_PF(bp))
- bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
- }
- if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
- bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
if (bp->hwrm_spec_code >= 0x10a01) {
if (bnxt_phy_qcaps_no_speed(resp)) {
@@ -9225,7 +9284,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
link_info->module_status = resp->module_status;
- if (bp->flags & BNXT_FLAG_EEE_CAP) {
+ if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds;
@@ -9461,7 +9520,8 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
if (!BNXT_SINGLE_PF(bp))
return 0;
- if (pci_num_vf(bp->pdev))
+ if (pci_num_vf(bp->pdev) &&
+ !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
@@ -9494,9 +9554,10 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
mutex_lock(&bp->hwrm_cmd_lock);
do {
- rc = __bnxt_hwrm_ver_get(bp, true);
sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- if (!sts || !BNXT_FW_IS_BOOTING(sts))
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ if (!BNXT_FW_IS_BOOTING(sts) &&
+ !BNXT_FW_IS_RECOVERING(sts))
break;
retry++;
} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
@@ -9556,13 +9617,17 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (rc)
return rc;
- if (!up)
+ if (!up) {
+ bnxt_inv_fw_health_reg(bp);
return 0;
+ }
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
fw_reset = true;
+ else if (bp->fw_health && !bp->fw_health->status_reliable)
+ bnxt_try_map_fw_health_reg(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -9571,6 +9636,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
bnxt_free_ctx_mem(bp);
@@ -9579,21 +9645,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
+ clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
if (rc) {
+ clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
netdev_err(bp->dev, "init int mode failed\n");
return rc;
}
- set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
}
if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
hw_resc->resv_irqs = 0;
@@ -9607,7 +9677,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
}
}
- return 0;
+ return rc;
}
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
@@ -9736,7 +9806,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
if (!rc)
len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc ?: len;
+ if (rc)
+ return rc;
+ return len;
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
@@ -9793,7 +9865,7 @@ static bool bnxt_eee_config_ok(struct bnxt *bp)
struct ethtool_eee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
@@ -10440,7 +10512,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
- if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+ if (dev->flags & IFF_PROMISC)
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
@@ -10516,6 +10588,9 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
}
skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnxt_promisc_ok(bp))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
if (rc && vnic->mc_list_count) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
@@ -10710,6 +10785,40 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static netdev_features_t bnxt_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct bnxt *bp;
+ __be16 udp_port;
+ u8 l4_proto = 0;
+
+ features = vlan_features_check(skb, features);
+ if (!skb->encapsulation)
+ return features;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if (l4_proto != IPPROTO_UDP)
+ return features;
+
+ bp = netdev_priv(dev);
+ /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+ udp_port = udp_hdr(skb)->dest;
+ if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
+ return features;
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf)
{
@@ -11035,6 +11144,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_disable_device(bp->pdev);
}
__bnxt_close_nic(bp, true, false);
+ bnxt_vf_reps_free(bp);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
@@ -11640,7 +11750,7 @@ static void bnxt_reset_all(struct bnxt *bp)
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+ if (rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
}
bp->fw_reset_timestamp = jiffies;
@@ -11723,28 +11833,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
case BNXT_FW_RESET_STATE_ENABLE_DEV:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- u32 val;
-
- if (!bp->fw_reset_min_dsecs) {
- u16 val;
-
- pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
- &val);
- if (val == 0xffff) {
- if (bnxt_fw_reset_timeout(bp)) {
- netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
- goto fw_reset_abort;
- }
- bnxt_queue_fw_reset_work(bp, HZ / 1000);
- return;
+ bnxt_inv_fw_health_reg(bp);
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ !bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ goto fw_reset_abort;
}
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
}
- val = bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_INPROG_REG);
- if (val)
- netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
- val);
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
if (pci_enable_device(bp->pdev)) {
@@ -11787,6 +11889,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
+ bnxt_vf_reps_alloc(bp);
+ bnxt_vf_reps_open(bp);
bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
@@ -12222,10 +12326,13 @@ static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
unsigned int cmd;
udp_tunnel_nic_get_port(netdev, table, 0, &ti);
- if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
+ bp->vxlan_port = ti.port;
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- else
+ } else {
+ bp->nge_port = ti.port;
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ }
if (ti.port)
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
@@ -12325,6 +12432,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_change_mtu = bnxt_change_mtu,
.ndo_fix_features = bnxt_fix_features,
.ndo_set_features = bnxt_set_features,
+ .ndo_features_check = bnxt_features_check,
.ndo_tx_timeout = bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
.ndo_get_vf_config = bnxt_get_vf_config,
@@ -12393,12 +12501,17 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
+ bp->phy_flags = 0;
rc = bnxt_hwrm_phy_qcaps(bp);
if (rc) {
netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
rc);
return rc;
}
+ if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
+ bp->dev->priv_flags |= IFF_SUPP_NOFCS;
+ else
+ bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
if (!fw_dflt)
return 0;
@@ -12681,7 +12794,7 @@ static void bnxt_vpd_read_info(struct bnxt *bp)
goto exit;
}
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
netdev_err(bp->dev, "VPD READ-Only not found\n");
goto exit;
@@ -12872,8 +12985,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- bp->ulp_probe = bnxt_ulp_probe;
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -12934,6 +13045,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc);
}
+ bnxt_inv_fw_health_reg(bp);
bnxt_dl_register(bp);
rc = register_netdev(dev);
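
Two of the bnxt.c changes above cooperate to support transmitting frames without an Ethernet FCS: probe advertises IFF_SUPP_NOFCS when the firmware reports BNXT_PHY_FL_NO_FCS, and the xmit path turns skb->no_fcs into TX_BD_FLAGS_NO_CRC (skipping the push path, which cannot omit the CRC). skb->no_fcs is normally set for AF_PACKET sockets that enable SO_NOFCS; a hedged userspace sketch of that trigger, with fd assumed to be such a socket:

	int one = 1;

	/* ask the kernel not to append the FCS on transmit; only
	 * honored on devices that advertise IFF_SUPP_NOFCS
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one)) < 0)
		perror("setsockopt(SO_NOFCS)");
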
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1259e68cba2a..98e0cef4532c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -671,6 +671,10 @@ struct nqe_cn {
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
+#define HWRM_WAIT_MUST_ABORT(bp, req) \
+ (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \
+ !bnxt_is_fw_healthy(bp))
+
#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \
((n) * HWRM_SHORT_MIN_TIMEOUT) : \
(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
@@ -1337,9 +1341,6 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
- u8 flags;
-#define BNXT_TEST_FL_EXT_LPBK 0x1
-#define BNXT_TEST_FL_AN_PHY_LPBK 0x2
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1560,6 +1561,7 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_STATUS_RECOVERING 0x400000
#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
BNXT_FW_STATUS_HEALTHY)
@@ -1570,6 +1572,9 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
BNXT_FW_STATUS_HEALTHY)
+#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \
+ ((sts) & BNXT_FW_STATUS_RECOVERING))
+
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
@@ -1685,7 +1690,6 @@ struct bnxt {
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_UDP_RSS_CAP 0x800
- #define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
@@ -1712,8 +1716,10 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_SH_PORT_CFG_OK(bp) (BNXT_PF(bp) && \
+ ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG))
#define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) || \
- ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) && \
+ BNXT_SH_PORT_CFG_OK(bp)) && \
(bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
@@ -1745,7 +1751,6 @@ struct bnxt {
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
struct bnxt_en_dev *edev;
- struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
struct bnxt_napi **bnapi;
@@ -1863,11 +1868,9 @@ struct bnxt {
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
- #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
- #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
#define BNXT_FW_CAP_RING_MONITOR 0x40000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
@@ -1910,6 +1913,8 @@ struct bnxt {
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
+ __be16 vxlan_port;
+ __be16 nge_port;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -2002,6 +2007,17 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+ /* copied from flags in hwrm_port_phy_qcaps_output */
+ u8 phy_flags;
+#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
+#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
+#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
+#define BNXT_PHY_FL_SHARED_PORT_CFG PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED
+#define BNXT_PHY_FL_PORT_STATS_NO_RESET PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET
+#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
+#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2228,6 +2244,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+bool bnxt_is_fw_healthy(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
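
The HWRM_WAIT_MUST_ABORT() macro added here drives the two new polling-loop branches in bnxt_hwrm_do_send_msg() above: once a command has outlasted the short-timeout phase, the driver stops waiting if firmware is known to be unhealthy, except for HWRM_VER_GET, which is itself the probe used while firmware recovers. Expanded at one call site, the check reads roughly as follows (a paraphrase for clarity, not new driver code):

	} else {
		/* waited a while already: give up on dead firmware,
		 * unless this request is the HWRM_VER_GET health probe
		 */
		if (le16_to_cpu(req->req_type) != HWRM_VER_GET &&
		    !bnxt_is_fw_healthy(bp))
			goto timeout_abort;
		usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT);
	}
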
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2f8b193a772d..c664ec52ebcf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1930,6 +1930,20 @@ static int bnxt_get_fecparam(struct net_device *dev,
return 0;
}
+static void bnxt_get_fec_stats(struct net_device *dev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ fec_stats->corrected_bits.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
+}
+
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
u32 fec)
{
@@ -2898,7 +2912,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
- if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -2949,7 +2963,7 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return -EOPNOTSUPP;
*edata = bp->eee;
@@ -3201,7 +3215,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
int rc;
if (!link_info->autoneg ||
- (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
+ (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
return 0;
rc = bnxt_query_force_speeds(bp, &fw_advertising);
@@ -3402,7 +3416,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
- (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+ (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
do_ext_lpbk = true;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
@@ -3976,6 +3990,133 @@ ethtool_init_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
+static void bnxt_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ phy_stats->SymbolErrorDuringCarrier =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
+}
+
+static void bnxt_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ mac_stats->FramesReceivedOK =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
+ mac_stats->FramesTransmittedOK =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
+ mac_stats->FrameCheckSequenceErrors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
+ mac_stats->AlignmentErrors =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
+ mac_stats->OutOfRangeLengthField =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
+}
+
+static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ ctrl_stats->MACControlFramesReceived =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
+}
+
+static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 9216 },
+ { 9217, 16383 },
+ {}
+};
+
+static void bnxt_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ rmon_stats->jabbers =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
+ rmon_stats->oversize_pkts =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
+ rmon_stats->undersize_pkts =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
+
+ rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
+ rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
+ rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
+ rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
+ rmon_stats->hist[4] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
+ rmon_stats->hist[5] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
+ rmon_stats->hist[6] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
+ rmon_stats->hist[7] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
+ rmon_stats->hist[8] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
+ rmon_stats->hist[9] =
+ BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
+
+ rmon_stats->hist_tx[0] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
+ rmon_stats->hist_tx[1] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
+ rmon_stats->hist_tx[2] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
+ rmon_stats->hist_tx[3] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
+ rmon_stats->hist_tx[4] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
+ rmon_stats->hist_tx[5] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
+ rmon_stats->hist_tx[6] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
+ rmon_stats->hist_tx[7] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
+ rmon_stats->hist_tx[8] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
+ rmon_stats->hist_tx[9] =
+ BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
+
+ *ranges = bnxt_rmon_ranges;
+}
+
void bnxt_ethtool_free(struct bnxt *bp)
{
kfree(bp->test_info);
@@ -3991,6 +4132,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
+ .get_fec_stats = bnxt_get_fec_stats,
.get_fecparam = bnxt_get_fecparam,
.set_fecparam = bnxt_set_fecparam,
.get_pause_stats = bnxt_get_pause_stats,
@@ -4034,4 +4176,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_dump = bnxt_set_dump,
.get_dump_flag = bnxt_get_dump_flag,
.get_dump_data = bnxt_get_dump_data,
+ .get_eth_phy_stats = bnxt_get_eth_phy_stats,
+ .get_eth_mac_stats = bnxt_get_eth_mac_stats,
+ .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
+ .get_rmon_stats = bnxt_get_rmon_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a217316228f4..eb00a219aa51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -49,10 +49,6 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
- if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- netdev_err(bp->dev, "vf ndo called though PF is down\n");
- return -EINVAL;
- }
if (!bp->pf.active_vfs) {
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
@@ -113,7 +109,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
- req.fid = cpu_to_le16(vf->fw_fid);
+ req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
@@ -125,9 +121,9 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
return 0;
}
-static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
- if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+ if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return !!(vf->flags & BNXT_VF_TRUST);
bnxt_hwrm_func_qcfg_flags(bp, vf);
@@ -1120,10 +1116,38 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
}
}
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc = 0;
+
+ if (!BNXT_VF(bp))
+ return 0;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ rc = -EADDRNOTAVAIL;
+ goto mac_done;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+ if (rc && strict) {
+ rc = -EADDRNOTAVAIL;
+ netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+ mac);
+ return rc;
+ }
+ return 0;
+}
+
void bnxt_update_vf_mac(struct bnxt *bp)
{
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ bool inform_pf = false;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -1139,42 +1163,24 @@ void bnxt_update_vf_mac(struct bnxt *bp)
* default but the stored zero MAC will allow the VF user to change
* the random MAC address using ndo_set_mac_address() if he wants.
*/
- if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+ if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
+ /* This means we are now using our own MAC address, let
+ * the PF know about this MAC address.
+ */
+ if (!is_valid_ether_addr(bp->vf.mac_addr))
+ inform_pf = true;
+ }
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (inform_pf)
+ bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
-{
- struct hwrm_func_vf_cfg_input req = {0};
- int rc = 0;
-
- if (!BNXT_VF(bp))
- return 0;
-
- if (bp->hwrm_spec_code < 0x10202) {
- if (is_valid_ether_addr(bp->vf.mac_addr))
- rc = -EADDRNOTAVAIL;
- goto mac_done;
- }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
- req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
- memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-mac_done:
- if (rc && strict) {
- rc = -EADDRNOTAVAIL;
- netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
- mac);
- return rc;
- }
- return 0;
-}
#else
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 629641bf6fc5..995535e4c11b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -34,6 +34,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 64dbbb04b043..a918e374f3c5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -491,3 +491,4 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
}
return bp->edev;
}
+EXPORT_SYMBOL(bnxt_ulp_probe);
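
Exporting bnxt_ulp_probe() pairs with removing the ulp_probe function pointer from struct bnxt in the header hunk above: an upper-layer driver now resolves the device with a direct call instead of going through the pointer. Illustrative caller only, not taken from this patch:

	struct bnxt_en_dev *edev = bnxt_ulp_probe(netdev);

	if (!edev)
		return -ENODEV;	/* no ULP context could be set up */
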
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 4b5c8fd76a51..dd66302343a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -284,8 +284,26 @@ void bnxt_vf_reps_open(struct bnxt *bp)
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return;
- for (i = 0; i < pci_num_vf(bp->pdev); i++)
- bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+ for (i = 0; i < pci_num_vf(bp->pdev); i++) {
+ /* Open the VF-Rep only if it is allocated in the FW */
+ if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID)
+ bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+ }
+}
+
+static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep)
+{
+ if (!vf_rep)
+ return;
+
+ if (vf_rep->dst) {
+ dst_release((struct dst_entry *)vf_rep->dst);
+ vf_rep->dst = NULL;
+ }
+ if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) {
+ hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
+ vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
+ }
}
static void __bnxt_vf_reps_destroy(struct bnxt *bp)
@@ -297,11 +315,7 @@ static void __bnxt_vf_reps_destroy(struct bnxt *bp)
for (i = 0; i < num_vfs; i++) {
vf_rep = bp->vf_reps[i];
if (vf_rep) {
- dst_release((struct dst_entry *)vf_rep->dst);
-
- if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
- hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
-
+ __bnxt_free_one_vf_rep(bp, vf_rep);
if (vf_rep->dev) {
/* if register_netdev failed, then netdev_ops
* would have been set to NULL
@@ -350,6 +364,80 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
__bnxt_vf_reps_destroy(bp);
}
+/* Free the VF-Reps in firmware, during firmware hot-reset processing.
+ * Note that the VF-Rep netdevs are still active (not unregistered) during
+ * this process. As the mode transition from SWITCHDEV to LEGACY happens
+ * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ */
+void bnxt_vf_reps_free(struct bnxt *bp)
+{
+ u16 num_vfs = pci_num_vf(bp->pdev);
+ int i;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ for (i = 0; i < num_vfs; i++)
+ __bnxt_free_one_vf_rep(bp, bp->vf_reps[i]);
+}
+
+static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+ u16 *cfa_code_map)
+{
+ /* get cfa handles from FW */
+ if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action,
+ &vf_rep->rx_cfa_code))
+ return -ENOLINK;
+
+ cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
+ vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+ if (!vf_rep->dst)
+ return -ENOMEM;
+
+ /* only cfa_action is needed to mux a packet while TXing */
+ vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
+ vf_rep->dst->u.port_info.lower_dev = bp->dev;
+
+ return 0;
+}
+
+/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
+ * Note that the VF-Rep netdevs are still active (not unregistered) during
+ * this process. As the mode transition from SWITCHDEV to LEGACY happens
+ * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ */
+int bnxt_vf_reps_alloc(struct bnxt *bp)
+{
+ u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev);
+ struct bnxt_vf_rep *vf_rep;
+ int rc, i;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ if (!cfa_code_map)
+ return -EINVAL;
+
+ for (i = 0; i < MAX_CFA_CODE; i++)
+ cfa_code_map[i] = VF_IDX_INVALID;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_rep = bp->vf_reps[i];
+ vf_rep->vf_idx = i;
+
+ rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
+ bnxt_vf_reps_free(bp);
+ return rc;
+}
+
/* Use the OUI of the PF's perm addr and report the same mac addr
* for the same VF-rep each time
*/
@@ -428,25 +516,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
vf_rep->vf_idx = i;
vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
- /* get cfa handles from FW */
- rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
- &vf_rep->tx_cfa_action,
- &vf_rep->rx_cfa_code);
- if (rc) {
- rc = -ENOLINK;
+ rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
+ if (rc)
goto err;
- }
- cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
-
- vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
- GFP_KERNEL);
- if (!vf_rep->dst) {
- rc = -ENOMEM;
- goto err;
- }
- /* only cfa_action is needed to mux a packet while TXing */
- vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
- vf_rep->dst->u.port_info.lower_dev = bp->dev;
bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
rc = register_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index d7287651422f..5637a84884d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -19,6 +19,8 @@ void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
+int bnxt_vf_reps_alloc(struct bnxt *bp);
+void bnxt_vf_reps_free(struct bnxt *bp);
static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
@@ -61,5 +63,15 @@ static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
return false;
}
+
+static inline int bnxt_vf_reps_alloc(struct bnxt *bp)
+{
+ return 0;
+}
+
+static inline void bnxt_vf_reps_free(struct bnxt *bp)
+{
+}
+
#endif /* CONFIG_BNXT_SRIOV */
#endif /* BNXT_VFR_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 641303894341..ec9564e584e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -217,7 +217,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
dma_addr_t mapping;
- int drops = 0;
+ int nxmit = 0;
int ring;
int i;
@@ -233,21 +233,17 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame *xdp = frames[i];
if (!txr || !bnxt_tx_avail(bp, txr) ||
- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
- xdp_return_frame_rx_napi(xdp);
- drops++;
- continue;
- }
+ !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+ break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- xdp_return_frame_rx_napi(xdp);
- drops++;
- continue;
- }
+ if (dma_mapping_error(&pdev->dev, mapping))
+ break;
+
__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH) {
@@ -256,7 +252,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
- return num_frames - drops;
+ return nxmit;
}
/* Under rtnl_lock */
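
The bnxt_xdp_xmit() rewrite follows a tree-wide convention change for .ndo_xdp_xmit(): rather than freeing failed frames itself and returning num_frames - drops, a driver now stops at the first frame it cannot queue and returns the count actually submitted, leaving the untransmitted tail for the caller (the devmap redirect path) to free. Skeleton of the new contract, with queue_one_frame() as a hypothetical stand-in for the mapping-and-queueing steps above:

	int nxmit = 0;

	for (i = 0; i < num_frames; i++) {
		if (queue_one_frame(txr, frames[i]))
			break;	/* caller frees frames[nxmit..] */
		nxmit++;
	}
	return nxmit;
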
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 1c86eddb1b51..facde824bcaa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/clk.h>
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <net/arp.h>
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d2381929931b..b0e49643f483 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -13016,7 +13016,7 @@ static int tg3_test_nvram(struct tg3 *tp)
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
if (i > 0) {
j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
if (j < 0)
@@ -15629,7 +15629,7 @@ static void tg3_read_vpd(struct tg3 *tp)
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 588c4804d10a..265c2fa6bbe0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -524,6 +524,68 @@ bnad_set_pauseparam(struct net_device *netdev,
return 0;
}
+static void bnad_get_txf_strings(u8 **string, int f_num)
+{
+ ethtool_sprintf(string, "txf%d_ucast_octets", f_num);
+ ethtool_sprintf(string, "txf%d_ucast", f_num);
+ ethtool_sprintf(string, "txf%d_ucast_vlan", f_num);
+ ethtool_sprintf(string, "txf%d_mcast_octets", f_num);
+ ethtool_sprintf(string, "txf%d_mcast", f_num);
+ ethtool_sprintf(string, "txf%d_mcast_vlan", f_num);
+ ethtool_sprintf(string, "txf%d_bcast_octets", f_num);
+ ethtool_sprintf(string, "txf%d_bcast", f_num);
+ ethtool_sprintf(string, "txf%d_bcast_vlan", f_num);
+ ethtool_sprintf(string, "txf%d_errors", f_num);
+ ethtool_sprintf(string, "txf%d_filter_vlan", f_num);
+ ethtool_sprintf(string, "txf%d_filter_mac_sa", f_num);
+}
+
+static void bnad_get_rxf_strings(u8 **string, int f_num)
+{
+ ethtool_sprintf(string, "rxf%d_ucast_octets", f_num);
+ ethtool_sprintf(string, "rxf%d_ucast", f_num);
+ ethtool_sprintf(string, "rxf%d_ucast_vlan", f_num);
+ ethtool_sprintf(string, "rxf%d_mcast_octets", f_num);
+ ethtool_sprintf(string, "rxf%d_mcast", f_num);
+ ethtool_sprintf(string, "rxf%d_mcast_vlan", f_num);
+ ethtool_sprintf(string, "rxf%d_bcast_octets", f_num);
+ ethtool_sprintf(string, "rxf%d_bcast", f_num);
+ ethtool_sprintf(string, "rxf%d_bcast_vlan", f_num);
+ ethtool_sprintf(string, "rxf%d_frame_drops", f_num);
+}
+
+static void bnad_get_cq_strings(u8 **string, int q_num)
+{
+ ethtool_sprintf(string, "cq%d_producer_index", q_num);
+ ethtool_sprintf(string, "cq%d_consumer_index", q_num);
+ ethtool_sprintf(string, "cq%d_hw_producer_index", q_num);
+ ethtool_sprintf(string, "cq%d_intr", q_num);
+ ethtool_sprintf(string, "cq%d_poll", q_num);
+ ethtool_sprintf(string, "cq%d_schedule", q_num);
+ ethtool_sprintf(string, "cq%d_keep_poll", q_num);
+ ethtool_sprintf(string, "cq%d_complete", q_num);
+}
+
+static void bnad_get_rxq_strings(u8 **string, int q_num)
+{
+ ethtool_sprintf(string, "rxq%d_packets", q_num);
+ ethtool_sprintf(string, "rxq%d_bytes", q_num);
+ ethtool_sprintf(string, "rxq%d_packets_with_error", q_num);
+ ethtool_sprintf(string, "rxq%d_allocbuf_failed", q_num);
+ ethtool_sprintf(string, "rxq%d_mapbuf_failed", q_num);
+ ethtool_sprintf(string, "rxq%d_producer_index", q_num);
+ ethtool_sprintf(string, "rxq%d_consumer_index", q_num);
+}
+
+static void bnad_get_txq_strings(u8 **string, int q_num)
+{
+ ethtool_sprintf(string, "txq%d_packets", q_num);
+ ethtool_sprintf(string, "txq%d_bytes", q_num);
+ ethtool_sprintf(string, "txq%d_producer_index", q_num);
+ ethtool_sprintf(string, "txq%d_consumer_index", q_num);
+ ethtool_sprintf(string, "txq%d_hw_consumer_index", q_num);
+}
+
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
@@ -531,175 +593,57 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
int i, j, q_num;
u32 bmap;
+ if (stringset != ETH_SS_STATS)
+ return;
+
mutex_lock(&bnad->conf_mutex);
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
- BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
- ETH_GSTRING_LEN));
- strncpy(string, bnad_net_stats_strings[i],
- ETH_GSTRING_LEN);
- string += ETH_GSTRING_LEN;
- }
- bmap = bna_tx_rid_mask(&bnad->bna);
- for (i = 0; bmap; i++) {
- if (bmap & 1) {
- sprintf(string, "txf%d_ucast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_ucast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_ucast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_mcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_mcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_mcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_bcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_bcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_bcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_errors", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_filter_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txf%d_filter_mac_sa", i);
- string += ETH_GSTRING_LEN;
- }
- bmap >>= 1;
- }
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN));
+ ethtool_sprintf(&string, bnad_net_stats_strings[i]);
+ }
- bmap = bna_rx_rid_mask(&bnad->bna);
- for (i = 0; bmap; i++) {
- if (bmap & 1) {
- sprintf(string, "rxf%d_ucast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_ucast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_ucast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_mcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_mcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_mcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_bcast_octets", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_bcast", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_bcast_vlan", i);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxf%d_frame_drops", i);
- string += ETH_GSTRING_LEN;
- }
- bmap >>= 1;
- }
+ bmap = bna_tx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1)
+ bnad_get_txf_strings(&string, i);
+ bmap >>= 1;
+ }
- q_num = 0;
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- for (j = 0; j < bnad->num_rxp_per_rx; j++) {
- sprintf(string, "cq%d_producer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_consumer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_hw_producer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_intr", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_poll", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_schedule", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_keep_poll", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "cq%d_complete", q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- }
- }
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1)
+ bnad_get_rxf_strings(&string, i);
+ bmap >>= 1;
+ }
- q_num = 0;
- for (i = 0; i < bnad->num_rx; i++) {
- if (!bnad->rx_info[i].rx)
- continue;
- for (j = 0; j < bnad->num_rxp_per_rx; j++) {
- sprintf(string, "rxq%d_packets", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_bytes", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_packets_with_error",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_allocbuf_failed", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_mapbuf_failed", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_producer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_consumer_index", q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- if (bnad->rx_info[i].rx_ctrl[j].ccb &&
- bnad->rx_info[i].rx_ctrl[j].ccb->
- rcb[1] &&
- bnad->rx_info[i].rx_ctrl[j].ccb->
- rcb[1]->rxq) {
- sprintf(string, "rxq%d_packets", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_bytes", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string,
- "rxq%d_packets_with_error", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_allocbuf_failed",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_mapbuf_failed",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_producer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "rxq%d_consumer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- }
- }
- }
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ bnad_get_cq_strings(&string, q_num++);
+ }
- q_num = 0;
- for (i = 0; i < bnad->num_tx; i++) {
- if (!bnad->tx_info[i].tx)
- continue;
- for (j = 0; j < bnad->num_txq_per_tx; j++) {
- sprintf(string, "txq%d_packets", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_bytes", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_producer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_consumer_index", q_num);
- string += ETH_GSTRING_LEN;
- sprintf(string, "txq%d_hw_consumer_index",
- q_num);
- string += ETH_GSTRING_LEN;
- q_num++;
- }
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ bnad_get_rxq_strings(&string, q_num++);
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
+ bnad_get_rxq_strings(&string, q_num++);
}
+ }
- break;
-
- default:
- break;
+ q_num = 0;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ bnad_get_txq_strings(&string, q_num++);
}
mutex_unlock(&bnad->conf_mutex);
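
The bnad conversion above relies on ethtool_sprintf(), which formats into the strings buffer and advances the caller's cursor by ETH_GSTRING_LEN in a single call, replacing each sprintf()-plus-pointer-bump pair in the removed code. A minimal sketch of the usage pattern (example_get_strings() is illustrative, not part of the driver):

	static void example_get_strings(u8 *data)
	{
		u8 *p = data;
		int q;

		for (q = 0; q < 4; q++)
			ethtool_sprintf(&p, "rxq%d_bytes", q);
		/* p now points 4 * ETH_GSTRING_LEN past data */
	}
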
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d8c68906525a..d8d87213697c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -159,6 +159,16 @@
#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */
#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */
#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */
+#define GEM_PCSCNTRL 0x0200 /* PCS Control */
+#define GEM_PCSSTS 0x0204 /* PCS Status */
+#define GEM_PCSPHYTOPID 0x0208 /* PCS PHY Top ID */
+#define GEM_PCSPHYBOTID 0x020c /* PCS PHY Bottom ID */
+#define GEM_PCSANADV 0x0210 /* PCS AN Advertisement */
+#define GEM_PCSANLPBASE 0x0214 /* PCS AN Link Partner Base */
+#define GEM_PCSANEXP 0x0218 /* PCS AN Expansion */
+#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */
+#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */
+#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -478,6 +488,10 @@
#define GEM_HS_MAC_SPEED_OFFSET 0
#define GEM_HS_MAC_SPEED_SIZE 3
+/* Bitfields in PCSCNTRL */
+#define GEM_PCSAUTONEG_OFFSET 12
+#define GEM_PCSAUTONEG_SIZE 1
+
/* Bitfields in DCFG1. */
#define GEM_IRQCOR_OFFSET 23
#define GEM_IRQCOR_SIZE 1
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 0f6a6cb7e98d..0e94db9cd45d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -694,6 +694,22 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
if (old_ncr ^ ncr)
macb_or_gem_writel(bp, NCR, ncr);
+ /* Disable AN for SGMII fixed link configuration, enable otherwise.
+ * Must be written after PCSSEL is set in NCFGR,
+ * otherwise writes will not take effect.
+ */
+ if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
+ u32 pcsctrl, old_pcsctrl;
+
+ old_pcsctrl = gem_readl(bp, PCSCNTRL);
+ if (mode == MLO_AN_FIXED)
+ pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
+ else
+ pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
+ if (old_pcsctrl != pcsctrl)
+ gem_writel(bp, PCSCNTRL, pcsctrl);
+ }
+
spin_unlock_irqrestore(&bp->lock, flags);
}
@@ -847,6 +863,15 @@ static int macb_phylink_connect(struct macb *bp)
return 0;
}
+static void macb_get_pcs_fixed_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
+}
+
/* based on au1000_eth. c*/
static int macb_mii_probe(struct net_device *dev)
{
@@ -855,6 +880,11 @@ static int macb_mii_probe(struct net_device *dev)
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ bp->phylink_config.poll_fixed_state = true;
+ bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
+ }
+
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -3735,17 +3765,15 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*hclk = devm_clk_get(&pdev->dev, "hclk");
}
- if (IS_ERR_OR_NULL(*pclk)) {
- err = IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV;
- dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
- return err;
- }
+ if (IS_ERR_OR_NULL(*pclk))
+ return dev_err_probe(&pdev->dev,
+ IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
+ "failed to get pclk\n");
- if (IS_ERR_OR_NULL(*hclk)) {
- err = IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV;
- dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
- return err;
- }
+ if (IS_ERR_OR_NULL(*hclk))
+ return dev_err_probe(&pdev->dev,
+ IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
+ "failed to get hclk\n");
*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
if (IS_ERR(*tx_clk))
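The dev_err_probe() conversion above is behavior-preserving for real errors but quieter for probe deferral. A minimal sketch of the idiom, using a hypothetical foo_probe() and the driver's "pclk" clock name:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *pclk;

	pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		/* Logs at err level for real errors, records a debug-level
		 * deferred-probe reason for -EPROBE_DEFER, and returns the
		 * error code, collapsing the old four-line pattern.
		 */
		return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
				     "failed to get pclk\n");
	return 0;
}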
@@ -4621,7 +4649,6 @@ static int macb_probe(struct platform_device *pdev)
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
- const char *mac;
struct macb *bp;
int err, val;
@@ -4736,15 +4763,11 @@ static int macb_probe(struct platform_device *pdev)
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
- mac = of_get_mac_address(np);
- if (PTR_ERR(mac) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
+ err = of_get_mac_address(np, bp->dev->dev_addr);
+ if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
- } else if (!IS_ERR_OR_NULL(mac)) {
- ether_addr_copy(bp->dev->dev_addr, mac);
- } else {
+ else if (err)
macb_get_hwaddr(bp);
- }
err = of_get_phy_mode(np, &interface);
if (err)
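The of_get_mac_address() change here is one instance of a tree-wide API conversion: the helper now fills a caller-supplied buffer and returns 0 or a negative errno, instead of returning a pointer the caller had to IS_ERR()-check. A minimal sketch of the new convention (foo_set_mac() is hypothetical; assumes <linux/of_net.h> and <linux/etherdevice.h>):

static int foo_set_mac(struct net_device *ndev, struct device_node *np)
{
	int err = of_get_mac_address(np, ndev->dev_addr);

	if (err == -EPROBE_DEFER)
		return err;		/* e.g. NVMEM provider not ready yet */
	if (err)
		eth_hw_addr_random(ndev);	/* no valid address in DT */
	return 0;
}

The same pattern repeats in the octeon_mgmt, thunder_bgx, dm9000, ethoc and nps_enet hunks below.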
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index e6d4ad99cc38..3f1c189646f4 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -521,7 +521,7 @@
#define CN23XX_BAR1_INDEX_OFFSET 3
#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
- (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
+ (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
((idx) << CN23XX_BAR1_INDEX_OFFSET))
/*############################ DPI #########################*/
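The (u64) cast above, like the similar casts in the nicvf and cxgb4 hunks that follow, fixes one class of bug: a shift is evaluated in the operand's promoted type (32-bit int) before any widening to the 64-bit destination, so large shift counts can overflow. A two-line illustration with made-up values:

	int port = 3;
	u64 bad  = port << 30;		/* 32-bit signed shift: overflows */
	u64 good = (u64)port << 30;	/* widened first: well defined    */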
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index ecffebd513be..48ff6fb0eed9 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1385,7 +1385,6 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
struct net_device *netdev;
struct octeon_mgmt *p;
const __be32 *data;
- const u8 *mac;
struct resource *res_mix;
struct resource *res_agl;
struct resource *res_agl_prt_ctl;
@@ -1502,11 +1501,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
- mac = of_get_mac_address(pdev->dev.of_node);
-
- if (!IS_ERR(mac))
- ether_addr_copy(netdev->dev_addr, mac);
- else
+ result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ if (result)
eth_hw_addr_random(netdev);
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index f782e6af45e9..50bbe79fb93d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
mbx.rq.rq_num = qidx;
- mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
(rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
(rq->cont_qs_rbdr_idx << 8) |
(rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8ff28ed04b7f..0c783aadf393 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1474,7 +1474,6 @@ static int bgx_init_of_phy(struct bgx *bgx)
device_for_each_child_node(&bgx->pdev->dev, fwn) {
struct phy_device *pd;
struct device_node *phy_np;
- const char *mac;
/* Should always be an OF node. But if it is not, we
* cannot handle it, so exit the loop.
@@ -1483,9 +1482,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
if (!node)
break;
- mac = of_get_mac_address(node);
- if (!IS_ERR(mac))
- ether_addr_copy(bgx->lmac[lmac].mac, mac);
+ of_get_mac_address(node, bgx->lmac[lmac].mac);
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index ce28820c57c9..12fcf84d67ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -323,8 +323,7 @@ void t4_cleanup_clip_tbl(struct adapter *adap)
struct clip_tbl *ctbl = adap->clipt;
if (ctbl) {
- if (ctbl->cl_list)
- kvfree(ctbl->cl_list);
+ kvfree(ctbl->cl_list);
kvfree(ctbl);
}
}
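The guard removed here was redundant by design: kvfree(), like kfree() and vfree(), is a no-op when passed NULL, so

	kvfree(ctbl->cl_list);	/* safe even when cl_list is NULL */

is the idiomatic form. The same cleanup repeats in the cudbg_lib, cxgb4_cudbg and cxgb4_tc_u32 hunks below.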
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 23a2ebdfd503..a7f291c89702 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -3551,8 +3551,7 @@ out:
}
out_free:
- if (data)
- kvfree(data);
+ kvfree(data);
#undef QDESC_GET_FLQ
#undef QDESC_GET_RXQ
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 77648e4ab4cc..dd66b244466d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -157,8 +157,7 @@ static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
{
- if (pdbg_init->compress_buff)
- vfree(pdbg_init->compress_buff);
+ vfree(pdbg_init->compress_buff);
}
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 83b46440408b..bc581b149b11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_lip[15] |
f->fs.nat_lip[14] << 8 |
f->fs.nat_lip[13] << 16 |
- f->fs.nat_lip[12] << 24, 1);
+ (u64)f->fs.nat_lip[12] << 24, 1);
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
WORD_MASK, f->fs.nat_lip[11] |
f->fs.nat_lip[10] << 8 |
f->fs.nat_lip[9] << 16 |
- f->fs.nat_lip[8] << 24, 1);
+ (u64)f->fs.nat_lip[8] << 24, 1);
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
WORD_MASK, f->fs.nat_lip[7] |
f->fs.nat_lip[6] << 8 |
f->fs.nat_lip[5] << 16 |
- f->fs.nat_lip[4] << 24, 1);
+ (u64)f->fs.nat_lip[4] << 24, 1);
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
- f->fs.nat_lip[0] << 24, 1);
+ (u64)f->fs.nat_lip[0] << 24, 1);
} else {
set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
- f->fs.nat_lip[0] << 24, 1);
+					       (u64)f->fs.nat_lip[0] << 24, 1);
}
}
@@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_fip[15] |
f->fs.nat_fip[14] << 8 |
f->fs.nat_fip[13] << 16 |
- f->fs.nat_fip[12] << 24, 1);
+ (u64)f->fs.nat_fip[12] << 24, 1);
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
WORD_MASK, f->fs.nat_fip[11] |
f->fs.nat_fip[10] << 8 |
f->fs.nat_fip[9] << 16 |
- f->fs.nat_fip[8] << 24, 1);
+ (u64)f->fs.nat_fip[8] << 24, 1);
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
WORD_MASK, f->fs.nat_fip[7] |
f->fs.nat_fip[6] << 8 |
f->fs.nat_fip[5] << 16 |
- f->fs.nat_fip[4] << 24, 1);
+ (u64)f->fs.nat_fip[4] << 24, 1);
set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
WORD_MASK, f->fs.nat_fip[3] |
f->fs.nat_fip[2] << 8 |
f->fs.nat_fip[1] << 16 |
- f->fs.nat_fip[0] << 24, 1);
+ (u64)f->fs.nat_fip[0] << 24, 1);
} else {
set_tcb_field(adap, f, tid,
@@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_fip[3] |
f->fs.nat_fip[2] << 8 |
f->fs.nat_fip[1] << 16 |
- f->fs.nat_fip[0] << 24, 1);
+ (u64)f->fs.nat_fip[0] << 24, 1);
}
}
set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
(dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
- (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+ (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
1);
}
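The casts in set_nat_params() fix a subtler shift problem: each nat_lip/nat_fip element is a u8, which integer promotion widens to a signed int, so shifting a byte with its top bit set into bit 31 produces a negative value; when that value is then widened to the 64-bit value argument of set_tcb_field(), it sign-extends and corrupts the upper 32 bits. Illustrated:

	u8 b = 0x80;
	u64 v1 = b << 24;	/* in practice sign-extends: 0xffffffff80000000 */
	u64 v2 = (u64)b << 24;	/* intended value:          0x0000000080000000 */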
@@ -979,7 +979,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
{
struct port_info *pi = netdev_priv(f->dev);
- /* If the new or old filter have loopback rewriteing rules then we'll
+	/* If the new or old filter has loopback rewriting rules then we'll
* need to free any existing L2T, SMT, CLIP entries of filter
* rule.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 2e309f6673f7..28fd2de9e4cf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -48,6 +48,11 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
flow_action_for_each(i, entry, actions) {
switch (entry->id) {
case FLOW_ACTION_POLICE:
+ if (entry->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
/* Convert bytes per second to bits per second */
if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
NL_SET_ERR_MSG_MOD(extack,
@@ -145,7 +150,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
flow_action_for_each(i, entry, &cls->rule->action)
if (entry->id == FLOW_ACTION_POLICE)
break;
-
+ if (entry->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
/* Convert from bytes per second to Kbps */
p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
p.u.params.channel = pi->tx_chan;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index dede02505ceb..a5d2f84dcdd5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -524,13 +524,9 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
out_no_mem:
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
-
- if (link->tid_map)
- kvfree(link->tid_map);
+ kvfree(link->tid_map);
}
-
- if (t)
- kvfree(t);
+ kvfree(t);
return NULL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 80882cfc370f..9428ef1f04a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2775,7 +2775,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (id_len > ID_LEN)
id_len = ID_LEN;
- i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
dev_err(adapter->pdev_dev, "missing VPD-R section\n");
ret = -EINVAL;
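The dropped argument tracks a concurrent PCI/VPD API change: pci_vpd_find_tag() no longer takes a start offset and always searches from the beginning of the buffer, which is what every caller passed anyway. Under the updated prototype the call is simply:

	i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return -EINVAL;	/* no VPD-R resource data tag found */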
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index a3f5b80888e5..ef3f1e92632f 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -33,7 +33,6 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
if (unlikely(start < skb_linear_data_len)) {
frag_size = min(len, skb_linear_data_len - start);
- start = 0;
} else {
start -= skb_linear_data_len;
@@ -873,10 +872,10 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
}
/* update receive window */
if (first_wr || tx_info->prev_win != tcp_win) {
- pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
- TCB_RCV_WND_W,
- TCB_RCV_WND_V(TCB_RCV_WND_M),
- TCB_RCV_WND_V(tcp_win), 0);
+ chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_WND_W,
+ TCB_RCV_WND_V(TCB_RCV_WND_M),
+ TCB_RCV_WND_V(tcp_win), 0);
tx_info->prev_win = tcp_win;
cpl++;
}
@@ -1485,7 +1484,6 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
wr->cookie = 0;
- pos += sizeof(*wr);
/* ULP_TXPKT */
ulptx = (struct ulp_txpkt *)(wr + 1);
ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f04ec53544ae..f48957a17c3a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -3040,15 +3040,4 @@ static struct pci_driver enic_driver = {
.remove = enic_remove,
};
-static int __init enic_init_module(void)
-{
- return pci_register_driver(&enic_driver);
-}
-
-static void __exit enic_cleanup_module(void)
-{
- pci_unregister_driver(&enic_driver);
-}
-
-module_init(enic_init_module);
-module_exit(enic_cleanup_module);
+module_pci_driver(enic_driver);
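This and the following de2104x, winbond-840, sundance and fealnx conversions are purely mechanical: module_pci_driver() (via module_driver()) generates the init/exit boilerplate each driver used to spell out, expanding to roughly:

static int __init enic_driver_init(void)
{
	return pci_register_driver(&enic_driver);
}
module_init(enic_driver_init);

static void __exit enic_driver_exit(void)
{
	pci_unregister_driver(&enic_driver);
}
module_exit(enic_driver_exit);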
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8a9096aa85cd..2a8bf53c2f75 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1385,7 +1385,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
struct dm9000_plat_data *pdata;
struct device_node *np = dev->of_node;
- const void *mac_addr;
+ int ret;
if (!IS_ENABLED(CONFIG_OF) || !np)
return ERR_PTR(-ENXIO);
@@ -1399,11 +1399,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
if (of_find_property(np, "davicom,no-eeprom", NULL))
pdata->flags |= DM9000_PLATF_NO_EEPROM;
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(pdata->dev_addr, mac_addr);
- else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
- return ERR_CAST(mac_addr);
+ ret = of_get_mac_address(np, pdata->dev_addr);
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
return pdata;
}
@@ -1524,7 +1522,6 @@ dm9000_probe(struct platform_device *pdev)
if (ret) {
dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
db->irq_wake, ret);
- ret = 0;
} else {
irq_set_irq_wake(db->irq_wake, 0);
db->wake_supported = 1;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index c3cbe55205a7..b018195f0243 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2193,15 +2193,4 @@ static struct pci_driver de_driver = {
.driver.pm = &de_pm_ops,
};
-static int __init de_init (void)
-{
- return pci_register_driver(&de_driver);
-}
-
-static void __exit de_exit (void)
-{
- pci_unregister_driver (&de_driver);
-}
-
-module_init(de_init);
-module_exit(de_exit);
+module_pci_driver(de_driver);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 89cbdc1f4857..514df170ec5d 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1629,15 +1629,4 @@ static struct pci_driver w840_driver = {
.driver.pm = &w840_pm_ops,
};
-static int __init w840_init(void)
-{
- return pci_register_driver(&w840_driver);
-}
-
-static void __exit w840_exit(void)
-{
- pci_unregister_driver(&w840_driver);
-}
-
-module_init(w840_init);
-module_exit(w840_exit);
+module_pci_driver(w840_driver);
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index df0eab479d51..ce61f79f3b7c 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1982,17 +1982,4 @@ static struct pci_driver sundance_driver = {
.driver.pm = &sundance_pm_ops,
};
-static int __init sundance_init(void)
-{
- return pci_register_driver(&sundance_driver);
-}
-
-static void __exit sundance_exit(void)
-{
- pci_unregister_driver(&sundance_driver);
-}
-
-module_init(sundance_init);
-module_exit(sundance_exit);
-
-
+module_pci_driver(sundance_driver);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 3d9b0b161e24..e1b43b07755b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1151,11 +1151,7 @@ static int ethoc_probe(struct platform_device *pdev)
ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
- const void *mac;
-
- mac = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac))
- ether_addr_copy(netdev->dev_addr, mac);
+ of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
priv->phy_id = -1;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 815fb62c4b02..e3954d8835e7 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -575,7 +575,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
struct net_device *ndev;
struct nps_enet_priv *priv;
s32 err = 0;
- const char *mac_addr;
if (!dev->of_node)
return -ENODEV;
@@ -602,10 +601,8 @@ static s32 nps_enet_probe(struct platform_device *pdev)
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
/* set kernel MAC address to dev */
- mac_addr = of_get_mac_address(dev->of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
- else
+ err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ if (err)
eth_hw_addr_random(ndev);
/* Get IRQ number */
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 473b337b2e3b..5a1a8f2ea63c 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1177,18 +1177,7 @@ static struct platform_driver ftmac100_driver = {
/******************************************************************************
* initialization / finalization
*****************************************************************************/
-static int __init ftmac100_init(void)
-{
- return platform_driver_register(&ftmac100_driver);
-}
-
-static void __exit ftmac100_exit(void)
-{
- platform_driver_unregister(&ftmac100_driver);
-}
-
-module_init(ftmac100_init);
-module_exit(ftmac100_exit);
+module_platform_driver(ftmac100_driver);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTMAC100 driver");
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c696651dd735..0908771aa9ac 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1948,15 +1948,4 @@ static struct pci_driver fealnx_driver = {
.remove = fealnx_remove_one,
};
-static int __init fealnx_init(void)
-{
- return pci_register_driver(&fealnx_driver);
-}
-
-static void __exit fealnx_exit(void)
-{
- pci_unregister_driver(&fealnx_driver);
-}
-
-module_init(fealnx_init);
-module_exit(fealnx_exit);
+module_pci_driver(fealnx_driver);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3f9175bdce77..2d1abdd58fab 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -27,6 +27,7 @@ config FEC
default ARCH_MXC || SOC_IMX28 if ARM
select CRC32
select PHYLIB
+ imply NET_SELFTESTS
imply PTP_1588_CLOCK
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 67c436400352..de7b31842233 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
-obj-$(CONFIG_FSL_ENETC) += enetc/
-obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
-obj-$(CONFIG_FSL_ENETC_VF) += enetc/
+obj-y += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 720dc99bd1fc..177c020bf34a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3081,7 +3081,7 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
struct xdp_frame *xdpf;
- int i, err, drops = 0;
+ int i, nxmit = 0;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -3091,14 +3091,12 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
for (i = 0; i < n; i++) {
xdpf = frames[i];
- err = dpaa_xdp_xmit_frame(net_dev, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (dpaa_xdp_xmit_frame(net_dev, xdpf))
+ break;
+ nxmit++;
}
- return n - drops;
+ return nxmit;
}
static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
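This follows the tree-wide change to the .ndo_xdp_xmit contract: a driver now stops at the first frame that fails to enqueue and returns the count actually sent; the caller, not the driver, frees the frames left over, which is why the xdp_return_frame_rx_napi() call disappears. Sketched against a hypothetical per-frame helper:

static int foo_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	int i, nxmit = 0;

	for (i = 0; i < n; i++) {
		if (foo_xmit_one(dev, frames[i]))	/* hypothetical */
			break;	/* frames[nxmit..n-1] are freed by the core */
		nxmit++;
	}
	return nxmit;
}

The dpaa2-eth hunk below, which drops its xdp_return_frame_rx_napi() loop, is the same conversion.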
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index ee7a906e30b3..d029b69c3f18 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -29,3 +29,11 @@ config FSL_DPAA2_PTP_CLOCK
help
This driver adds support for using the DPAA2 1588 timer module
as a PTP clock.
+
+config FSL_DPAA2_SWITCH
+ tristate "Freescale DPAA2 Ethernet Switch"
+ depends on BRIDGE || BRIDGE=n
+ depends on NET_SWITCHDEV
+ help
+ Driver for Freescale DPAA2 Ethernet Switch. This driver manages
+	  switch objects discovered on the Freescale MC bus.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 146cb3540e61..c2ef74052ef8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -5,11 +5,13 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
+obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
+fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o
# Needed by the tracing framework
CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 492943bb9c48..e0c3c58e2ac7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -223,31 +223,31 @@ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
}
}
-static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
- ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
- if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
+ if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
return;
while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
- ch->xdp.drop_bufs,
- ch->xdp.drop_cnt)) == -EBUSY) {
+ ch->recycled_bufs,
+ ch->recycled_bufs_cnt)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
break;
cpu_relax();
}
if (err) {
- dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
- ch->buf_count -= ch->xdp.drop_cnt;
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ ch->buf_count -= ch->recycled_bufs_cnt;
}
- ch->xdp.drop_cnt = 0;
+ ch->recycled_bufs_cnt = 0;
}
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
@@ -300,7 +300,7 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
ch->stats.xdp_tx++;
}
for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
- dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
percpu_stats->tx_errors++;
ch->stats.xdp_tx_err++;
}
@@ -382,7 +382,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
- dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_recycle_buf(priv, ch, addr);
ch->stats.xdp_drop++;
break;
case XDP_REDIRECT:
@@ -403,7 +403,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
free_pages((unsigned long)vaddr, 0);
} else {
ch->buf_count++;
- dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_recycle_buf(priv, ch, addr);
}
ch->stats.xdp_drop++;
} else {
@@ -418,6 +418,35 @@ out:
return xdp_act;
}
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb = NULL;
+ unsigned int skb_len;
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
+
+ skb = napi_alloc_skb(&ch->napi, skb_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
+ skb_put(skb, fd_length);
+
+ memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+
+ return skb;
+}
+
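The copybreak path trades a memcpy for a DMA unmap: for frames no longer than priv->rx_copybreak, copying the payload into a freshly allocated skb and recycling the hardware buffer back to the pool (hence the dpaa2_eth_recycle_buf() rename above) avoids the unmap cost of building an skb around the original page, which tends to win for small frames.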
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
@@ -459,9 +488,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
- dma_unmap_page(dev, addr, priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ skb = dpaa2_eth_copybreak(ch, fd, vaddr);
+ if (!skb) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ }
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@@ -2431,8 +2463,6 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
percpu_stats->tx_packets += enqueued;
for (i = 0; i < enqueued; i++)
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
- for (i = enqueued; i < n; i++)
- xdp_return_frame_rx_napi(frames[i]);
return enqueued;
}
@@ -4304,6 +4334,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
skb_queue_head_init(&priv->tx_skbs);
+ priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
+
/* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 9b6a89709ce1..cdb623d5f2c1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -438,8 +438,6 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_ch_xdp {
struct bpf_prog *prog;
- u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
- int drop_cnt;
unsigned int res;
};
@@ -457,6 +455,10 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_xdp xdp;
struct xdp_rxq_info xdp_rxq;
struct list_head *rx_list;
+
+ /* Buffers to be recycled back in the buffer pool */
+ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int recycled_bufs_cnt;
};
struct dpaa2_eth_dist_fields {
@@ -487,6 +489,8 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -567,6 +571,8 @@ struct dpaa2_eth_priv {
struct devlink *devlink;
struct dpaa2_eth_trap_data *trap_data;
struct devlink_port devlink_port;
+
+ u32 rx_copybreak;
};
struct dpaa2_eth_devlink_priv {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index bf59708b869e..ad5e374eeccf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -782,6 +782,44 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
return 0;
}
+static int dpaa2_eth_get_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_set_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.nway_reset = dpaa2_eth_nway_reset,
@@ -796,4 +834,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
.get_ts_info = dpaa2_eth_get_ts_info,
+ .get_tunable = dpaa2_eth_get_tunable,
+ .set_tunable = dpaa2_eth_set_tunable,
};
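With these two hooks wired into dpaa2_ethtool_ops, the threshold becomes user-visible through the standard tunable interface; with a reasonably recent ethtool this should look like (device name hypothetical):

	# ethtool --set-tunable eth0 rx-copybreak 256
	# ethtool --get-tunable eth0 rx-copybreak

Note the setter stores any u32 without range-checking, so a value larger than the Rx buffer size simply routes every linear frame through the copy path.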
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 0af2e9914ec4..70e04321c420 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -9,7 +9,7 @@
#include <linux/ethtool.h>
-#include "ethsw.h"
+#include "dpaa2-switch.h"
static struct {
enum dpsw_counter id;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
new file mode 100644
index 000000000000..f9451ec5f2cb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch flower support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include "dpaa2-switch.h"
+
+static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
+ struct dpsw_acl_key *acl_key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpsw_acl_fields *acl_h, *acl_m;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h = &acl_key->match;
+ acl_m = &acl_key->mask;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ acl_h->l3_protocol = match.key->ip_proto;
+ acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
+ acl_m->l3_protocol = match.mask->ip_proto;
+ acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
+ ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
+ ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
+ ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ acl_h->l2_vlan_id = match.key->vlan_id;
+ acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
+ acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
+ match.key->vlan_dei;
+
+ acl_m->l2_vlan_id = match.mask->vlan_id;
+ acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
+ acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
+ match.mask->vlan_dei;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ acl_h->l3_source_ip = be32_to_cpu(match.key->src);
+ acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
+ acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
+ acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ acl_h->l4_source_port = be16_to_cpu(match.key->src);
+ acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
+ acl_m->l4_source_port = be16_to_cpu(match.mask->src);
+ acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if ((match.mask->tos & 0x3) != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on ECN not supported, only DSCP");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h->l3_dscp = match.key->tos >> 2;
+ acl_m->l3_dscp = match.mask->tos >> 2;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+		dev_err(dev, "DMA mapping failed\n");
+		kfree(cmd_buff);
+		return -EFAULT;
+	}
+
+ err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, acl_entry_cfg);
+
+	dma_unmap_single(dev, acl_entry_cfg->key_iova,
+			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+	if (err) {
+		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+		kfree(cmd_buff);
+		return err;
+	}
+
+ kfree(cmd_buff);
+
+ return 0;
+}
+
+static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+		dev_err(dev, "DMA mapping failed\n");
+		kfree(cmd_buff);
+		return -EFAULT;
+	}
+
+ err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, acl_entry_cfg);
+
+	dma_unmap_single(dev, acl_entry_cfg->key_iova,
+			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+	if (err) {
+		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+		kfree(cmd_buff);
+		return err;
+	}
+
+ kfree(cmd_buff);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ struct list_head *pos, *n;
+ int index = 0;
+
+ if (list_empty(&acl_tbl->entries)) {
+ list_add(&entry->list, &acl_tbl->entries);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &acl_tbl->entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
+ if (entry->prio < tmp->prio)
+ break;
+ index++;
+ }
+ list_add(&entry->list, pos->prev);
+ return index;
+}
+
+static struct dpaa2_switch_acl_entry*
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+ int index)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &acl_tbl->entries, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry,
+ int precedence)
+{
+ int err;
+
+ err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ if (err)
+ return err;
+
+ entry->cfg.precedence = precedence;
+ return dpaa2_switch_acl_entry_add(acl_tbl, entry);
+}
+
+static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ /* Add the new ACL entry to the linked list and get its index */
+ index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);
+
+	/* Move the existing ACL entries up in priority to make space
+	 * for the new filter.
+	 */
+ precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
+ for (i = 0; i < index; i++) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+
+ err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence++;
+ }
+
+ /* Add the new entry to hardware */
+ entry->cfg.precedence = precedence;
+ err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
+ acl_tbl->num_rules++;
+
+ return err;
+}
+
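A worked example of the precedence juggling above, assuming DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES is 16: the first rule added lands at hardware precedence 15. Adding a second rule with a larger tc prio inserts it at list index 1, precedence starts at 16 - 1 - 1 = 14, the loop rewrites the index-0 entry from 15 down to 14 (one remove+add round trip), and the new entry is installed at 15. Earlier list positions therefore always hold numerically lower precedence values, which, as the ordering implies, the DPSW matches first.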
+static struct dpaa2_switch_acl_entry *
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+ int index = 0;
+
+ list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ if (tmp->cookie == entry->cookie)
+ return index;
+ index++;
+ }
+ return -ENOENT;
+}
+
+static int
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);
+
+	/* Remove the ACL entry from hardware */
+ err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ if (err)
+ return err;
+
+ acl_tbl->num_rules--;
+
+ /* Remove it from the list also */
+ list_del(&entry->list);
+
+	/* Move the entries above the deleted one down in priority */
+ precedence = entry->cfg.precedence;
+ for (i = index - 1; i >= 0; i--) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+ err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence--;
+ }
+
+ kfree(entry);
+
+ return 0;
+}
+
+static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
+ struct flow_action_entry *cls_act,
+ struct dpsw_acl_result *dpsw_act,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (cls_act->id) {
+ case FLOW_ACTION_TRAP:
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+
+ dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
+ break;
+ case FLOW_ACTION_DROP:
+ dpsw_act->action = DPSW_ACL_ACTION_DROP;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (!flow_offload_has_one_action(&rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
+ if (err)
+ goto free_acl_entry;
+
+ act = &rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls)
+{
+ struct dpaa2_switch_acl_entry *entry;
+
+ entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
+ if (!entry)
+ return 0;
+
+ return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (!flow_offload_has_one_action(&cls->rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ act = &cls->rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct dpaa2_switch_acl_entry *entry;
+
+ entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
+ if (!entry)
+ return 0;
+
+ return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
new file mode 100644
index 000000000000..05de37c3b64c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -0,0 +1,3394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch driver
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/iommu.h>
+#include <net/pkt_cls.h>
+
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-switch.h"
+
+/* Minimal supported DPSW version */
+#define DPSW_MIN_VER_MAJOR 8
+#define DPSW_MIN_VER_MINOR 9
+
+#define DEFAULT_VLAN_ID 1
+
+static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
+{
+ return port_priv->fdb->fdb_id;
+}
+
+static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->fdbs[i].in_use)
+ return &ethsw->fdbs[i];
+ return NULL;
+}
+
+static struct dpaa2_switch_acl_tbl *
+dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->acls[i].in_use)
+ return &ethsw->acls[i];
+ return NULL;
+}
+
+static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
+ struct net_device *bridge_dev)
+{
+ struct ethsw_port_priv *other_port_priv = NULL;
+ struct dpaa2_switch_fdb *fdb;
+ struct net_device *other_dev;
+ struct list_head *iter;
+
+ /* If we leave a bridge (bridge_dev is NULL), find an unused
+ * FDB and use that.
+ */
+ if (!bridge_dev) {
+ fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
+
+ /* If there is no unused FDB, we must be the last port that
+ * leaves the last bridge, all the others are standalone. We
+ * can just keep the FDB that we already have.
+ */
+
+ if (!fdb) {
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+ port_priv->fdb = fdb;
+ port_priv->fdb->in_use = true;
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+	/* The call to netdev_for_each_lower_dev() below requires the RTNL
+	 * lock to be held. Assert on it so that it's easier to catch new code
+ * paths that reach this point without the RTNL lock.
+ */
+ ASSERT_RTNL();
+
+ /* If part of a bridge, use the FDB of the first dpaa2 switch interface
+ * to be present in that bridge
+ */
+ netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ if (other_dev == port_priv->netdev)
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ break;
+ }
+
+ /* The current port is about to change its FDB to the one used by the
+ * first port that joined the bridge.
+ */
+ if (other_port_priv) {
+ /* The previous FDB is about to become unused, since the
+ * interface is no longer standalone.
+ */
+ port_priv->fdb->in_use = false;
+ port_priv->fdb->bridge_dev = NULL;
+
+ /* Get a reference to the new FDB */
+ port_priv->fdb = other_port_priv->fdb;
+ }
+
+ /* Keep track of the new upper bridge device */
+ port_priv->fdb->bridge_dev = bridge_dev;
+
+ return 0;
+}
+
+static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
+ enum dpsw_flood_type type,
+ struct dpsw_egress_flood_cfg *cfg)
+{
+ int i = 0, j;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ /* Add all the DPAA2 switch ports found in the same bridging domain to
+ * the egress flooding domain
+ */
+ for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
+ if (!ethsw->ports[j])
+ continue;
+ if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
+ continue;
+
+ if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ }
+
+ /* Add the CTRL interface to the egress flooding domain */
+ cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
+
+ cfg->fdb_id = fdb_id;
+ cfg->flood_type = type;
+ cfg->num_ifs = i;
+}
+
+static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
+{
+ struct dpsw_egress_flood_cfg flood_cfg;
+ int err;
+
+ /* Setup broadcast flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ /* Setup unknown flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_vlan_cfg vcfg = {0};
+ int err;
+
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_vlan_add(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
+static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
+{
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_link_state state;
+ int err;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return true;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ return state.up ? true : false;
+}
+
+static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_tci_cfg tci_cfg = { 0 };
+ bool up;
+ int err, ret;
+
+ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
+
+ tci_cfg.vlan_id = pvid;
+
+ /* Interface needs to be down to change PVID */
+ up = dpaa2_switch_port_is_up(port_priv);
+ if (up) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ goto set_tci_error;
+ }
+
+ /* Delete previous PVID info and mark the new one */
+ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
+ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
+ port_priv->pvid = pvid;
+
+set_tci_error:
+ if (up) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
+ u16 vid, u16 flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ int err;
+
+ if (port_priv->vlans[vid]) {
+ netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ return -EEXIST;
+ }
+
+ /* If hit, this VLAN rule will lead the packet into the FDB table
+ * specified in the vlan configuration below
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
+ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
+ return err;
+ }
+
+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_add_if_untagged err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ err = dpaa2_switch_port_set_pvid(port_priv, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
+{
+ switch (state) {
+ case BR_STATE_DISABLED:
+ return DPSW_STP_STATE_DISABLED;
+ case BR_STATE_LISTENING:
+ return DPSW_STP_STATE_LISTENING;
+ case BR_STATE_LEARNING:
+ return DPSW_STP_STATE_LEARNING;
+ case BR_STATE_FORWARDING:
+ return DPSW_STP_STATE_FORWARDING;
+ case BR_STATE_BLOCKING:
+ return DPSW_STP_STATE_BLOCKING;
+ default:
+ return DPSW_STP_STATE_DISABLED;
+ }
+}
+
+static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+{
+ struct dpsw_stp_cfg stp_cfg = {0};
+ int err;
+ u16 vid;
+
+ if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
+ return 0; /* Nothing to do */
+
+ stp_cfg.state = br_stp_state_to_dpsw(state);
+ for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ stp_cfg.vlan_id = vid;
+ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &stp_cfg);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "dpsw_if_set_stp err %d\n", err);
+ return err;
+ }
+ }
+ }
+
+ port_priv->stp_state = state;
+
+ return 0;
+}
+
+static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
+{
+ struct ethsw_port_priv *ppriv_local = NULL;
+ int i, err;
+
+ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = 0;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ ppriv_local = ethsw->ports[i];
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_add_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+	/* Silently ignore the error if the del command is called repeatedly */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+	/* Silently ignore the error if the add command is called repeatedly */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
+ err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+	/* Silently ignore the error if the del command is called repeatedly */
+ if (err && err != -ENAVAIL)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_multicast err %d\n", err);
+ return err;
+}
+
+static void dpaa2_switch_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME_DISCARD,
+ &stats->rx_dropped);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FLTR_FRAME,
+ &tmp);
+ if (err)
+ goto error;
+ stats->rx_dropped += tmp;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME_DISCARD,
+ &stats->tx_dropped);
+ if (err)
+ goto error;
+
+ return;
+
+error:
+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
+static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
+ int attr_id)
+{
+ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
+}
+
+static int dpaa2_switch_port_get_offload_stats(int attr_id,
+ const struct net_device *netdev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
+ 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ (u16)ETHSW_L2_MAX_FRM(mtu));
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_if_set_max_frame_length() err %d\n", err);
+ return err;
+ }
+
+ netdev->mtu = mtu;
+ return 0;
+}
+
+static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state;
+ int err;
+
+ /* Interrupts are received even though no one issued an 'ifconfig up'
+ * on the switch interface. Ignore these link state update interrupts
+ */
+ if (!netif_running(netdev))
+ return 0;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ if (state.up != port_priv->link_state) {
+ if (state.up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ port_priv->link_state = state.up;
+ }
+
+ return 0;
+}
+
+/* Manage all NAPI instances for the control interface.
+ *
+ * We only have one RX queue and one Tx Conf queue for all
+ * switch ports. Therefore, we only need to enable the NAPI instance once, the
+ * first time one of the switch ports runs .dev_open().
+ */
+
+static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* a new interface is using the NAPI instance */
+ ethsw->napi_users++;
+
+ /* if there is already a user of the instance, return */
+ if (ethsw->napi_users > 1)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_enable(&ethsw->fq[i].napi);
+}
+
+static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* If we are not the last interface using the NAPI, return */
+ ethsw->napi_users--;
+ if (ethsw->napi_users)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_disable(&ethsw->fq[i].napi);
+}
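+
+/* Usage sketch (illustrative only, relying on the refcount semantics
+ * above): .ndo_open/.ndo_stop run once per switch port, but the shared
+ * NAPI instances are only touched on the 0 <-> 1 transitions of
+ * ethsw->napi_users:
+ *
+ *	rtnl_lock();
+ *	dpaa2_switch_enable_ctrl_if_napi(ethsw);  first user: napi_enable()
+ *	dpaa2_switch_enable_ctrl_if_napi(ethsw);  refcount only
+ *	dpaa2_switch_disable_ctrl_if_napi(ethsw); refcount only
+ *	dpaa2_switch_disable_ctrl_if_napi(ethsw); last user: napi_disable()
+ *	rtnl_unlock();
+ */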
+
+static int dpaa2_switch_port_open(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(netdev);
+
+ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ /* sync carrier state */
+ err = dpaa2_switch_port_carrier_state_sync(netdev);
+ if (err) {
+ netdev_err(netdev,
+ "dpaa2_switch_port_carrier_state_sync err %d\n", err);
+ goto err_carrier_sync;
+ }
+
+ dpaa2_switch_enable_ctrl_if_napi(ethsw);
+
+ return 0;
+
+err_carrier_sync:
+ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ return err;
+}
+
+static int dpaa2_switch_port_stop(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+
+ dpaa2_switch_disable_ctrl_if_napi(ethsw);
+
+ return 0;
+}
+
+static int dpaa2_switch_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+
+ ppid->id_len = 1;
+ ppid->id[0] = port_priv->ethsw_data->dev_id;
+
+ return 0;
+}
+
+static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
+ size_t len)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->idx);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct ethsw_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
+ struct ethsw_dump_ctx *dump)
+{
+ int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
+ struct ethsw_port_priv *port_priv)
+{
+ int idx = port_priv->idx;
+ int valid;
+
+ if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ valid = entry->if_info == port_priv->idx;
+ else
+ valid = entry->if_mask[idx / 8] & BIT(idx % 8);
+
+ return valid;
+}
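+
+/* Worked example (illustrative): for a multicast entry, if_mask is a
+ * per-port bitmap. With port_priv->idx == 10, the check above reads
+ * bit 10 % 8 == 2 of byte 10 / 8 == 1, i.e.
+ *
+ *	valid = entry->if_mask[1] & BIT(2);
+ */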
+
+static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
+ dpaa2_switch_fdb_cb_t cb, void *data)
+{
+ struct net_device *net_dev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct device *dev = net_dev->dev.parent;
+ struct fdb_dump_entry *fdb_entries;
+ struct fdb_dump_entry fdb_entry;
+ dma_addr_t fdb_dump_iova;
+ u16 num_fdb_entries;
+ u32 fdb_dump_size;
+ int err = 0, i;
+ u8 *dma_mem;
+ u16 fdb_id;
+
+ fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
+ dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, fdb_dump_iova)) {
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ err = -ENOMEM;
+ goto err_map;
+ }
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
+ fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
+ if (err) {
+ netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
+ goto err_dump;
+ }
+
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+
+ fdb_entries = (struct fdb_dump_entry *)dma_mem;
+ for (i = 0; i < num_fdb_entries; i++) {
+ fdb_entry = fdb_entries[i];
+
+ err = cb(port_priv, &fdb_entry, data);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(dma_mem);
+
+ return 0;
+
+err_dump:
+	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+err_map:
+ kfree(dma_mem);
+ return err;
+}
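+
+/* The iterator above hides the firmware dump mechanics (kzalloc + DMA
+ * map + dpsw_fdb_dump()) from its consumers. The callbacks defined
+ * next reuse it: dpaa2_switch_fdb_entry_dump() feeds matching entries
+ * into a netlink ndo_fdb_dump reply, while
+ * dpaa2_switch_fdb_entry_fast_age() deletes the dynamic ones when a
+ * port stops learning.
+ */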
+
+static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
+}
+
+static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *net_dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_dump_ctx dump = {
+ .dev = net_dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int err;
+
+ err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
+ *idx = dump.idx;
+
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data __always_unused)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
+ return 0;
+
+ if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
+
+ return 0;
+}
+
+static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
+{
+ dpaa2_switch_fdb_iterate(port_priv,
+ dpaa2_switch_fdb_entry_fast_age, NULL);
+}
+
+static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_add(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_del(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *net_dev = port_priv->netdev;
+ struct device *dev = net_dev->dev.parent;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
+ return 0;
+
+ /* Get firmware address, if any */
+ err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, mac_addr);
+ if (err) {
+ dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ /* No MAC address configured, fill in net_dev->dev_addr
+ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
+ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh, *skb;
+ dma_addr_t fd_addr;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
+
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
+static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ u8 *buff_start;
+ void *hwa;
+
+ buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
+ DPAA2_SWITCH_TX_BUF_ALIGN,
+ DPAA2_SWITCH_TX_BUF_ALIGN);
+
+ /* Clear FAS to have consistent values for TX confirmation. It is
+ * located in the first 8 bytes of the buffer's hardware annotation
+ * area
+ */
+ hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
+ memset(hwa, 0, 8);
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buff_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buff_start,
+ skb_tail_pointer(skb) - buff_start,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ /* Setup the FD fields */
+ memset(fd, 0, sizeof(*fd));
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+
+ return 0;
+}
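+
+/* Tx buffer layout produced above (sketch; the exact sizes come from
+ * the DPAA2_SWITCH_* defines in the header and are assumed here):
+ *
+ *	buff_start                           skb->data
+ *	v                                    v
+ *	+--------------+--------------+-----+------------------+
+ *	| SWA: skb     | HWA: first   | ... | frame data       |
+ *	| backpointer  | 8 bytes FAS  |     | (skb->len bytes) |
+ *	+--------------+--------------+-----+------------------+
+ *
+ * The FD address points at buff_start and the FD offset is
+ * skb->data - buff_start, so dpaa2_switch_free_fd() can recover the
+ * skb from the backpointer on Tx confirmation.
+ */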
+
+static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
+ struct dpaa2_fd fd;
+ int err;
+
+ if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
+ if (unlikely(!ns)) {
+ net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
+ goto err_free_skb;
+ }
+ dev_consume_skb_any(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx confirmation */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
+ goto err_exit;
+ }
+
+	/* At this stage we do not support non-linear skbs, so try to
+	 * linearize the skb and, if that fails, drop the packet.
+	 */
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
+ err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
+ do {
+ err = dpaa2_io_service_enqueue_qd(NULL,
+ port_priv->tx_qdid,
+ 8, 0, &fd);
+ retries--;
+ } while (err == -EBUSY && retries);
+
+ if (unlikely(err < 0)) {
+ dpaa2_switch_free_fd(ethsw, &fd);
+ goto err_exit;
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+err_exit:
+ return NETDEV_TX_OK;
+}
+
+static int
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return dpaa2_switch_cls_flower_replace(acl_tbl, f);
+ case FLOW_CLS_DESTROY:
+ return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
+ case TC_CLSMATCHALL_DESTROY:
+ return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(dpaa2_switch_block_cb_list);
+
+static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->acl_tbl)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, &acl_if_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+ return err;
+ }
+
+ acl_tbl->ports |= BIT(port_priv->idx);
+ port_priv->acl_tbl = acl_tbl;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->acl_tbl != acl_tbl)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, &acl_if_cfg);
+ if (err) {
+		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
+ return err;
+ }
+
+ acl_tbl->ports &= ~BIT(port_priv->idx);
+ port_priv->acl_tbl = NULL;
+ return 0;
+}
+
+static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
+ int err;
+
+ /* If the port is already bound to this ACL table then do nothing. This
+ * can happen when this port is the first one to join a tc block
+ */
+ if (port_priv->acl_tbl == acl_tbl)
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
+ if (err)
+ return err;
+
+ /* Mark the previous ACL table as being unused if this was the last
+ * port that was using it.
+ */
+ if (old_acl_tbl->ports == 0)
+ old_acl_tbl->in_use = false;
+
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+}
+
+static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *new_acl_tbl;
+ int err;
+
+	/* If we are the last port to leave a block (an ACL table), we'll
+	 * continue to use it as our private table
+	 */
+ if (acl_tbl->ports == BIT(port_priv->idx))
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
+ if (err)
+ return err;
+
+ if (acl_tbl->ports == 0)
+ acl_tbl->in_use = false;
+
+ new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
+ new_acl_tbl->in_use = true;
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
+}
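+
+/* Illustrative flow for the two helpers above (assumed semantics): a
+ * port joining a shared tc block moves from its private ACL table T_A
+ * to the block's table T_B, and T_A is marked unused once its port
+ * mask empties. Leaving the block is the reverse: unless the port is
+ * the sole user of T_B, it detaches and claims a fresh table through
+ * dpaa2_switch_acl_tbl_get_unused().
+ */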
+
+static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+
+ if (!block_cb) {
+ /* If the ACL table is not already known, then this port must
+ * be the first to join it. In this case, we can just continue
+ * to use our private table
+ */
+ acl_tbl = port_priv->acl_tbl;
+
+ block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw, acl_tbl, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ register_block = true;
+ } else {
+ acl_tbl = flow_block_cb_priv(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list,
+ &dpaa2_switch_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+ return err;
+}
+
+static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+ if (!block_cb)
+ return;
+
+ acl_tbl = flow_block_cb_priv(block_cb);
+ err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &dpaa2_switch_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return dpaa2_switch_setup_tc_block_bind(netdev, f);
+ case FLOW_BLOCK_UNBIND:
+ dpaa2_switch_setup_tc_block_unbind(netdev, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+	case TC_SETUP_BLOCK:
+		return dpaa2_switch_setup_tc_block(netdev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct net_device_ops dpaa2_switch_port_ops = {
+ .ndo_open = dpaa2_switch_port_open,
+ .ndo_stop = dpaa2_switch_port_stop,
+
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = dpaa2_switch_port_get_stats,
+ .ndo_change_mtu = dpaa2_switch_port_change_mtu,
+ .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
+ .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
+ .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
+ .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
+ .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,
+
+ .ndo_start_xmit = dpaa2_switch_port_tx,
+ .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
+ .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
+ .ndo_setup_tc = dpaa2_switch_port_setup_tc,
+};
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
+{
+ return netdev->netdev_ops == &dpaa2_switch_port_ops;
+}
+
+static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
+ dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
+ }
+}
+
+static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+
+ /* Mask the events and the if_id reserved bits to be cleared on read */
+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+ int err;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+ if (err) {
+ dev_err(dev, "Can't get irq status (err %d)\n", err);
+
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+ goto out;
+ }
+
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
+ dpaa2_switch_links_state_update(ethsw);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
+ struct fsl_mc_device_irq *irq;
+ int err;
+
+ err = fsl_mc_allocate_irqs(sw_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
+ err = -EINVAL;
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
+ NULL,
+ dpaa2_switch_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, mask);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 1);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ return 0;
+
+free_devm_irq:
+ devm_free_irq(dev, irq->msi_desc->irq, dev);
+free_irq:
+ fsl_mc_free_irqs(sw_dev);
+ return err;
+}
+
+static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err)
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
+ fsl_mc_free_irqs(sw_dev);
+}
+
+static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ enum dpsw_learning_mode learn_mode;
+ int err;
+
+ if (enable)
+ learn_mode = DPSW_LEARNING_MODE_HW;
+ else
+ learn_mode = DPSW_LEARNING_MODE_DIS;
+
+ err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, learn_mode);
+ if (err)
+ netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
+
+ if (!enable)
+ dpaa2_switch_port_fast_age(port_priv);
+
+ return err;
+}
+
+static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
+ u8 state)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpaa2_switch_port_set_stp_state(port_priv, state);
+ if (err)
+ return err;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ err = dpaa2_switch_port_set_learning(port_priv,
+ port_priv->learn_ena);
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
+ struct switchdev_brport_flags flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
+
+ if (flags.mask & BR_FLOOD)
+ port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
+
+ return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+}
+
+static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
+ BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
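+
+/* Example (illustrative): an iproute2 command along the lines of
+ *
+ *	bridge link set dev <port> flood on mcast_flood off
+ *
+ * trips the unicast != multicast check above and is rejected, while
+ * toggling both flags together passes and is later applied by
+ * dpaa2_switch_port_flood().
+ */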
+
+static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ if (err)
+ return err;
+ port_priv->learn_ena = learn_ena;
+ }
+
+ if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
+ err = dpaa2_switch_port_flood(port_priv, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_attr_set(struct net_device *netdev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ if (!attr->u.vlan_filtering) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The DPAA2 switch does not support VLAN-unaware operation");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_attr *attr = &ethsw->sw_attr;
+ int err = 0;
+
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
+
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
+ }
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
+
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
+ if (err)
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
+ }
+
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
+}
+
+static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
+ const unsigned char *addr)
+{
+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+ list_for_each_entry(ha, &list->list, list) {
+ if (ether_addr_equal(ha->addr, addr)) {
+ netif_addr_unlock_bh(netdev);
+ return 1;
+ }
+ }
+ netif_addr_unlock_bh(netdev);
+ return 0;
+}
+
+static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ /* Check if address is already set on this port */
+ if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -EEXIST;
+
+ err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_add(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_add err %d\n", err);
+ dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_add(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_add(netdev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_add(netdev,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg;
+ int i, err;
+
+ if (!port_priv->vlans[vid])
+ return -ENOENT;
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
+ /* If we are deleting the PVID of a port, use VLAN 4095 instead
+ * as we are sure that neither the bridge nor the 8021q module
+ * will use it
+ */
+ err = dpaa2_switch_port_set_pvid(port_priv, 4095);
+ if (err)
+ return err;
+ }
+
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
+ /* Delete VLAN from switch if it is no longer configured on
+ * any port
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ return 0; /* Found a port member in VID */
+
+ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
+ err = dpaa2_switch_dellink(ethsw, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ if (netif_is_bridge_master(vlan->obj.orig_dev))
+ return -EOPNOTSUPP;
+
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
+}
+
+static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -ENOENT;
+
+ err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_del(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_del err %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_del(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *ptr)
+{
+ int err;
+
+ err = switchdev_handle_port_attr_set(netdev, ptr,
+ dpaa2_switch_port_dev_check,
+ dpaa2_switch_port_attr_set);
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ struct net_device *upper_dev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
+ bool learn_ena;
+ int err;
+
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ netdev_err(netdev,
+ "Interface from a different DPSW is in the bridge already!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Delete the previously manually installed VLAN 1 */
+ err = dpaa2_switch_port_del_vlan(port_priv, 1);
+ if (err)
+ return err;
+
+ dpaa2_switch_port_set_fdb(port_priv, upper_dev);
+
+ /* Inherit the initial bridge port learning state */
+ learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ port_priv->learn_ena = learn_ena;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
+ return 0;
+
+err_egress_flood:
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+ return err;
+}
+
+static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
+}
+
+static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
+}
+
+static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ /* First of all, fast age any learn FDB addresses on this switch port */
+	/* First, fast age any learned FDB addresses on this switch port */
+
+ /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
+ * upper devices or otherwise from the FDB table that we are about to
+ * leave
+ */
+ err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
+
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+
+ /* Restore all RX VLANs into the new FDB table that we just joined */
+ err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
+
+ /* Reset the flooding state to denote that this port can send any
+ * packet in standalone mode. With this, we are also ensuring that any
+ * later bridge join will have the flooding flag on.
+ */
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast).
+ * When the port is not under a bridge, only the CTRL interface is part
+ * of the flooding domain besides the actual port
+ */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Recreate the egress flood domain of the FDB that we just left */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* No HW learning when not under a bridge */
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ return err;
+ port_priv->learn_ena = false;
+
+	/* Add VLAN 1 as PVID when not under a bridge. We need this since
+	 * the dpaa2-switch interfaces cannot operate in VLAN-unaware mode
+	 */
+ return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
+ BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
+}
+
+static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ /* RCU read lock not necessary because we have write-side protection
+ * (rtnl_mutex), however a non-rcu iterator does not exist.
+ */
+ netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
+ if (is_vlan_dev(upper_dev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ int err = 0;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ break;
+
+ if (!br_vlan_enabled(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot join a bridge while VLAN uppers are present");
+ goto out;
+ }
+
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
+ else
+ err = dpaa2_switch_port_bridge_leave(netdev);
+ }
+ break;
+ }
+
+out:
+ return notifier_from_errno(err);
+}
+
+struct ethsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void dpaa2_switch_event_work(struct work_struct *work)
+{
+ struct ethsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct ethsw_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
+
+ rtnl_lock();
+ fdb_info = &switchdev_work->fdb_info;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
+ fdb_info->addr);
+ else
+ err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
+ fdb_info->addr);
+ if (err)
+ break;
+ fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
+ &fdb_info->info, NULL);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int dpaa2_switch_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+ struct ethsw_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+
+ if (!dpaa2_switch_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+
+		/* Take a reference on the device to avoid it being freed. */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(ethsw->workqueue, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int dpaa2_switch_port_obj_event(unsigned long event,
+ struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return dpaa2_switch_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct device *dev = ethsw->dev;
+ struct sk_buff *skb = NULL;
+ void *fd_vaddr;
+
+ fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
+ dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ dev_err(dev, "build_skb() failed\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ ethsw->buf_count--;
+
+ return skb;
+}
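+
+/* Note on buffer accounting (sketch of the scheme assumed here): every
+ * frame consumed takes one page out of the buffer pool, hence the
+ * buf_count decrement above; the NAPI poll loop compensates by calling
+ * dpaa2_switch_refill_bp() before pulling more frames.
+ */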
+
+static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ dpaa2_switch_free_fd(fq->ethsw, fd);
+}
+
+static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ struct ethsw_port_priv *port_priv;
+ struct net_device *netdev;
+ struct vlan_ethhdr *hdr;
+ struct sk_buff *skb;
+ u16 vlan_tci, vid;
+ int if_id, err;
+
+ /* get switch ingress interface ID */
+ if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
+
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "Frame received from unknown interface!\n");
+ goto err_free_fd;
+ }
+ port_priv = ethsw->ports[if_id];
+ netdev = port_priv->netdev;
+
+ /* build the SKB based on the FD received */
+	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
+		if (net_ratelimit())
+			netdev_err(netdev, "Received invalid frame format\n");
+		goto err_free_fd;
+	}
+
+ skb = dpaa2_switch_build_linear_skb(ethsw, fd);
+ if (unlikely(!skb))
+ goto err_free_fd;
+
+ skb_reset_mac_header(skb);
+
+	/* Remove the VLAN header if the packet that we just received has a VID
+	 * equal to the port PVID. Since the dpaa2-switch can operate only in
+ * VLAN-aware mode and no alterations are made on the packet when it's
+ * redirected/mirrored to the control interface, we are sure that there
+ * will always be a VLAN header present.
+ */
+ hdr = vlan_eth_hdr(skb);
+ vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
+ if (vid == port_priv->pvid) {
+ err = __skb_vlan_pop(skb, &vlan_tci);
+ if (err) {
+ dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
+ goto err_free_fd;
+ }
+ }
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ /* Setup the offload_fwd_mark only if the port is under a bridge */
+ skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
+
+ netif_receive_skb(skb);
+
+ return;
+
+err_free_fd:
+ dpaa2_switch_free_fd(ethsw, fd);
+}
+
+static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
+{
+ ethsw->features = 0;
+
+ if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
+ ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
+}
+
+static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_attr ctrl_if_attr;
+ struct device *dev = ethsw->dev;
+ int i = 0;
+ int err;
+
+ err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ctrl_if_attr);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
+ return err;
+ }
+
+ ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_RX;
+
+ ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
+
+ return 0;
+}
+
+/* Free buffers acquired from the buffer pool or buffers that were meant
+ * to be released into it
+ */
+static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
+{
+ struct device *dev = ethsw->dev;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
+ dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
+{
+ struct device *dev = ethsw->dev;
+ u64 buf_array[BUFS_PER_CMD];
+ struct page *page;
+ int retries = 0;
+ dma_addr_t addr;
+ int err;
+ int i;
+
+ for (i = 0; i < BUFS_PER_CMD; i++) {
+ /* Allocate one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page) {
+ dev_err(dev, "buffer allocation failed\n");
+ goto err_alloc;
+ }
+
+ addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ dev_err(dev, "dma_map_single() failed\n");
+ goto err_map;
+ }
+ buf_array[i] = addr;
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful or
+ * max retries hit.
+ */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
+ break;
+
+ cpu_relax();
+ }
+
+ /* If release command failed, clean up and bail out. */
+ if (err) {
+ dpaa2_switch_free_bufs(ethsw, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ __free_pages(page, 0);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
+{
+ int *count = &ethsw->buf_count;
+ int new_count;
+ int err = 0;
+
+ if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
+ do {
+ new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll
+ * try later on
+ */
+ break;
+ }
+ *count += new_count;
+ } while (*count < DPAA2_ETHSW_NUM_BUFS);
+
+ if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
+ err = -ENOMEM;
+ }
+
+ return err;
+}
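+
+/* Refill policy illustrated (threshold values live in the header and
+ * are assumed here): once buf_count dips below
+ * DPAA2_ETHSW_REFILL_THRESH, the pool is topped back up to
+ * DPAA2_ETHSW_NUM_BUFS in BUFS_PER_CMD-sized release commands,
+ * amortizing QBMan portal accesses instead of releasing one buffer per
+ * consumed frame.
+ */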
+
+static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
+{
+ int *count, i;
+
+ for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ count = &ethsw->buf_count;
+ *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+
+ if (unlikely(*count < BUFS_PER_CMD))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
+{
+ u64 buf_array[BUFS_PER_CMD];
+ int ret;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
+ buf_array, BUFS_PER_CMD);
+ if (ret < 0) {
+ dev_err(ethsw->dev,
+ "dpaa2_io_service_acquire() = %d\n", ret);
+ return;
+ }
+ dpaa2_switch_free_bufs(ethsw, buf_array, ret);
+
+ } while (ret);
+}
+
+static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
+ struct device *dev = ethsw->dev;
+ struct fsl_mc_device *dpbp_dev;
+ struct dpbp_attr dpbp_attrs;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+ ethsw->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
+ &dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
+ dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
+ dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
+ dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
+
+ err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &dpsw_ctrl_if_pools_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+ ethsw->bpid = dpbp_attrs.id;
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+ dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+ return err;
+}
+
+static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
+{
+ dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ fsl_mc_object_free(ethsw->dpbp_dev);
+}
+
+static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ ethsw->fq[i].store =
+ dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
+ ethsw->dev);
+ if (!ethsw->fq[i].store) {
+ dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
+ while (--i >= 0)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+}
+
+static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
+{
+ int err, retries = 0;
+
+	/* Try to pull from the FQ while the portal is busy and we didn't hit
+	 * the maximum number of retries
+	 */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ if (unlikely(err))
+ dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+/* Consume all frames pull-dequeued into the store */
+static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ int cleaned = 0, is_last;
+ struct dpaa2_dq *dq;
+ int retries = 0;
+
+ do {
+ /* Get the next available FD from the store */
+ dq = dpaa2_io_store_next(fq->store, &is_last);
+ if (unlikely(!dq)) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
+ dev_err_once(ethsw->dev,
+ "No valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ if (fq->type == DPSW_QUEUE_RX)
+ dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
+ else
+ dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
+ cleaned++;
+
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* NAPI poll routine */
+static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
+{
+ int err, cleaned = 0, store_cleaned, work_done;
+ struct dpaa2_switch_fq *fq;
+ int retries = 0;
+
+ fq = container_of(napi, struct dpaa2_switch_fq, napi);
+
+ do {
+ err = dpaa2_switch_pull_fq(fq);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ dpaa2_switch_refill_bp(fq->ethsw);
+
+ store_cleaned = dpaa2_switch_store_consume(fq);
+ cleaned += store_cleaned;
+
+ if (cleaned >= budget) {
+ work_done = budget;
+ goto out;
+ }
+
+ } while (store_cleaned);
+
+ /* We didn't consume the entire budget, so finish napi and re-enable
+ * data availability notifications
+ */
+ napi_complete_done(napi, cleaned);
+ do {
+ err = dpaa2_io_service_rearm(NULL, &fq->nctx);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ work_done = max(cleaned, 1);
+out:
+
+ return work_done;
+}
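+
+/* Illustrative summary of the NAPI contract honored above: returning
+ * the full budget keeps the instance scheduled for another poll;
+ * returning less requires napi_complete_done() plus rearming the FQ
+ * data-availability notification so that the next FQDAN can reschedule
+ * us through dpaa2_switch_fqdan_cb().
+ */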
+
+static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_switch_fq *fq;
+
+ fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
+
+ napi_schedule(&fq->napi);
+}
+
+static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_queue_cfg queue_cfg;
+ struct dpaa2_io_notification_ctx *nctx;
+ int err, i, j;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ nctx = &ethsw->fq[i].nctx;
+
+ /* Register a new software context for the FQID.
+ * By using NULL as the first parameter, we specify that we do
+ * not care on which cpu are interrupts received for this queue
+		 * not care on which CPU interrupts are received for this queue
+ nctx->is_cdan = 0;
+ nctx->id = ethsw->fq[i].fqid;
+ nctx->desired_cpu = DPAA2_IO_ANY_CPU;
+ nctx->cb = dpaa2_switch_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
+ if (err) {
+ err = -EPROBE_DEFER;
+ goto err_register;
+ }
+
+ queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
+ DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
+ queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
+ queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
+ queue_cfg.dest_cfg.priority = 0;
+ queue_cfg.user_ctx = nctx->qman64;
+
+ err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ ethsw->fq[i].type,
+ &queue_cfg);
+ if (err)
+ goto err_set_queue;
+ }
+
+ return 0;
+
+err_set_queue:
+ dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
+err_register:
+ for (j = 0; j < i; j++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
+ ethsw->dev);
+
+ return err;
+}
+
+static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
+ ethsw->dev);
+}
+
+static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
+{
+ int err;
+
+ /* setup FQs for Rx and Tx Conf */
+ err = dpaa2_switch_setup_fqs(ethsw);
+ if (err)
+ return err;
+
+ /* setup the buffer pool needed on the Rx path */
+ err = dpaa2_switch_setup_dpbp(ethsw);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_seed_bp(ethsw);
+ if (err)
+ goto err_free_dpbp;
+
+ err = dpaa2_switch_alloc_rings(ethsw);
+ if (err)
+ goto err_drain_dpbp;
+
+ err = dpaa2_switch_setup_dpio(ethsw);
+ if (err)
+ goto err_destroy_rings;
+
+ err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
+ goto err_deregister_dpio;
+ }
+
+ return 0;
+
+err_deregister_dpio:
+ dpaa2_switch_free_dpio(ethsw);
+err_destroy_rings:
+ dpaa2_switch_destroy_rings(ethsw);
+err_drain_dpbp:
+ dpaa2_switch_drain_bp(ethsw);
+err_free_dpbp:
+ dpaa2_switch_free_dpbp(ethsw);
+
+ return err;
+}
+
+static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ struct dpsw_tci_cfg tci_cfg = {0};
+ struct dpsw_stp_cfg stp_cfg;
+ int err;
+ u16 i;
+
+ ethsw->dev_id = sw_dev->obj_desc.id;
+
+ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
+ return err;
+ }
+
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &ethsw->major,
+ &ethsw->minor);
+ if (err) {
+ dev_err(dev, "dpsw_get_api_version err %d\n", err);
+ goto err_close;
+ }
+
+ /* Minimum supported DPSW version check */
+ if (ethsw->major < DPSW_MIN_VER_MAJOR ||
+ (ethsw->major == DPSW_MIN_VER_MAJOR &&
+ ethsw->minor < DPSW_MIN_VER_MINOR)) {
+ dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
+ ethsw->major, ethsw->minor);
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ dpaa2_switch_detect_features(ethsw);
+
+ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
+ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
+ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
+ if (err) {
+ dev_err(dev, "dpsw_if_disable err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ err, i);
+ goto err_close;
+ }
+
+ /* Switch starts with all ports configured to VLAN 1. Need to
+ * remove this setting to allow configuration at bridge join
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = i;
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ goto err_close;
+ }
+
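+		/* VLAN 4095 is reserved by IEEE 802.1Q, so using it as the
+		 * default TCI leaves the port without a usable PVID until
+		 * one is explicitly configured
+		 */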
+ tci_cfg.vlan_id = 4095;
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_tci err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
+ goto err_close;
+ }
+ }
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove err %d\n", err);
+ goto err_close;
+ }
+
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
+ err = -ENOMEM;
+ goto err_close;
+ }
+
+ err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ err = dpaa2_switch_ctrl_if_setup(ethsw);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ return 0;
+
+err_destroy_ordered_workqueue:
+ destroy_workqueue(ethsw->workqueue);
+
+err_close:
+ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
+/* Add an ACL to redirect frames with specific destination MAC address to
+ * control interface
+ */
+static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
+ const char *mac)
+{
+ struct dpaa2_switch_acl_entry acl_entry = {0};
+
+ /* Match on the destination MAC address */
+ ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
+ eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
+
+ /* Trap to CPU */
+ acl_entry.cfg.precedence = 0;
+ acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+
+ return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
+}
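+
+/* For example, trapping IEEE 802.1D STP BPDUs (destination MAC
+ * 01:80:c2:00:00:00) so that the bridge layer can process them takes a
+ * single call against the port's ACL table; dpaa2_switch_port_init()
+ * below does exactly that with the 'stpa' address.
+ */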
+
+static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
+ const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = DEFAULT_VLAN_ID,
+ .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
+ };
+ struct net_device *netdev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpsw_fdb_cfg fdb_cfg = {0};
+ struct dpsw_if_attr dpsw_if_attr;
+ struct dpaa2_switch_fdb *fdb;
+ struct dpsw_acl_cfg acl_cfg;
+ u16 fdb_id, acl_tbl_id;
+ int err;
+
+ /* Get the Tx queue for this specific port */
+ err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &dpsw_if_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
+ return err;
+ }
+ port_priv->tx_qdid = dpsw_if_attr.qdid;
+
+ /* Create a FDB table for this particular switch port */
+ fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
+ err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &fdb_id, &fdb_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
+ return err;
+ }
+
+ /* Find an unused dpaa2_switch_fdb structure and use it */
+ fdb = dpaa2_switch_fdb_get_unused(ethsw);
+ fdb->fdb_id = fdb_id;
+ fdb->in_use = true;
+ fdb->bridge_dev = NULL;
+ port_priv->fdb = fdb;
+
+ /* We need to add VLAN 1 as the PVID on this port until it is under a
+ * bridge since the DPAA2 switch is not able to handle the traffic in a
+ * VLAN unaware fashion
+ */
+ err = dpaa2_switch_port_vlans_add(netdev, &vlan);
+ if (err)
+ return err;
+
+	/* Setup the egress flooding domains (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Create an ACL table to be used by this switch port */
+ acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+ err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &acl_tbl_id, &acl_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add err %d\n", err);
+ return err;
+ }
+
+ acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
+ acl_tbl->ethsw = ethsw;
+ acl_tbl->id = acl_tbl_id;
+ acl_tbl->in_use = true;
+ acl_tbl->num_rules = 0;
+ INIT_LIST_HEAD(&acl_tbl->entries);
+
+ err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
+ if (err)
+ return err;
+
+	return 0;
+}
+
+static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err)
+ dev_warn(dev, "dpsw_close err %d\n", err);
+}
+
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
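+
+/* Note that the teardown above releases resources in the exact reverse
+ * order of dpaa2_switch_ctrl_if_setup(), mirroring its error unwind path.
+ */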
+
+static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
+{
+ struct ethsw_port_priv *port_priv;
+ struct ethsw_core *ethsw;
+ struct device *dev;
+ int i;
+
+ dev = &sw_dev->dev;
+ ethsw = dev_get_drvdata(dev);
+
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ dpaa2_switch_teardown_irqs(sw_dev);
+
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
+
+ kfree(ethsw->fdbs);
+ kfree(ethsw->acls);
+ kfree(ethsw->ports);
+
+ dpaa2_switch_takedown(sw_dev);
+
+ destroy_workqueue(ethsw->workqueue);
+
+ fsl_mc_portal_free(ethsw->mc_io);
+
+ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv;
+ struct device *dev = ethsw->dev;
+ struct net_device *port_netdev;
+ int err;
+
+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
+ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
+
+ port_priv = netdev_priv(port_netdev);
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+ SET_NETDEV_DEV(port_netdev, dev);
+ port_netdev->netdev_ops = &dpaa2_switch_port_ops;
+ port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
+
+ port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
+
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Set MTU limits */
+ port_netdev->min_mtu = ETH_MIN_MTU;
+ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
+ /* Populate the private port structure so that later calls to
+ * dpaa2_switch_port_init() can use it.
+ */
+ ethsw->ports[port_idx] = port_priv;
+
+ /* The DPAA2 switch's ingress path depends on the VLAN table,
+ * thus we are not able to disable VLAN filtering.
+ */
+ port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_FILTER |
+ NETIF_F_HW_TC;
+
+ err = dpaa2_switch_port_init(port_priv, port_idx);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_mac_addr(port_priv);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ goto err_port_probe;
+ port_priv->learn_ena = false;
+
+ return 0;
+
+err_port_probe:
+ free_netdev(port_netdev);
+ ethsw->ports[port_idx] = NULL;
+
+ return err;
+}
+
+static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw;
+ int i, err;
+
+	/* Allocate switch core */
+	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+ if (!ethsw)
+ return -ENOMEM;
+
+ ethsw->dev = dev;
+ ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
+ dev_set_drvdata(dev, ethsw);
+
+ err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &ethsw->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_drvdata;
+ }
+
+ err = dpaa2_switch_init(sw_dev);
+ if (err)
+ goto err_free_cmdport;
+
+ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
+ GFP_KERNEL);
+	if (!ethsw->ports) {
+ err = -ENOMEM;
+ goto err_takedown;
+ }
+
+ ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
+ GFP_KERNEL);
+ if (!ethsw->fdbs) {
+ err = -ENOMEM;
+ goto err_free_ports;
+ }
+
+ ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
+ GFP_KERNEL);
+ if (!ethsw->acls) {
+ err = -ENOMEM;
+ goto err_free_fdbs;
+ }
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpaa2_switch_probe_port(ethsw, i);
+ if (err)
+ goto err_free_netdev;
+ }
+
+ /* Add a NAPI instance for each of the Rx queues. The first port's
+ * net_device will be associated with the instances since we do not have
+	 * different queues for each switch port.
+ */
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ netif_napi_add(ethsw->ports[0]->netdev,
+ &ethsw->fq[i].napi, dpaa2_switch_poll,
+ NAPI_POLL_WEIGHT);
+
+ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
+		goto err_free_all_netdev;
+ }
+
+ /* Setup IRQs */
+ err = dpaa2_switch_setup_irqs(sw_dev);
+ if (err)
+ goto err_stop;
+
+ /* Register the netdev only when the entire setup is done and the
+ * switch port interfaces are ready to receive traffic
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = register_netdev(ethsw->ports[i]->netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ goto err_unregister_ports;
+ }
+ }
+
+ return 0;
+
+err_unregister_ports:
+ for (i--; i >= 0; i--)
+ unregister_netdev(ethsw->ports[i]->netdev);
+ dpaa2_switch_teardown_irqs(sw_dev);
+err_stop:
+	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+err_free_all_netdev:
+	/* all port netdevs are allocated by the time we reach these labels;
+	 * reset i so that the loop below frees every one of them
+	 */
+	i = ethsw->sw_attr.num_ifs;
+err_free_netdev:
+ for (i--; i >= 0; i--)
+ free_netdev(ethsw->ports[i]->netdev);
+ kfree(ethsw->acls);
+err_free_fdbs:
+ kfree(ethsw->fdbs);
+err_free_ports:
+ kfree(ethsw->ports);
+
+err_takedown:
+ dpaa2_switch_takedown(sw_dev);
+
+err_free_cmdport:
+ fsl_mc_portal_free(ethsw->mc_io);
+
+err_free_drvdata:
+ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
+
+ return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
+
+static struct fsl_mc_driver dpaa2_switch_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_switch_probe,
+ .remove = dpaa2_switch_remove,
+ .match_id_table = dpaa2_switch_match_id_table
+};
+
+static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
+ .notifier_call = dpaa2_switch_port_netdevice_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_nb = {
+ .notifier_call = dpaa2_switch_port_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
+ .notifier_call = dpaa2_switch_port_blocking_event,
+};
+
+static int dpaa2_switch_register_notifiers(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
+ return err;
+ }
+
+ err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
+ goto err_switchdev_nb;
+ }
+
+ err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
+ goto err_switchdev_blocking_nb;
+ }
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+
+ return err;
+}
+
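+/* Tear the notifiers down in the reverse order of registration, the usual
+ * idiom for paired register/unregister calls.
+ */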
+static void dpaa2_switch_unregister_notifiers(void)
+{
+ int err;
+
+ err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
+ err);
+
+ err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
+
+ err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
+}
+
+static int __init dpaa2_switch_driver_init(void)
+{
+ int err;
+
+ err = fsl_mc_driver_register(&dpaa2_switch_drv);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_register_notifiers();
+ if (err) {
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_switch_driver_exit(void)
+{
+ dpaa2_switch_unregister_notifiers();
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+}
+
+module_init(dpaa2_switch_driver_init);
+module_exit(dpaa2_switch_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
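+
+/* A rough usage sketch, assuming NXP's restool utility and illustrative
+ * object names: creating a DPSW object and plugging it into the root
+ * container is what triggers dpaa2_switch_probe(), which then registers
+ * one net_device per switch port:
+ *
+ *	restool dpsw create --num-ifs=4
+ *	restool dprc assign dprc.1 --object=dpsw.0 --plugged=1
+ */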
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
new file mode 100644
index 000000000000..bdef71f234cb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DPAA2 Ethernet Switch declarations
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __ETHSW_H
+#define __ETHSW_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <uapi/linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/fsl/mc.h>
+#include <net/pkt_cls.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "dpsw.h"
+
+/* Number of IRQs supported */
+#define DPSW_IRQ_NUM 2
+
+/* Port is member of VLAN */
+#define ETHSW_VLAN_MEMBER 1
+/* VLAN to be treated as untagged on egress */
+#define ETHSW_VLAN_UNTAGGED 2
+/* Untagged frames will be assigned to this VLAN */
+#define ETHSW_VLAN_PVID 4
+/* VLAN configured on the switch */
+#define ETHSW_VLAN_GLOBAL 8
+
+/* Maximum Frame Length supported by HW (currently 10k) */
+#define DPAA2_MFL (10 * 1024)
+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+
+#define ETHSW_FEATURE_MAC_ADDR BIT(0)
+
+/* Number of receive queues (one RX and one TX_CONF) */
+#define DPAA2_SWITCH_RX_NUM_FQS 2
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_SWITCH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_SWITCH_RX_BUF_SIZE \
+ (DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
+
+#define DPAA2_SWITCH_STORE_SIZE 16
+
+/* Buffer management */
+#define BUFS_PER_CMD 7
+#define DPAA2_ETHSW_NUM_BUFS (1024 * BUFS_PER_CMD)
+#define DPAA2_ETHSW_REFILL_THRESH (DPAA2_ETHSW_NUM_BUFS * 5 / 6)
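+/* For example, with BUFS_PER_CMD = 7 the pool holds 7 * 1024 = 7168
+ * buffers and the refill threshold works out to 7168 * 5 / 6 = 5973.
+ */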
+
+/* Number of times to retry DPIO portal operations while waiting
+ * for portal to finish executing current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_SWITCH_SWP_BUSY_RETRIES 1000
+
+/* Hardware annotation buffer size */
+#define DPAA2_SWITCH_HWA_SIZE 64
+/* Software annotation buffer size */
+#define DPAA2_SWITCH_SWA_SIZE 64
+
+#define DPAA2_SWITCH_TX_BUF_ALIGN 64
+
+#define DPAA2_SWITCH_TX_DATA_OFFSET \
+ (DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)
+
+#define DPAA2_SWITCH_NEEDED_HEADROOM \
+ (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
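+
+/* Worked example, assuming 4 KiB pages: an Rx buffer spans one page with
+ * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) reserved as tailroom,
+ * while Tx needs 64 + 64 = 128 bytes of annotation space plus 64 bytes
+ * of alignment slack, i.e. 192 bytes of needed headroom.
+ */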
+
+#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16
+#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1
+
+#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256
+
+extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
+
+struct ethsw_core;
+
+struct dpaa2_switch_fq {
+ struct ethsw_core *ethsw;
+ enum dpsw_queue_type type;
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+ struct napi_struct napi;
+ u32 fqid;
+};
+
+struct dpaa2_switch_fdb {
+ struct net_device *bridge_dev;
+ u16 fdb_id;
+ bool in_use;
+};
+
+struct dpaa2_switch_acl_entry {
+ struct list_head list;
+ u16 prio;
+ unsigned long cookie;
+
+ struct dpsw_acl_entry_cfg cfg;
+ struct dpsw_acl_key key;
+};
+
+struct dpaa2_switch_acl_tbl {
+ struct list_head entries;
+ struct ethsw_core *ethsw;
+ u64 ports;
+
+ u16 id;
+ u8 num_rules;
+ bool in_use;
+};
+
+static inline bool
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+	return (acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+	       DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+}
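+
+/* With DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES = 16 and one entry reserved for
+ * the default trap, at most 15 user-installed rules fit in one table.
+ */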
+
+/* Per port private data */
+struct ethsw_port_priv {
+ struct net_device *netdev;
+ u16 idx;
+ struct ethsw_core *ethsw_data;
+ u8 link_state;
+ u8 stp_state;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+ u16 pvid;
+ u16 tx_qdid;
+
+ struct dpaa2_switch_fdb *fdb;
+ bool bcast_flood;
+ bool ucast_flood;
+ bool learn_ena;
+
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+};
+
+/* Switch data */
+struct ethsw_core {
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ u16 dpsw_handle;
+ struct dpsw_attr sw_attr;
+ u16 major, minor;
+ unsigned long features;
+ int dev_id;
+ struct ethsw_port_priv **ports;
+ struct iommu_domain *iommu_domain;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+
+ struct workqueue_struct *workqueue;
+
+ struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];
+ struct fsl_mc_device *dpbp_dev;
+ int buf_count;
+ u16 bpid;
+ int napi_users;
+
+ struct dpaa2_switch_fdb *fdbs;
+ struct dpaa2_switch_acl_tbl *acls;
+};
+
+static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
+ struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->netdev == netdev)
+ return ethsw->ports[i]->idx;
+
+ return -EINVAL;
+}
+
+static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
+{
+ if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
+ dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
+ dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
+ dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data);
+
+/* TC offload */
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry);
+#endif /* __ETHSW_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
index 6de613b13e4d..6f596a5fbeeb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpkg.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
@@ -13,11 +13,12 @@
/** Key Generator properties */
/**
- * Number of masks per key extraction
+ * DPKG_NUM_OF_MASKS - Number of masks per key extraction
*/
#define DPKG_NUM_OF_MASKS 4
+
/**
- * Number of extractions per key profile
+ * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile
*/
#define DPKG_MAX_NUM_OF_EXTRACTS 10
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 135f143097a5..8f7ceb731282 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -83,39 +83,21 @@ int dpmac_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpmac_attr *attr);
-/**
- * DPMAC link configuration/state options
- */
+/* DPMAC link configuration/state options */
-/**
- * Enable auto-negotiation
- */
#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
-/**
- * Enable half-duplex mode
- */
#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
-/**
- * Enable pause frames
- */
#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
-/**
- * Enable a-symmetric pause frames
- */
#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
-/**
- * Advertised link speeds
- */
+/* Advertised link speeds */
#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
-/**
- * Advertise auto-negotiation enable
- */
+/* Advertise auto-negotiation enable */
#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
/**
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index aa429c17c343..d6afada99fb6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -17,6 +17,8 @@
* This function has to be called before the following functions:
* - dpni_set_rx_tc_dist()
* - dpni_set_qos_table()
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
{
@@ -1793,6 +1795,8 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
* If cfg.enable is set to 0 the command will clear flow steering table.
* The packets will be classified according to settings made in
* dpni_set_rx_hash_dist()
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -1826,6 +1830,8 @@ int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
* If cfg.enable is set to 1 the packets will be classified using a hash
* function based on the key received in cfg.key_cfg_iova parameter.
* If cfg.enable is set to 0 the packets will be sent to the default queue
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 4e96d9362dd2..7de0562bbf59 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -10,73 +10,76 @@
struct fsl_mc_io;
-/**
- * Data Path Network Interface API
+/* Data Path Network Interface API
* Contains initialization APIs and runtime control APIs for DPNI
*/
/** General DPNI macros */
/**
- * Maximum number of traffic classes
+ * DPNI_MAX_TC - Maximum number of traffic classes
*/
#define DPNI_MAX_TC 8
/**
- * Maximum number of buffer pools per DPNI
+ * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI
*/
#define DPNI_MAX_DPBP 8
/**
- * All traffic classes considered; see dpni_set_queue()
+ * DPNI_ALL_TCS - All traffic classes considered; see dpni_set_queue()
*/
#define DPNI_ALL_TCS (u8)(-1)
/**
- * All flows within traffic class considered; see dpni_set_queue()
+ * DPNI_ALL_TC_FLOWS - All flows within traffic class considered; see
+ * dpni_set_queue()
*/
#define DPNI_ALL_TC_FLOWS (u16)(-1)
/**
- * Generate new flow ID; see dpni_set_queue()
+ * DPNI_NEW_FLOW_ID - Generate new flow ID; see dpni_set_queue()
*/
#define DPNI_NEW_FLOW_ID (u16)(-1)
/**
- * Tx traffic is always released to a buffer pool on transmit, there are no
- * resources allocated to have the frames confirmed back to the source after
- * transmission.
+ * DPNI_OPT_TX_FRM_RELEASE - Tx traffic is always released to a buffer pool on
+ * transmit, there are no resources allocated to have the frames confirmed back
+ * to the source after transmission.
*/
#define DPNI_OPT_TX_FRM_RELEASE 0x000001
/**
- * Disables support for MAC address filtering for addresses other than primary
- * MAC address. This affects both unicast and multicast. Promiscuous mode can
- * still be enabled/disabled for both unicast and multicast. If promiscuous mode
- * is disabled, only traffic matching the primary MAC address will be accepted.
+ * DPNI_OPT_NO_MAC_FILTER - Disables support for MAC address filtering for
+ * addresses other than primary MAC address. This affects both unicast and
+ * multicast. Promiscuous mode can still be enabled/disabled for both unicast
+ * and multicast. If promiscuous mode is disabled, only traffic matching the
+ * primary MAC address will be accepted.
*/
#define DPNI_OPT_NO_MAC_FILTER 0x000002
/**
- * Allocate policers for this DPNI. They can be used to rate-limit traffic per
- * traffic class (TC) basis.
+ * DPNI_OPT_HAS_POLICING - Allocate policers for this DPNI. They can be used to
+ * rate-limit traffic per traffic class (TC) basis.
*/
#define DPNI_OPT_HAS_POLICING 0x000004
/**
- * Congestion can be managed in several ways, allowing the buffer pool to
- * deplete on ingress, taildrop on each queue or use congestion groups for sets
- * of queues. If set, it configures a single congestion groups across all TCs.
- * If reset, a congestion group is allocated for each TC. Only relevant if the
- * DPNI has multiple traffic classes.
+ * DPNI_OPT_SHARED_CONGESTION - Congestion can be managed in several ways,
+ * allowing the buffer pool to deplete on ingress, taildrop on each queue or
+ * use congestion groups for sets of queues. If set, it configures a single
+ * congestion groups across all TCs. If reset, a congestion group is allocated
+ * for each TC. Only relevant if the DPNI has multiple traffic classes.
*/
#define DPNI_OPT_SHARED_CONGESTION 0x000008
/**
- * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
- * look-ups are exact match. Note that TCAM is not available on LS1088 and its
- * variants. Setting this bit on these SoCs will trigger an error.
+ * DPNI_OPT_HAS_KEY_MASKING - Enables TCAM for Flow Steering and QoS look-ups.
+ * If not specified, all look-ups are exact match. Note that TCAM is not
+ * available on LS1088 and its variants. Setting this bit on these SoCs will
+ * trigger an error.
*/
#define DPNI_OPT_HAS_KEY_MASKING 0x000010
/**
- * Disables the flow steering table.
+ * DPNI_OPT_NO_FS - Disables the flow steering table.
*/
#define DPNI_OPT_NO_FS 0x000020
/**
- * Flow steering table is shared between all traffic classes
+ * DPNI_OPT_SHARED_FS - Flow steering table is shared between all traffic
+ * classes
*/
#define DPNI_OPT_SHARED_FS 0x001000
@@ -129,20 +132,14 @@ int dpni_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-/**
- * DPNI IRQ Index and Events
- */
+/* DPNI IRQ Index and Events */
-/**
- * IRQ index
- */
#define DPNI_IRQ_INDEX 0
-/**
- * IRQ events:
- * indicates a change in link state
- * indicates a change in endpoint
- */
+
+/* DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state */
#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+/* DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint */
#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
@@ -222,32 +219,30 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_attr *attr);
-/**
- * DPNI errors
- */
+/* DPNI errors */
/**
- * Extract out of frame header error
+ * DPNI_ERROR_EOFHE - Extract out of frame header error
*/
#define DPNI_ERROR_EOFHE 0x00020000
/**
- * Frame length error
+ * DPNI_ERROR_FLE - Frame length error
*/
#define DPNI_ERROR_FLE 0x00002000
/**
- * Frame physical error
+ * DPNI_ERROR_FPE - Frame physical error
*/
#define DPNI_ERROR_FPE 0x00001000
/**
- * Parsing header error
+ * DPNI_ERROR_PHE - Parsing header error
*/
#define DPNI_ERROR_PHE 0x00000020
/**
- * Parser L3 checksum error
+ * DPNI_ERROR_L3CE - Parser L3 checksum error
*/
#define DPNI_ERROR_L3CE 0x00000004
/**
- * Parser L3 checksum error
+ * DPNI_ERROR_L4CE - Parser L4 checksum error
*/
#define DPNI_ERROR_L4CE 0x00000001
@@ -281,36 +276,35 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_error_cfg *cfg);
-/**
- * DPNI buffer layout modification options
- */
+/* DPNI buffer layout modification options */
/**
- * Select to modify the time-stamp setting
+ * DPNI_BUF_LAYOUT_OPT_TIMESTAMP - Select to modify the time-stamp setting
*/
#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
/**
- * Select to modify the parser-result setting; not applicable for Tx
+ * DPNI_BUF_LAYOUT_OPT_PARSER_RESULT - Select to modify the parser-result
+ * setting; not applicable for Tx
*/
#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
/**
- * Select to modify the frame-status setting
+ * DPNI_BUF_LAYOUT_OPT_FRAME_STATUS - Select to modify the frame-status setting
*/
#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
/**
- * Select to modify the private-data-size setting
+ * DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE - Select to modify the private-data-size setting
*/
#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
/**
- * Select to modify the data-alignment setting
+ * DPNI_BUF_LAYOUT_OPT_DATA_ALIGN - Select to modify the data-alignment setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
/**
- * Select to modify the data-head-room setting
+ * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM - Select to modify the data-head-room setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
/**
- * Select to modify the data-tail-room setting
+ * DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM - Select to modify the data-tail-room setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
@@ -343,7 +337,8 @@ struct dpni_buffer_layout {
* @DPNI_QUEUE_TX: Tx queue
* @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
* @DPNI_QUEUE_RX_ERR: Rx error queue
- */enum dpni_queue_type {
+ */
+enum dpni_queue_type {
DPNI_QUEUE_RX,
DPNI_QUEUE_TX,
DPNI_QUEUE_TX_CONFIRM,
@@ -424,7 +419,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
* lack of buffers
* @page_2.egress_discarded_frames: Egress discarded frame count
* @page_2.egress_confirmed_frames: Egress confirmed frame count
- * @page3: Page_3 statistics structure
+ * @page_3: Page_3 statistics structure
* @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes
* dequeued from egress FQs
* @page_3.egress_dequeue_frames: Cumulative count of the number of frames
@@ -501,30 +496,14 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
u8 page,
union dpni_statistics *stat);
-/**
- * Enable auto-negotiation
- */
#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
-/**
- * Enable half-duplex mode
- */
#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-/**
- * Enable pause frames
- */
#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
-/**
- * Enable a-symmetric pause frames
- */
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-
-/**
- * Enable priority flow control pause frames
- */
#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
/**
- * struct - Structure representing DPNI link configuration
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
*/
@@ -687,8 +666,8 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_tc_dist_cfg *cfg);
/**
- * When used for fs_miss_flow_id in function dpni_set_rx_dist,
- * will signal to dpni to drop all unclassified frames
+ * DPNI_FS_MISS_DROP - When used for fs_miss_flow_id in function
+ * dpni_set_rx_dist, will signal to dpni to drop all unclassified frames
*/
#define DPNI_FS_MISS_DROP ((uint16_t)-1)
@@ -766,7 +745,7 @@ enum dpni_dest {
/**
* struct dpni_queue - Queue structure
- * @destination - Destination structure
+ * @destination: Destination structure
* @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
* Identifies either a DPIO or a DPCON object.
* Not relevant for Tx queues.
@@ -837,9 +816,7 @@ struct dpni_queue_id {
u16 qdbin;
};
-/**
- * Set User Context
- */
+/* Set User Context */
#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
#define DPNI_QUEUE_OPT_DEST 0x00000002
#define DPNI_QUEUE_OPT_FLC 0x00000004
@@ -904,9 +881,9 @@ struct dpni_dest_cfg {
/* DPNI congestion options */
/**
- * This congestion will trigger flow control or priority flow control.
- * This will have effect only if flow control is enabled with
- * dpni_set_link_cfg().
+ * DPNI_CONG_OPT_FLOW_CONTROL - This congestion will trigger flow control or
+ * priority flow control. This will have effect only if flow control is
+ * enabled with dpni_set_link_cfg().
*/
#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
@@ -990,23 +967,24 @@ struct dpni_rule_cfg {
};
/**
- * Discard matching traffic. If set, this takes precedence over any other
- * configuration and matching traffic is always discarded.
+ * DPNI_FS_OPT_DISCARD - Discard matching traffic. If set, this takes
+ * precedence over any other configuration and matching traffic is always
+ * discarded.
*/
#define DPNI_FS_OPT_DISCARD 0x1
/**
- * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
- * override the FLC value set per queue.
+ * DPNI_FS_OPT_SET_FLC - Set FLC value. If set, flc member of struct
+ * dpni_fs_action_cfg is used to override the FLC value set per queue.
* For more details check the Frame Descriptor section in the hardware
* documentation.
*/
#define DPNI_FS_OPT_SET_FLC 0x2
/**
- * Indicates whether the 6 lowest significant bits of FLC are used for stash
- * control. If set, the 6 least significant bits in value are interpreted as
- * follows:
+ * DPNI_FS_OPT_SET_STASH_CONTROL - Indicates whether the 6 lowest significant
+ * bits of FLC are used for stash control. If set, the 6 least significant bits
+ * in value are interpreted as follows:
* - bits 0-1: indicates the number of 64 byte units of context that are
* stashed. FLC value is interpreted as a memory address in this case,
* excluding the 6 LS bits.
@@ -1068,7 +1046,7 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
u16 *major_ver,
u16 *minor_ver);
/**
- * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
* @rate_limit: Rate in Mbps
* @max_burst_size: Burst size in bytes (up to 64KB)
*/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index 05c413719e55..01d77c685a5b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -13,9 +13,6 @@
struct fsl_mc_io;
-/**
- * Number of irq's
- */
#define DPRTC_MAX_IRQ_NUM 1
#define DPRTC_IRQ_INDEX 0
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
index 450841cc6ca8..cb13e740f72b 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
@@ -1,16 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2020 NXP
+ * Copyright 2017-2021 NXP
*
*/
#ifndef __FSL_DPSW_CMD_H
#define __FSL_DPSW_CMD_H
+#include "dpsw.h"
+
/* DPSW Version */
#define DPSW_VER_MAJOR 8
-#define DPSW_VER_MINOR 5
+#define DPSW_VER_MINOR 9
#define DPSW_CMD_BASE_VERSION 1
#define DPSW_CMD_VERSION_2 2
@@ -27,7 +29,7 @@
#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
-#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
+#define DPSW_CMDID_GET_ATTR DPSW_CMD_V2(0x004)
#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
@@ -45,18 +47,18 @@
#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
+
#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
-#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
-#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
-#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_V2(0x061)
#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
@@ -64,16 +66,31 @@
#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
-#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
#define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A)
+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
+
#define DPSW_CMDID_IF_GET_PORT_MAC_ADDR DPSW_CMD_ID(0x0A7)
-#define DPSW_CMDID_IF_GET_PRIMARY_MAC_ADDR DPSW_CMD_ID(0x0A8)
-#define DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR DPSW_CMD_ID(0x0A9)
+
+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
+#define DPSW_CMDID_CTRL_IF_SET_QUEUE DPSW_CMD_ID(0x0A6)
+
+#define DPSW_CMDID_SET_EGRESS_FLOOD DPSW_CMD_ID(0x0AC)
+#define DPSW_CMDID_IF_SET_LEARNING_MODE DPSW_CMD_ID(0x0AD)
/* Macros for accessing command fields smaller than 1byte */
#define DPSW_MASK(field) \
@@ -169,6 +186,12 @@ struct dpsw_cmd_clear_irq_status {
#define DPSW_COMPONENT_TYPE_SHIFT 0
#define DPSW_COMPONENT_TYPE_SIZE 4
+#define DPSW_FLOODING_CFG_SHIFT 0
+#define DPSW_FLOODING_CFG_SIZE 4
+
+#define DPSW_BROADCAST_CFG_SHIFT 4
+#define DPSW_BROADCAST_CFG_SIZE 4
+
struct dpsw_rsp_get_attr {
/* cmd word 0 */
__le16 num_ifs;
@@ -186,23 +209,15 @@ struct dpsw_rsp_get_attr {
u8 max_meters_per_if;
/* from LSB only the first 4 bits */
u8 component_type;
- __le16 pad;
+ /* [0:3] - flooding configuration
+ * [4:7] - broadcast configuration
+ */
+ u8 repl_cfg;
+ u8 pad;
/* cmd word 3 */
__le64 options;
};
-struct dpsw_cmd_if_set_flooding {
- __le16 if_id;
- /* from LSB: enable:1 */
- u8 enable;
-};
-
-struct dpsw_cmd_if_set_broadcast {
- __le16 if_id;
- /* from LSB: enable:1 */
- u8 enable;
-};
-
#define DPSW_VLAN_ID_SHIFT 0
#define DPSW_VLAN_ID_SIZE 12
#define DPSW_DEI_SHIFT 12
@@ -255,6 +270,28 @@ struct dpsw_cmd_if {
__le16 if_id;
};
+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
+#define DPSW_ADMIT_UNTAGGED_SIZE 4
+#define DPSW_ENABLED_SHIFT 5
+#define DPSW_ENABLED_SIZE 1
+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
+
+struct dpsw_rsp_if_get_attr {
+ /* cmd word 0 */
+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
+ u8 conf;
+ u8 pad1;
+ u8 num_tcs;
+ u8 pad2;
+ __le16 qdid;
+ /* cmd word 1 */
+ __le32 options;
+ __le32 pad3;
+ /* cmd word 2 */
+ __le32 rate;
+};
+
struct dpsw_cmd_if_set_max_frame_length {
__le16 if_id;
__le16 frame_length;
@@ -295,13 +332,23 @@ struct dpsw_vlan_add {
__le16 vlan_id;
};
+struct dpsw_cmd_vlan_add_if {
+ /* cmd word 0 */
+ __le16 options;
+ __le16 vlan_id;
+ __le16 fdb_id;
+ __le16 pad0;
+	/* cmd word 1 */
+ __le64 if_id;
+};
+
struct dpsw_cmd_vlan_manage_if {
/* cmd word 0 */
__le16 pad0;
__le16 vlan_id;
__le32 pad1;
/* cmd word 1-4 */
- __le64 if_id[4];
+ __le64 if_id;
};
struct dpsw_cmd_vlan_remove {
@@ -311,7 +358,7 @@ struct dpsw_cmd_vlan_remove {
struct dpsw_cmd_fdb_add {
__le32 pad;
- __le16 fdb_aging_time;
+ __le16 fdb_ageing_time;
__le16 num_fdb_entries;
};
@@ -347,16 +394,7 @@ struct dpsw_cmd_fdb_multicast_op {
u8 mac_addr[6];
__le16 pad2;
/* cmd word 2-5 */
- __le64 if_id[4];
-};
-
-#define DPSW_LEARNING_MODE_SHIFT 0
-#define DPSW_LEARNING_MODE_SIZE 4
-
-struct dpsw_cmd_fdb_set_learning_mode {
- __le16 fdb_id;
- /* only the first 4 bits from LSB */
- u8 mode;
+ __le64 if_id;
};
struct dpsw_cmd_fdb_dump {
@@ -371,6 +409,36 @@ struct dpsw_rsp_fdb_dump {
__le16 num_entries;
};
+struct dpsw_rsp_ctrl_if_get_attr {
+ __le64 pad;
+ __le32 rx_fqid;
+ __le32 rx_err_fqid;
+ __le32 tx_err_conf_fqid;
+};
+
+#define DPSW_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
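+/* e.g. DPSW_BACKUP_POOL(1, 2) = BIT(2), marking DPBP 2 as a backup pool */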
+struct dpsw_cmd_ctrl_if_set_pools {
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ __le32 dpbp_id[DPSW_MAX_DPBP];
+ __le16 buffer_size[DPSW_MAX_DPBP];
+};
+
+#define DPSW_DEST_TYPE_SHIFT 0
+#define DPSW_DEST_TYPE_SIZE 4
+
+struct dpsw_cmd_ctrl_if_set_queue {
+ __le32 dest_id;
+ u8 dest_priority;
+ u8 pad;
+ /* from LSB: dest_type:4 */
+ u8 dest_type;
+ u8 qtype;
+ __le64 user_ctx;
+ __le32 options;
+};
+
struct dpsw_rsp_get_api_version {
__le16 version_major;
__le16 version_minor;
@@ -381,10 +449,89 @@ struct dpsw_rsp_if_get_mac_addr {
u8 mac_addr[6];
};
-struct dpsw_cmd_if_set_mac_addr {
+struct dpsw_cmd_set_egress_flood {
+ __le16 fdb_id;
+ u8 flood_type;
+ u8 pad[5];
+ __le64 if_id;
+};
+
+#define DPSW_LEARNING_MODE_SHIFT 0
+#define DPSW_LEARNING_MODE_SIZE 4
+
+struct dpsw_cmd_if_set_learning_mode {
__le16 if_id;
- u8 mac_addr[6];
+ /* only the first 4 bits from LSB */
+ u8 mode;
+};
+
+struct dpsw_cmd_acl_add {
+ __le16 pad;
+ __le16 max_entries;
+};
+
+struct dpsw_rsp_acl_add {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_remove {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_if {
+ __le16 acl_id;
+ __le16 num_ifs;
+ __le32 pad;
+ __le64 if_id;
+};
+
+struct dpsw_prep_acl_entry {
+ u8 match_l2_dest_mac[6];
+ __le16 match_l2_tpid;
+
+ u8 match_l2_source_mac[6];
+ __le16 match_l2_vlan_id;
+
+ __le32 match_l3_dest_ip;
+ __le32 match_l3_source_ip;
+
+ __le16 match_l4_dest_port;
+ __le16 match_l4_source_port;
+ __le16 match_l2_ether_type;
+ u8 match_l2_pcp_dei;
+ u8 match_l3_dscp;
+
+ u8 mask_l2_dest_mac[6];
+ __le16 mask_l2_tpid;
+
+ u8 mask_l2_source_mac[6];
+ __le16 mask_l2_vlan_id;
+
+ __le32 mask_l3_dest_ip;
+ __le32 mask_l3_source_ip;
+
+ __le16 mask_l4_dest_port;
+ __le16 mask_l4_source_port;
+ __le16 mask_l2_ether_type;
+ u8 mask_l2_pcp_dei;
+ u8 mask_l3_dscp;
+
+ u8 match_l3_protocol;
+ u8 mask_l3_protocol;
};
+#define DPSW_RESULT_ACTION_SHIFT 0
+#define DPSW_RESULT_ACTION_SIZE 4
+
+struct dpsw_cmd_acl_entry {
+ __le16 acl_id;
+ __le16 result_if_id;
+ __le32 precedence;
+ /* from LSB only the first 4 bits */
+ u8 result_action;
+ u8 pad[7];
+ __le64 pad2[4];
+ __le64 key_iova;
+};
#pragma pack(pop)
#endif /* __FSL_DPSW_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
index f8bfe779bd30..6352d6d1ecba 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2021 NXP
*
*/
@@ -9,9 +9,7 @@
#include "dpsw.h"
#include "dpsw-cmd.h"
-static void build_if_id_bitmap(__le64 *bmap,
- const u16 *id,
- const u16 num_ifs)
+static void build_if_id_bitmap(__le64 *bmap, const u16 *id, const u16 num_ifs)
{
int i;
@@ -38,10 +36,7 @@ static void build_if_id_bitmap(__le64 *bmap,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dpsw_id,
- u16 *token)
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_open *cmd_params;
@@ -76,9 +71,7 @@ int dpsw_open(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
@@ -99,9 +92,7 @@ int dpsw_close(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
@@ -122,9 +113,7 @@ int dpsw_enable(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
@@ -145,9 +134,7 @@ int dpsw_disable(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
struct fsl_mc_command cmd = { 0 };
@@ -175,11 +162,8 @@ int dpsw_reset(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en)
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_set_irq_enable *cmd_params;
@@ -212,11 +196,8 @@ int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask)
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_set_irq_mask *cmd_params;
@@ -245,11 +226,8 @@ int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status)
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_get_irq_status *cmd_params;
@@ -288,11 +266,8 @@ int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_clear_irq_status *cmd_params;
@@ -318,9 +293,7 @@ int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
struct dpsw_attr *attr)
{
struct fsl_mc_command cmd = { 0 };
@@ -351,9 +324,9 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io,
attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
attr->max_meters_per_if = rsp_params->max_meters_per_if;
attr->options = le64_to_cpu(rsp_params->options);
- attr->component_type = dpsw_get_field(rsp_params->component_type,
- COMPONENT_TYPE);
-
+ attr->component_type = dpsw_get_field(rsp_params->component_type, COMPONENT_TYPE);
+ attr->flooding_cfg = dpsw_get_field(rsp_params->repl_cfg, FLOODING_CFG);
+ attr->broadcast_cfg = dpsw_get_field(rsp_params->repl_cfg, BROADCAST_CFG);
return 0;
}
@@ -367,10 +340,7 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
struct dpsw_link_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
@@ -397,13 +367,10 @@ int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
* @if_id: Interface id
* @state: Link state 1 - linkup, 0 - link down or disconnected
*
- * @Return '0' on Success; Error code otherwise.
+ * Return: '0' on Success; Error code otherwise.
*/
-int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- struct dpsw_link_state *state)
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_if_get_link_state *cmd_params;
@@ -432,68 +399,6 @@ int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
}
/**
- * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: Interface Identifier
- * @en: 1 - enable, 0 - disable
- *
- * Return: Completion status. '0' on Success; Error code otherwise.
- */
-int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- u8 en)
-{
- struct fsl_mc_command cmd = { 0 };
- struct dpsw_cmd_if_set_flooding *cmd_params;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
- cmd_flags,
- token);
- cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
- cmd_params->if_id = cpu_to_le16(if_id);
- dpsw_set_field(cmd_params->enable, ENABLE, en);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: Interface Identifier
- * @en: 1 - enable, 0 - disable
- *
- * Return: Completion status. '0' on Success; Error code otherwise.
- */
-int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- u8 en)
-{
- struct fsl_mc_command cmd = { 0 };
- struct dpsw_cmd_if_set_broadcast *cmd_params;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
- cmd_flags,
- token);
- cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
- cmd_params->if_id = cpu_to_le16(if_id);
- dpsw_set_field(cmd_params->enable, ENABLE, en);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
* dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -503,10 +408,7 @@ int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
const struct dpsw_tci_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
@@ -538,10 +440,7 @@ int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
struct dpsw_tci_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
@@ -583,10 +482,7 @@ int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
const struct dpsw_stp_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
@@ -616,12 +512,8 @@ int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- enum dpsw_counter type,
- u64 *counter)
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_if_get_counter *cmd_params;
@@ -657,10 +549,7 @@ int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id)
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_if *cmd_params;
@@ -685,10 +574,7 @@ int dpsw_if_enable(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id)
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_if *cmd_params;
@@ -705,6 +591,47 @@ int dpsw_if_disable(struct fsl_mc_io *mc_io,
}
/**
+ * dpsw_if_get_attributes() - Function obtains attributes of interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @attr: Returned interface attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr)
+{
+ struct dpsw_rsp_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->rate = le32_to_cpu(rsp_params->rate);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->qdid = le16_to_cpu(rsp_params->qdid);
+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
+ ACCEPT_ALL_VLAN);
+ attr->admit_untagged = dpsw_get_field(rsp_params->conf,
+ ADMIT_UNTAGGED);
+
+ return 0;
+}
+
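
As a rough usage sketch (not part of the patch): a caller holding an open DPSW token could query an interface as below. The helper name and the cmd_flags value of 0 are illustrative assumptions.

static int example_show_if(struct fsl_mc_io *mc_io, u16 token, u16 if_id)
{
	struct dpsw_if_attr attr;
	int err;

	err = dpsw_if_get_attributes(mc_io, 0, token, if_id, &attr);
	if (err)
		return err;

	/* attr is now populated from the MC response */
	pr_info("if %u: rate %u bps, %u TCs, enabled=%d\n",
		if_id, attr.rate, attr.num_tcs, attr.enabled);

	return 0;
}
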
+/**
* dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -714,11 +641,8 @@ int dpsw_if_disable(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- u16 frame_length)
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_if_set_max_frame_length *cmd_params;
@@ -752,11 +676,8 @@ int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_vlan_add(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_cfg *cfg)
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_vlan_add *cmd_params;
@@ -788,22 +709,21 @@ int dpsw_vlan_add(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg)
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
{
+ struct dpsw_cmd_vlan_add_if *cmd_params;
struct fsl_mc_command cmd = { 0 };
- struct dpsw_cmd_vlan_manage_if *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
cmd_flags,
token);
- cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params = (struct dpsw_cmd_vlan_add_if *)cmd.params;
cmd_params->vlan_id = cpu_to_le16(vlan_id);
- build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ cmd_params->options = cpu_to_le16(cfg->options);
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -826,11 +746,8 @@ int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg)
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_vlan_manage_if *cmd_params;
@@ -841,7 +758,7 @@ int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
cmd_params->vlan_id = cpu_to_le16(vlan_id);
- build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -860,11 +777,8 @@ int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg)
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_vlan_manage_if *cmd_params;
@@ -875,7 +789,7 @@ int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
cmd_params->vlan_id = cpu_to_le16(vlan_id);
- build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -896,11 +810,8 @@ int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg)
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_vlan_manage_if *cmd_params;
@@ -911,7 +822,7 @@ int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
cmd_params->vlan_id = cpu_to_le16(vlan_id);
- build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -926,9 +837,7 @@ int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 vlan_id)
{
struct fsl_mc_command cmd = { 0 };
@@ -946,6 +855,66 @@ int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
}
/**
+ * dpsw_fdb_add() - Add an FDB table to the switch and return a handle to it
+ * for future reference
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Returned Forwarding Database Identifier
+ * @cfg: FDB Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg)
+{
+ struct dpsw_cmd_fdb_add *cmd_params;
+ struct dpsw_rsp_fdb_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
+ cmd_params->fdb_ageing_time = cpu_to_le16(cfg->fdb_ageing_time);
+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_fdb_remove() - Remove FDB from switch
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id)
+{
+ struct dpsw_cmd_fdb_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
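
A minimal lifecycle sketch for the two calls above, assuming an already-open DPSW token; the entry count and ageing time are illustrative values, not recommendations.

static int example_fdb_lifecycle(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_fdb_cfg cfg = {
		.num_fdb_entries = 1024,
		.fdb_ageing_time = 300,		/* seconds */
	};
	u16 fdb_id;
	int err;

	err = dpsw_fdb_add(mc_io, 0, token, &fdb_id, &cfg);
	if (err)
		return err;

	/* ... use fdb_id with dpsw_vlan_add_if(), dpsw_fdb_add_unicast(), ... */

	return dpsw_fdb_remove(mc_io, 0, token, fdb_id);
}
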
+/**
* dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -955,11 +924,8 @@ int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_unicast_cfg *cfg)
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_fdb_unicast_op *cmd_params;
@@ -998,13 +964,8 @@ int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
* The struct fdb_dump_entry array must be parsed until the end of memory
* area or until an entry with mac_addr set to zero is found.
*/
-int dpsw_fdb_dump(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- u64 iova_addr,
- u32 iova_size,
- u16 *num_entries)
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries)
{
struct dpsw_cmd_fdb_dump *cmd_params;
struct dpsw_rsp_fdb_dump *rsp_params;
@@ -1041,11 +1002,8 @@ int dpsw_fdb_dump(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_unicast_cfg *cfg)
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_fdb_unicast_op *cmd_params;
@@ -1083,11 +1041,8 @@ int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_multicast_cfg *cfg)
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_fdb_multicast_op *cmd_params;
@@ -1101,7 +1056,7 @@ int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
cmd_params->fdb_id = cpu_to_le16(fdb_id);
cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
- build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
for (i = 0; i < 6; i++)
cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
@@ -1125,11 +1080,8 @@ int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_multicast_cfg *cfg)
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_cmd_fdb_multicast_op *cmd_params;
@@ -1143,7 +1095,7 @@ int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
cmd_params->fdb_id = cpu_to_le16(fdb_id);
cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
- build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
for (i = 0; i < 6; i++)
cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
@@ -1152,33 +1104,97 @@ int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
}
/**
- * dpsw_fdb_set_learning_mode() - Define FDB learning mode
+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
- * @fdb_id: Forwarding Database Identifier
- * @mode: Learning mode
+ * @attr: Returned control interface attributes
*
- * Return: Completion status. '0' on Success; Error code otherwise.
+ * Return: '0' on Success; Error code otherwise.
*/
-int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- enum dpsw_fdb_learning_mode mode)
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr)
{
+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
struct fsl_mc_command cmd = { 0 };
- struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
+ int err;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
+ cmd_flags, token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Buffer pools configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int i;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
+ cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPSW_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ return mc_send_command(mc_io, &cmd);
+}
+
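
A hedged sketch of seeding the control interface with a single buffer pool; the DPBP object is assumed to have been obtained elsewhere, and the buffer size is illustrative.

static int example_ctrl_if_pools(struct fsl_mc_io *mc_io, u16 token,
				 int dpbp_id)
{
	struct dpsw_ctrl_if_pools_cfg cfg = { 0 };

	cfg.num_dpbp = 1;
	cfg.pools[0].dpbp_id = dpbp_id;
	cfg.pools[0].buffer_size = 2048;	/* illustrative */
	cfg.pools[0].backup_pool = 0;

	return dpsw_ctrl_if_set_pools(mc_io, 0, token, &cfg);
}
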
+/**
+ * dpsw_ctrl_if_set_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of dpsw object
+ * @qtype: dpsw_queue_type of the targeted queue
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_QUEUE,
cmd_flags,
token);
- cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
- cmd_params->fdb_id = cpu_to_le16(fdb_id);
- dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->qtype = qtype;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpsw_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
- /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
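
For illustration only, a caller could steer control-interface Rx traffic to a DPIO with the function above; the DPIO ID and user context are assumed inputs in this sketch.

static int example_route_rx_to_dpio(struct fsl_mc_io *mc_io, u16 token,
				    int dpio_id, u64 ctx)
{
	struct dpsw_ctrl_if_queue_cfg cfg = {
		.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
			   DPSW_CTRL_IF_QUEUE_OPT_USER_CTX,
		.user_ctx = ctx,
		.dest_cfg = {
			.dest_type = DPSW_CTRL_IF_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	return dpsw_ctrl_if_set_queue(mc_io, 0, token, DPSW_QUEUE_RX, &cfg);
}
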
@@ -1191,10 +1207,8 @@ int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpsw_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver)
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
{
struct fsl_mc_command cmd = { 0 };
struct dpsw_rsp_get_api_version *rsp_params;
@@ -1216,7 +1230,7 @@ int dpsw_get_api_version(struct fsl_mc_io *mc_io,
}
/**
- * dpsw_if_get_port_mac_addr()
+ * dpsw_if_get_port_mac_addr() - Retrieve the MAC address associated with the physical port
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
@@ -1254,68 +1268,313 @@ int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
}
/**
- * dpsw_if_get_primary_mac_addr()
+ * dpsw_ctrl_if_enable() - Enable the control interface
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
- * @if_id: Interface Identifier
- * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_disable() - Disable the control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_egress_flood() - Set egress parameters associated with an FDB ID
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Egress flooding configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg)
+{
+ struct dpsw_cmd_set_egress_flood *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_EGRESS_FLOOD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_set_egress_flood *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->flood_type = cfg->flood_type;
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
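
A sketch of pointing an FDB's unknown-destination flood list at a set of ports; the 0-based interface numbering here is an assumption made for illustration.

static int example_flood_all_ports(struct fsl_mc_io *mc_io, u16 token,
				   u16 fdb_id, u16 num_ifs)
{
	struct dpsw_egress_flood_cfg cfg = { 0 };
	u16 i;

	if (num_ifs > DPSW_MAX_IF)
		return -EINVAL;

	cfg.fdb_id = fdb_id;
	cfg.flood_type = DPSW_FLOODING;
	cfg.num_ifs = num_ifs;
	for (i = 0; i < num_ifs; i++)
		cfg.if_id[i] = i;

	return mc_send_command_wrapper(mc_io, token, &cfg) ? 0 :
	       dpsw_set_egress_flood(mc_io, 0, token, &cfg);
}
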
+/**
+ * dpsw_if_set_learning_mode() - Configure the learning mode on an interface.
+ * If this API is used, it will take precedence over the FDB configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @mode: Learning mode
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
-int dpsw_if_get_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u16 if_id, u8 mac_addr[6])
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode)
{
- struct dpsw_rsp_if_get_mac_addr *rsp_params;
+ struct dpsw_cmd_if_set_learning_mode *cmd_params;
struct fsl_mc_command cmd = { 0 };
- struct dpsw_cmd_if *cmd_params;
- int err, i;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR,
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LEARNING_MODE,
cmd_flags,
token);
- cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params;
cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add() - Create an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: Returned ACL ID, for future reference
+ * @cfg: ACL configuration
+ *
+ * Create an Access Control List table. Multiple ACLs can be created and
+ * co-exist in the L2 switch.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg)
+{
+ struct dpsw_cmd_acl_add *cmd_params;
+ struct dpsw_rsp_acl_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
- /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- /* retrieve response parameters */
- rsp_params = (struct dpsw_rsp_if_get_mac_addr *)cmd.params;
- for (i = 0; i < 6; i++)
- mac_addr[5 - i] = rsp_params->mac_addr[i];
+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
+ *acl_id = le16_to_cpu(rsp_params->acl_id);
return 0;
}
/**
- * dpsw_if_set_primary_mac_addr()
+ * dpsw_acl_remove() - Remove an ACL table from the L2 switch.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
- * @if_id: Interface Identifier
- * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ * @acl_id: ACL ID
*
- * Return: Completion status. '0' on Success; Error code otherwise.
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id)
+{
+ struct dpsw_cmd_acl_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add_if() - Associate one or more interfaces with an ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
-int dpsw_if_set_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u16 if_id, u8 mac_addr[6])
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
{
- struct dpsw_cmd_if_set_mac_addr *cmd_params;
+ struct dpsw_cmd_acl_if *cmd_params;
struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_if() - Disassociate one or more interfaces from an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_prepare_entry_cfg() - Set up an ACL entry
+ * @key: ACL key to serialize into the buffer
+ * @entry_cfg_buf: Zeroed 256-byte buffer, to be DMA mapped by the caller
+ *
+ * This function has to be called before adding or removing an ACL entry.
+ */
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf)
+{
+ struct dpsw_prep_acl_entry *ext_params;
int i;
+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
+
+ for (i = 0; i < 6; i++) {
+ ext_params->match_l2_dest_mac[i] = key->match.l2_dest_mac[5 - i];
+ ext_params->match_l2_source_mac[i] = key->match.l2_source_mac[5 - i];
+ ext_params->mask_l2_dest_mac[i] = key->mask.l2_dest_mac[5 - i];
+ ext_params->mask_l2_source_mac[i] = key->mask.l2_source_mac[5 - i];
+ }
+
+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
+ ext_params->match_l4_source_port = cpu_to_le16(key->match.l4_source_port);
+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
+ ext_params->match_l3_dscp = key->match.l3_dscp;
+
+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
+ ext_params->match_l3_protocol = key->match.l3_protocol;
+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
+}
+
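
To make the prepare/add ordering concrete, here is a hedged sketch of installing a drop rule. The 256-byte buffer size follows the kernel-doc above, dev is assumed to be the DMA-capable fsl-mc device, and the helper name is hypothetical.

static int example_acl_install_drop(struct fsl_mc_io *mc_io, struct device *dev,
				    u16 token, u16 acl_id,
				    const struct dpsw_acl_key *key)
{
	struct dpsw_acl_entry_cfg cfg = { 0 };
	u8 *buf;
	int err;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(key, buf);

	cfg.key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cfg.key_iova)) {
		kfree(buf);
		return -ENOMEM;
	}
	cfg.precedence = 0;
	cfg.result.action = DPSW_ACL_ACTION_DROP;

	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &cfg);

	dma_unmap_single(dev, cfg.key_iova, 256, DMA_TO_DEVICE);
	kfree(buf);
	return err;
}
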
+/**
+ * dpsw_acl_add_entry() - Add a rule to the ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_entry() - Remove an entry from the ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_PRIMARY_MAC_ADDR,
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
cmd_flags,
token);
- cmd_params = (struct dpsw_cmd_if_set_mac_addr *)cmd.params;
- cmd_params->if_id = cpu_to_le16(if_id);
- for (i = 0; i < 6; i++)
- cmd_params->mac_addr[i] = mac_addr[5 - i];
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
new file mode 100644
index 000000000000..5ef221a25b02
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
@@ -0,0 +1,755 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+/* Data Path L2-Switch API
+ * Contains API for handling DPSW topology and functionality
+ */
+
+struct fsl_mc_io;
+
+/* DPSW general definitions */
+
+#define DPSW_MAX_PRIORITIES 8
+
+#define DPSW_MAX_IF 64
+
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token);
+
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW options */
+
+/**
+ * DPSW_OPT_FLOODING_DIS - Flooding was disabled at device create
+ */
+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
+/**
+ * DPSW_OPT_MULTICAST_DIS - Multicast was disabled at device create
+ */
+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
+/**
+ * DPSW_OPT_CTRL_IF_DIS - Control interface support is disabled
+ */
+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
+
+/**
+ * enum dpsw_component_type - component type of a bridge
+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
+ * enterprise VLAN bridge or of a Provider Bridge used
+ * to process C-tagged frames
+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
+ * Provider Bridge
+ *
+ */
+enum dpsw_component_type {
+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
+ DPSW_COMPONENT_TYPE_S_VLAN
+};
+
+/**
+ * enum dpsw_flooding_cfg - flooding configuration requested
+ * @DPSW_FLOODING_PER_VLAN: Flooding replicators are allocated per VLAN and
+ * interfaces present in each of them can be configured using
+ * dpsw_vlan_add_if_flooding()/dpsw_vlan_remove_if_flooding().
+ * This is the default configuration.
+ *
+ * @DPSW_FLOODING_PER_FDB: Flooding replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_flooding_cfg {
+ DPSW_FLOODING_PER_VLAN = 0,
+ DPSW_FLOODING_PER_FDB,
+};
+
+/**
+ * enum dpsw_broadcast_cfg - broadcast configuration requested
+ * @DPSW_BROADCAST_PER_OBJECT: There is only one broadcast replicator per DPSW
+ * object. This is the default configuration.
+ * @DPSW_BROADCAST_PER_FDB: Broadcast replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_broadcast_cfg {
+ DPSW_BROADCAST_PER_OBJECT = 0,
+ DPSW_BROADCAST_PER_FDB,
+};
+
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW IRQ Index and Events */
+
+#define DPSW_IRQ_INDEX_IF 0x0000
+#define DPSW_IRQ_INDEX_L2SW 0x0001
+
+/**
+ * DPSW_IRQ_EVENT_LINK_CHANGED - Indicates that the link state changed
+ */
+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
+
+/**
+ * struct dpsw_irq_cfg - IRQ configuration
+ * @addr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dpsw_irq_cfg {
+ u64 addr;
+ u32 val;
+ int irq_num;
+};
+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en);
+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask);
+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status);
+
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status);
+
+/**
+ * struct dpsw_attr - Structure representing DPSW attributes
+ * @id: DPSW object ID
+ * @options: Enable/Disable DPSW features
+ * @max_vlans: Maximum Number of VLANs
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum Number of FDBs
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ * 0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table;
+ * 0 - indicates default 300 seconds
+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 - indicates default 32
+ * @mem_size: DPSW frame storage memory size
+ * @num_ifs: Number of interfaces
+ * @num_vlans: Current number of VLANs
+ * @num_fdbs: Current number of FDBs
+ * @component_type: Component type of this bridge
+ * @flooding_cfg: Flooding configuration (PER_VLAN - default, PER_FDB)
+ * @broadcast_cfg: Broadcast configuration (PER_OBJECT - default, PER_FDB)
+ */
+struct dpsw_attr {
+ int id;
+ u64 options;
+ u16 max_vlans;
+ u8 max_meters_per_if;
+ u8 max_fdbs;
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ u16 max_fdb_mc_groups;
+ u16 num_ifs;
+ u16 mem_size;
+ u16 num_vlans;
+ u8 num_fdbs;
+ enum dpsw_component_type component_type;
+ enum dpsw_flooding_cfg flooding_cfg;
+ enum dpsw_broadcast_cfg broadcast_cfg;
+};
+
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr);
+
+/**
+ * struct dpsw_ctrl_if_attr - Control interface attributes
+ * @rx_fqid: Receive FQID
+ * @rx_err_fqid: Receive error FQID
+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
+ */
+struct dpsw_ctrl_if_attr {
+ u32 rx_fqid;
+ u32 rx_err_fqid;
+ u32 tx_err_conf_fqid;
+};
+
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr);
+
+enum dpsw_queue_type {
+ DPSW_QUEUE_RX,
+ DPSW_QUEUE_TX_ERR_CONF,
+ DPSW_QUEUE_RX_ERR,
+};
+
+#define DPSW_MAX_DPBP 8
+
+/**
+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pool parameters; the number of valid entries
+ * must match the 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpsw_ctrl_if_pools_cfg {
+ u8 num_dpbp;
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPSW_MAX_DPBP];
+};
+
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg);
+
+#define DPSW_CTRL_IF_QUEUE_OPT_USER_CTX 0x00000001
+#define DPSW_CTRL_IF_QUEUE_OPT_DEST 0x00000002
+
+enum dpsw_ctrl_if_dest {
+ DPSW_CTRL_IF_DEST_NONE = 0,
+ DPSW_CTRL_IF_DEST_DPIO = 1,
+};
+
+struct dpsw_ctrl_if_dest_cfg {
+ enum dpsw_ctrl_if_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+struct dpsw_ctrl_if_queue_cfg {
+ u32 options;
+ u64 user_ctx;
+ struct dpsw_ctrl_if_dest_cfg dest_cfg;
+};
+
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg);
+
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
+ */
+enum dpsw_action {
+ DPSW_ACTION_DROP = 0,
+ DPSW_ACTION_REDIRECT = 1
+};
+
+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ */
+struct dpsw_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg);
+
+/**
+ * struct dpsw_link_state - Structure representing DPSW link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ * @up: 0 - covers two cases: down and disconnected, 1 - up
+ */
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
+ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state);
+
+/**
+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+ * separately or in conjunction with PCP to indicate frames
+ * eligible to be dropped in the presence of congestion
+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
+ * to which the frame belongs. The hexadecimal values
+ * of 0x000 and 0xFFF are reserved;
+ * all other values may be used as VLAN identifiers,
+ * allowing up to 4,094 VLANs
+ */
+struct dpsw_tci_cfg {
+ u8 pcp;
+ u8 dei;
+ u16 vlan_id;
+};
+
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg);
+
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg);
+
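+
+/* As a usage sketch (hypothetical helper): setting an interface's default
+ * (port) VLAN reduces to filling the three TCI fields.
+ *
+ * static int example_set_pvid(struct fsl_mc_io *mc_io, u16 token, u16 if_id,
+ *			       u16 pvid)
+ * {
+ *	struct dpsw_tci_cfg cfg = {
+ *		.pcp = 0,
+ *		.dei = 0,
+ *		.vlan_id = pvid,
+ *	};
+ *
+ *	return dpsw_if_set_tci(mc_io, 0, token, if_id, &cfg);
+ * }
+ */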
+/**
+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
+ * @DPSW_STP_STATE_DISABLED: Disabled state
+ * @DPSW_STP_STATE_LISTENING: Listening state
+ * @DPSW_STP_STATE_LEARNING: Learning state
+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
+ * @DPSW_STP_STATE_BLOCKING: Blocking state
+ *
+ */
+enum dpsw_stp_state {
+ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
+ DPSW_STP_STATE_FORWARDING = 3,
+ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
+ * @vlan_id: VLAN ID for which the STP state applies
+ * @state: STP state
+ */
+struct dpsw_stp_cfg {
+ u16 vlan_id;
+ enum dpsw_stp_state state;
+};
+
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg);
+
+/**
+ * enum dpsw_accepted_frames - Types of frames to accept
+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
+ * priority tagged frames
+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
+ * priority-tagged frames received on this interface.
+ *
+ */
+enum dpsw_accepted_frames {
+ DPSW_ADMIT_ALL = 1,
+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
+/**
+ * enum dpsw_counter - Counters types
+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress frames discarded for lack of buffers
+ */
+enum dpsw_counter {
+ DPSW_CNT_ING_FRAME = 0x0,
+ DPSW_CNT_ING_BYTE = 0x1,
+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
+ DPSW_CNT_EGR_FRAME = 0x8,
+ DPSW_CNT_EGR_BYTE = 0x9,
+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb,
+ DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc,
+};
+
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter);
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+/**
+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
+ * @num_tcs: Number of traffic classes
+ * @rate: Transmit rate in bits per second
+ * @options: Interface configuration options (bitmap)
+ * @enabled: Indicates if interface is enabled
+ * @accept_all_vlan: The device discards/accepts incoming frames
+ * for VLANs that do not include this interface
+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
+ * discards untagged frames or priority-tagged frames received on
+ * this interface;
+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
+ * tagged frames received on this interface are accepted
+ * @qdid: Control frames transmit QDID
+ */
+struct dpsw_if_attr {
+ u8 num_tcs;
+ u32 rate;
+ u32 options;
+ int enabled;
+ int accept_all_vlan;
+ enum dpsw_accepted_frames admit_untagged;
+ u16 qdid;
+};
+
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr);
+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length);
+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Database (FDB) ID
+ */
+struct dpsw_vlan_cfg {
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg);
+
+#define DPSW_VLAN_ADD_IF_OPT_FDB_ID 0x0001
+
+/**
+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
+ * @num_ifs: The number of interfaces that are assigned to the egress
+ * list for this VLAN
+ * @if_id: The set of interfaces that are
+ * assigned to the egress list for this VLAN
+ * @options: Options map for this command (DPSW_VLAN_ADD_IF_OPT_FDB_ID)
+ * @fdb_id: FDB id to be used by this VLAN on these specific interfaces
+ * (taken into account only if the DPSW_VLAN_ADD_IF_OPT_FDB_ID is
+ * specified in the options field)
+ */
+struct dpsw_vlan_if_cfg {
+ u16 num_ifs;
+ u16 options;
+ u16 if_id[DPSW_MAX_IF];
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ */
+enum dpsw_fdb_entry_type {
+ DPSW_FDB_ENTRY_STATIC = 0,
+ DPSW_FDB_ENTRY_DINAMIC = 1
+};
+
+/**
+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @if_egress: Egress interface ID
+ */
+struct dpsw_fdb_unicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 if_egress;
+};
+
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0)
+#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1)
+
+/**
+ * struct fdb_dump_entry - fdb snapshot entry
+ * @mac_addr: MAC address
+ * @type: bit0 - DYNAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0)
+ * @if_info: unicast - egress interface, multicast - number of egress interfaces
+ * @if_mask: multicast - egress interface mask
+ */
+struct fdb_dump_entry {
+ u8 mac_addr[6];
+ u8 type;
+ u8 if_info;
+ u8 if_mask[8];
+};
+
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries);
+
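+
+/* Following the parsing rule documented for dpsw_fdb_dump() - stop at the end
+ * of the area or at an all-zero MAC - a walker could look like this sketch
+ * (hypothetical helper; is_zero_ether_addr() is from <linux/etherdevice.h>).
+ *
+ * static void example_walk_fdb_dump(struct fdb_dump_entry *entries,
+ *				     u16 num_entries)
+ * {
+ *	u16 i;
+ *
+ *	for (i = 0; i < num_entries; i++) {
+ *		struct fdb_dump_entry *e = &entries[i];
+ *
+ *		if (is_zero_ether_addr(e->mac_addr))
+ *			break;		// terminator entry
+ *
+ *		pr_info("%pM %s %s\n", e->mac_addr,
+ *			(e->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC) ?
+ *				"dynamic" : "static",
+ *			(e->type & DPSW_FDB_ENTRY_TYPE_UNICAST) ?
+ *				"unicast" : "multicast");
+ *	}
+ * }
+ */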
+/**
+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @num_ifs: Number of external and internal interfaces
+ * @if_id: Egress interface IDs
+ */
+struct dpsw_fdb_multicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+/**
+ * enum dpsw_learning_mode - Auto-learning modes
+ * @DPSW_LEARNING_MODE_DIS: Disable Auto-learning
+ * @DPSW_LEARNING_MODE_HW: Enable HW auto-Learning
+ * @DPSW_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
+ * @DPSW_LEARNING_MODE_SECURE: Enable secure learning by CPU
+ *
+ * NON-SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. DMAC destination
+ * 2. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Flooding list of interfaces
+ * 2. Control interface
+ * SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Control interface
+ */
+enum dpsw_learning_mode {
+ DPSW_LEARNING_MODE_DIS = 0,
+ DPSW_LEARNING_MODE_HW = 1,
+ DPSW_LEARNING_MODE_NON_SECURE = 2,
+ DPSW_LEARNING_MODE_SECURE = 3
+};
+
+/**
+ * struct dpsw_fdb_attr - FDB Attributes
+ * @max_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ * @learning_mode: Learning mode
+ * @num_fdb_mc_groups: Current number of multicast groups
+ * @max_fdb_mc_groups: Maximum number of multicast groups
+ */
+struct dpsw_fdb_attr {
+ u16 max_fdb_entries;
+ u16 fdb_ageing_time;
+ enum dpsw_learning_mode learning_mode;
+ u16 num_fdb_mc_groups;
+ u16 max_fdb_mc_groups;
+};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6]);
+
+/**
+ * struct dpsw_fdb_cfg - FDB Configuration
+ * @num_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ */
+struct dpsw_fdb_cfg {
+ u16 num_fdb_entries;
+ u16 fdb_ageing_time;
+};
+
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg);
+
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id);
+
+/**
+ * enum dpsw_flood_type - Define the flood type of a DPSW object
+ * @DPSW_BROADCAST: Broadcast flooding
+ * @DPSW_FLOODING: Flooding of frames with an unknown destination
+ */
+enum dpsw_flood_type {
+ DPSW_BROADCAST = 0,
+ DPSW_FLOODING,
+};
+
+struct dpsw_egress_flood_cfg {
+ u16 fdb_id;
+ enum dpsw_flood_type flood_type;
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg);
+
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode);
+
+/**
+ * struct dpsw_acl_cfg - ACL Configuration
+ * @max_entries: Number of ACL rules
+ */
+struct dpsw_acl_cfg {
+ u16 max_entries;
+};
+
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg);
+
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id);
+
+/**
+ * struct dpsw_acl_if_cfg - List of interfaces to associate with an ACL table
+ * @num_ifs: Number of interfaces
+ * @if_id: List of interfaces
+ */
+struct dpsw_acl_if_cfg {
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+/**
+ * struct dpsw_acl_fields - ACL fields.
+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
+ * slow protocols, MVRP, STP
+ * @l2_source_mac: Source MAC address
+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
+ * Q-in-Q, IPv4, IPv6, PPPoE
+ * @l2_pcp_dei: Layer 2 Priority Code Point (PCP) and Drop Eligible Indicator (DEI)
+ * @l2_vlan_id: layer 2 VLAN ID
+ * @l2_ether_type: layer 2 Ethernet type
+ * @l3_dscp: Layer 3 differentiated services code point
+ * @l3_protocol: Tells the network layer at the destination host which
+ * protocol this packet belongs to. The following protocols are
+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
+ * (encapsulation), GRE, PTP
+ * @l3_source_ip: Source IPv4 IP
+ * @l3_dest_ip: Destination IPv4 IP
+ * @l4_source_port: Source TCP/UDP Port
+ * @l4_dest_port: Destination TCP/UDP Port
+ */
+struct dpsw_acl_fields {
+ u8 l2_dest_mac[6];
+ u8 l2_source_mac[6];
+ u16 l2_tpid;
+ u8 l2_pcp_dei;
+ u16 l2_vlan_id;
+ u16 l2_ether_type;
+ u8 l3_dscp;
+ u8 l3_protocol;
+ u32 l3_source_ip;
+ u32 l3_dest_ip;
+ u16 l4_source_port;
+ u16 l4_dest_port;
+};
+
+/**
+ * struct dpsw_acl_key - ACL key
+ * @match: Match fields
+ * @mask: Mask: b'1 - valid, b'0 don't care
+ */
+struct dpsw_acl_key {
+ struct dpsw_acl_fields match;
+ struct dpsw_acl_fields mask;
+};
+
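+
+/* Since the mask bits mark which match bits are valid, a key that matches
+ * solely on VLAN 100 can be built like this sketch (hypothetical helper;
+ * all other fields stay don't-care).
+ *
+ * static void example_build_vlan_key(struct dpsw_acl_key *key)
+ * {
+ *	memset(key, 0, sizeof(*key));
+ *	key->match.l2_vlan_id = 100;
+ *	key->mask.l2_vlan_id = 0xFFF;	// all 12 VID bits are significant
+ * }
+ */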
+/**
+ * enum dpsw_acl_action - action to be run on the ACL rule match
+ * @DPSW_ACL_ACTION_DROP: Drop frame
+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
+ */
+enum dpsw_acl_action {
+ DPSW_ACL_ACTION_DROP,
+ DPSW_ACL_ACTION_REDIRECT,
+ DPSW_ACL_ACTION_ACCEPT,
+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
+};
+
+/**
+ * struct dpsw_acl_result - ACL action
+ * @action: Action to be taken when the ACL entry is hit
+ * @if_id: Interface ID to redirect the frame to. Valid only if a redirect
+ * action is selected
+ */
+struct dpsw_acl_result {
+ enum dpsw_acl_action action;
+ u16 if_id;
+};
+
+/**
+ * struct dpsw_acl_entry_cfg - ACL entry
+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
+ * to dpsw_acl_prepare_entry_cfg()
+ * @result: Required action when entry hit occurs
+ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
+ * change during the lifetime of a policy. It is the user's
+ * responsibility to space the priorities to allow for subsequent
+ * rule additions.
+ */
+struct dpsw_acl_entry_cfg {
+ u64 key_iova;
+ struct dpsw_acl_result result;
+ int precedence;
+};
+
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf);
+
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+#endif /* __FSL_DPSW_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index ab92382c399a..cdc0ff89388a 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI
+ select FSL_ENETC_IERB
select FSL_ENETC_MDIO
select PHYLINK
select PCS_LYNX
@@ -25,6 +26,14 @@ config FSL_ENETC_VF
If compiled as module (M), the module name is fsl-enetc-vf.
+config FSL_ENETC_IERB
+ tristate "ENETC IERB driver"
+ help
+ This driver configures the Integrated Endpoint Register Block on NXP
+ LS1028A.
+
+ If compiled as module (M), the module name is fsl-enetc-ierb.
+
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
depends on PCI && MDIO_DEVRES && MDIO_BUS
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 74f7ac253b8b..a139f2e9d59f 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -11,6 +11,9 @@ obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
+fsl-enetc-ierb-y := enetc_ierb.o
+
obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 09471329f3a3..3ca93adb9662 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2,88 +2,138 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
+#include <linux/bpf_trace.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
+#include <linux/ptp_classify.h>
+#include <net/pkt_sched.h>
-/* ENETC overhead: optional extension BD + 1 BD gap */
-#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
-#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
-
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- int active_offloads);
-
-netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_bdr *tx_ring;
- int count;
+ int num_tx_rings = priv->num_tx_rings;
+ int i;
- tx_ring = priv->tx_ring[skb->queue_mapping];
+ for (i = 0; i < priv->num_rx_rings; i++)
+ if (priv->rx_ring[i]->xdp.prog)
+ return num_tx_rings - num_possible_cpus();
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ return num_tx_rings;
+}
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
+ struct enetc_bdr *tx_ring)
+{
+ int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
- enetc_unlock_mdio();
+ return priv->rx_ring[index];
+}
- if (unlikely(!count))
- goto drop_packet_err;
+static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
+ return NULL;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
- netif_stop_subqueue(ndev, tx_ring->index);
+ return tx_swbd->skb;
+}
- return NETDEV_TX_OK;
+static struct xdp_frame *
+enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_redirect)
+ return tx_swbd->xdp_frame;
-drop_packet_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
+ /* For XDP_TX, pages come from RX, whereas for the other contexts where
+ * is_dma_page is set, those come from skb_frag_dma_map. We need
+ * to match the DMA mapping length, so we need to differentiate those.
+ */
if (tx_swbd->is_dma_page)
dma_unmap_page(tx_ring->dev, tx_swbd->dma,
- tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
+ tx_swbd->dir);
else
dma_unmap_single(tx_ring->dev, tx_swbd->dma,
- tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->len, tx_swbd->dir);
tx_swbd->dma = 0;
}
-static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
- struct enetc_tx_swbd *tx_swbd)
+static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
{
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+
if (tx_swbd->dma)
enetc_unmap_tx_buff(tx_ring, tx_swbd);
- if (tx_swbd->skb) {
- dev_kfree_skb_any(tx_swbd->skb);
+ if (xdp_frame) {
+ xdp_return_frame(tx_swbd->xdp_frame);
+ tx_swbd->xdp_frame = NULL;
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
tx_swbd->skb = NULL;
}
}
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- int active_offloads)
+/* Let H/W know BD ring has been updated */
+static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
+{
+ /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
+}
+
+static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
+ u8 *msgtype, u8 *twostep,
+ u16 *correction_offset, u16 *body_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
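+
+/* A sketch of the outputs above: for a PTP Sync packet,
+ * *correction_offset points at the correctionField and *body_offset at
+ * the first byte past the common header (the originTimestamp), both
+ * relative to the MAC header, which is what the one-step logic in
+ * enetc_map_tx_buffs() expects.
+ */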
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_tx_swbd *tx_swbd;
- skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
+ u8 msgtype, twostep, udp;
union enetc_tx_bd *txbd;
- bool do_vlan, do_tstamp;
+ u16 offset1, offset2;
int i, count = 0;
+ skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
@@ -104,15 +154,25 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
count++;
do_vlan = skb_vlan_tag_present(skb);
- do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
- tx_swbd->do_tstamp = do_tstamp;
- tx_swbd->check_wb = tx_swbd->do_tstamp;
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
+ &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep)
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ else
+ do_onestep_tstamp = true;
+ } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
- if (do_vlan || do_tstamp)
+ tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
+
+ if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
if (tx_ring->tsd_enable)
@@ -149,7 +209,40 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
}
- if (do_tstamp) {
+ if (do_onestep_tstamp) {
+ u32 lo, hi, val;
+ u64 sec, nsec;
+ u8 *data;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Configure extension BD */
+ temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
+
+ /* Update originTimestamp field of Sync packet
+ * - 48 bits seconds field
+ * - 32 bits nanoseconds field
+ */
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) =
+ htons((sec >> 32) & 0xffff);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(sec & 0xffffffff);
+ *(__be32 *)(data + offset2 + 6) = htonl(nsec);
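+
+ /* Worked example of the layout written above: bytes
+ * [offset2, offset2+1] carry the upper 16 bits of the
+ * 48-bit seconds field, [offset2+2, offset2+5] the lower
+ * 32 bits, and [offset2+6, offset2+9] the 32-bit
+ * nanoseconds, all big endian.
+ */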
+
+ /* Configure single-step register */
+ val = ENETC_PM0_SINGLE_STEP_EN;
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
+ enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ } else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
}
@@ -186,6 +279,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
count++;
}
@@ -194,6 +288,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
temp_bd.flags = flags;
*txbd = temp_bd;
+ tx_ring->tx_swbd[i].is_eof = true;
tx_ring->tx_swbd[i].skb = skb;
enetc_bdr_idx_inc(tx_ring, &i);
@@ -201,8 +296,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb_tx_timestamp(skb);
- /* let H/W know BD ring has been updated */
- enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
+ enetc_update_tx_ring_tail(tx_ring);
return count;
@@ -211,7 +305,7 @@ dma_err:
do {
tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_skb(tx_ring, tx_swbd);
+ enetc_free_tx_frame(tx_ring, tx_swbd);
if (i == 0)
i = tx_ring->bd_count;
i--;
@@ -220,6 +314,76 @@ dma_err:
return 0;
}
+static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int count;
+
+ /* Queue one-step Sync packet if already locked */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
+ &priv->flags)) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ return NETDEV_TX_OK;
+ }
+ }
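+
+ /* Note: the test_and_set_bit_lock() above serializes one-step Sync
+ * packets; the single-step register is a shared resource, so only one
+ * such packet may be in flight at a time. Queued skbs are resent from
+ * the tx_onestep_tstamp work item once the in-flight packet completes
+ * (see enetc_clean_tx_ring()).
+ */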
+
+ tx_ring = priv->tx_ring[skb->queue_mapping];
+
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+
+ if (unlikely(!count))
+ goto drop_packet_err;
+
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ netif_stop_subqueue(ndev, tx_ring->index);
+
+ return NETDEV_TX_OK;
+
+drop_packet_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 udp, msgtype, twostep;
+ u16 offset1, offset2;
+
+ /* Mark the TX timestamp type on skb->cb[0] if required */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
+ skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ } else {
+ skb->cb[0] = 0;
+ }
+
+ /* Fall back to two-step timestamp if not one-step Sync packet */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
+ &offset1, &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
+ skb->cb[0] = ENETC_F_TX_TSTAMP;
+ }
+
+ return enetc_start_xmit(skb, ndev);
+}
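+
+/* Note: skb->cb[0] is used as scratch space here to carry the selected
+ * TX timestamp type (bits within ENETC_F_TX_TSTAMP_MASK) from
+ * enetc_xmit() down to enetc_map_tx_buffs() and the TX completion path.
+ */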
+
static irqreturn_t enetc_msix(int irq, void *data)
{
struct enetc_int_vector *v = data;
@@ -241,10 +405,6 @@ static irqreturn_t enetc_msix(int irq, void *data)
return IRQ_HANDLED;
}
-static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
-static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
- struct napi_struct *napi, int work_limit);
-
static void enetc_rx_dim_work(struct work_struct *w)
{
struct dim *dim = container_of(w, struct dim, work);
@@ -273,55 +433,30 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
net_dim(&v->rx_dim, dim_sample);
}
-static int enetc_poll(struct napi_struct *napi, int budget)
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
- struct enetc_int_vector
- *v = container_of(napi, struct enetc_int_vector, napi);
- bool complete = true;
- int work_done;
- int i;
-
- enetc_lock_mdio();
-
- for (i = 0; i < v->count_tx_rings; i++)
- if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
- complete = false;
-
- work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
- if (work_done == budget)
- complete = false;
- if (work_done)
- v->rx_napi_work = true;
-
- if (!complete) {
- enetc_unlock_mdio();
- return budget;
- }
-
- napi_complete_done(napi, work_done);
-
- if (likely(v->rx_dim_en))
- enetc_rx_net_dim(v);
-
- v->rx_napi_work = false;
-
- /* enable interrupts */
- enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
-
- for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
- ENETC_TBIER_TXTIE);
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
- enetc_unlock_mdio();
+ return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
- return work_done;
+static bool enetc_page_reusable(struct page *page)
+{
+ return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}
-static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *old)
{
- int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ struct enetc_rx_swbd *new;
- return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+ new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+ /* next buf that may reuse a page */
+ enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+ /* copy page reference */
+ *new = *old;
}
static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
@@ -344,23 +479,58 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
- /* Ensure skb_mstamp_ns, which might have been populated with
- * the txtime, is not mistaken for a software timestamp,
- * because this will prevent the dispatch of our hardware
- * timestamp to the socket.
- */
- skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(skb);
skb_tstamp_tx(skb, &shhwtstamps);
}
}
+static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_rx_swbd rx_swbd = {
+ .dma = tx_swbd->dma,
+ .page = tx_swbd->page,
+ .page_offset = tx_swbd->page_offset,
+ .dir = tx_swbd->dir,
+ .len = tx_swbd->len,
+ };
+ struct enetc_bdr *rx_ring;
+
+ rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
+
+ if (likely(enetc_swbd_unused(rx_ring))) {
+ enetc_reuse_page(rx_ring, &rx_swbd);
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
+ rx_swbd.page_offset,
+ ENETC_RXB_DMA_SIZE_XDP,
+ rx_swbd.dir);
+
+ rx_ring->stats.recycles++;
+ } else {
+ /* RX ring is already full, we need to unmap and free the
+ * page, since there's nothing useful we can do with it.
+ */
+ rx_ring->stats.recycle_failures++;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
+ rx_swbd.dir);
+ __free_page(rx_swbd.page);
+ }
+
+ rx_ring->xdp.xdp_tx_in_flight--;
+}
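+
+/* Note: xdp_tx_in_flight is incremented when XDP_TX buffers are enqueued
+ * (see enetc_clean_rx_ring_xdp()) and decremented above, so it counts RX
+ * pages currently owned by the TX ring; the RX refill path subtracts it
+ * to avoid re-seeding slots whose pages are still in flight.
+ */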
+
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
struct net_device *ndev = tx_ring->ndev;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
- bool do_tstamp;
+ bool do_twostep_tstamp;
u64 tstamp = 0;
i = tx_ring->next_to_clean;
@@ -368,10 +538,12 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
- do_tstamp = false;
+ do_twostep_tstamp = false;
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
- bool is_eof = !!tx_swbd->skb;
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+ bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -380,26 +552,40 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
- tx_swbd->do_tstamp) {
+ tx_swbd->do_twostep_tstamp) {
enetc_get_tx_tstamp(&priv->si->hw, txbd,
&tstamp);
- do_tstamp = true;
+ do_twostep_tstamp = true;
}
}
- if (likely(tx_swbd->dma))
+ if (tx_swbd->is_xdp_tx)
+ enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
+ else if (likely(tx_swbd->dma))
enetc_unmap_tx_buff(tx_ring, tx_swbd);
- if (is_eof) {
- if (unlikely(do_tstamp)) {
- enetc_tstamp_tx(tx_swbd->skb, tstamp);
- do_tstamp = false;
+ if (xdp_frame) {
+ xdp_return_frame(xdp_frame);
+ } else if (skb) {
+ if (unlikely(tx_swbd->skb->cb[0] &
+ ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ /* Start work to release the lock for the next
+ * one-step timestamping packet, and send one skb
+ * from the tx_skbs queue if there is any.
+ */
+ schedule_work(&priv->tx_onestep_tstamp);
+ } else if (unlikely(do_twostep_tstamp)) {
+ enetc_tstamp_tx(skb, tstamp);
+ do_twostep_tstamp = false;
}
- napi_consume_skb(tx_swbd->skb, napi_budget);
- tx_swbd->skb = NULL;
+ napi_consume_skb(skb, napi_budget);
}
tx_byte_cnt += tx_swbd->len;
+ /* Scrub the swbd here so we don't have to do that
+ * when we reuse it during xmit
+ */
+ memset(tx_swbd, 0, sizeof(*tx_swbd));
bds_to_clean--;
tx_swbd++;
@@ -437,6 +623,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
static bool enetc_new_page(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
+ bool xdp = !!(rx_ring->xdp.prog);
struct page *page;
dma_addr_t addr;
@@ -444,7 +631,10 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring,
if (unlikely(!page))
return false;
- addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* For XDP_TX, we forgo dma_unmap -> dma_map */
+ rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
__free_page(page);
@@ -453,7 +643,7 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring,
rx_swbd->dma = addr;
rx_swbd->page = page;
- rx_swbd->page_offset = ENETC_RXB_PAD;
+ rx_swbd->page_offset = rx_ring->buffer_offset;
return true;
}
@@ -483,18 +673,16 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
/* clear 'R' as well */
rxbd->r.lstatus = 0;
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- rx_swbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
- i = 0;
- rx_swbd = rx_ring->rx_swbd;
- }
+ enetc_rxbd_next(rx_ring, &rxbd, &i);
+ rx_swbd = &rx_ring->rx_swbd[i];
}
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track from page reuse */
rx_ring->next_to_use = i;
+
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
}
return j;
@@ -570,32 +758,10 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
#endif
}
-static void enetc_process_skb(struct enetc_bdr *rx_ring,
- struct sk_buff *skb)
-{
- skb_record_rx_queue(skb, rx_ring->index);
- skb->protocol = eth_type_trans(skb, rx_ring->ndev);
-}
-
-static bool enetc_page_reusable(struct page *page)
-{
- return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
-}
-
-static void enetc_reuse_page(struct enetc_bdr *rx_ring,
- struct enetc_rx_swbd *old)
-{
- struct enetc_rx_swbd *new;
-
- new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
-
- /* next buf that may reuse a page */
- enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
-
- /* copy page reference */
- *new = *old;
-}
-
+/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
+ * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
+ * mapped buffers.
+ */
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
int i, u16 size)
{
@@ -603,30 +769,39 @@ static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
rx_swbd->page_offset,
- size, DMA_FROM_DEVICE);
+ size, rx_swbd->dir);
return rx_swbd;
}
+/* Reuse the current page without performing half-page buffer flipping */
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
+ size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
+
+ enetc_reuse_page(rx_ring, rx_swbd);
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ buffer_size, rx_swbd->dir);
+
+ rx_swbd->page = NULL;
+}
+
+/* Reuse the current page by performing half-page buffer flipping */
+static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
if (likely(enetc_page_reusable(rx_swbd->page))) {
rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
page_ref_inc(rx_swbd->page);
- enetc_reuse_page(rx_ring, rx_swbd);
-
- /* sync for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
- rx_swbd->page_offset,
- ENETC_RXB_DMA_SIZE,
- DMA_FROM_DEVICE);
+ enetc_put_rx_buff(rx_ring, rx_swbd);
} else {
- dma_unmap_page(rx_ring->dev, rx_swbd->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ rx_swbd->page = NULL;
}
-
- rx_swbd->page = NULL;
}
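+
+/* Note on the two helpers above: enetc_put_rx_buff() returns the buffer
+ * with its current half page untouched (used when the buffer was not
+ * consumed), while enetc_flip_rx_buff() hands the current half to the
+ * stack and, when the page refcount allows, flips page_offset to the
+ * other half for reuse.
+ */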
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
@@ -637,16 +812,16 @@ static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
void *ba;
ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
- skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+ skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
if (unlikely(!skb)) {
rx_ring->stats.rx_alloc_errs++;
return NULL;
}
- skb_reserve(skb, ENETC_RXB_PAD);
+ skb_reserve(skb, rx_ring->buffer_offset);
__skb_put(skb, size);
- enetc_put_rx_buff(rx_ring, rx_swbd);
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
return skb;
}
@@ -659,7 +834,72 @@ static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
- enetc_put_rx_buff(rx_ring, rx_swbd);
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
+}
+
+static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
+ u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i)
+{
+ if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
+ return false;
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ dma_rmb();
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ rx_ring->ndev->stats.rx_dropped++;
+ rx_ring->ndev->stats.rx_errors++;
+
+ return true;
+}
+
+static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
+ u32 bd_status, union enetc_rx_bd **rxbd,
+ int *i, int *cleaned_cnt, int buffer_size)
+{
+ struct sk_buff *skb;
+ u16 size;
+
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
+ if (!skb)
+ return NULL;
+
+ enetc_get_offloads(rx_ring, *rxbd, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = buffer_size;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ skb_record_rx_queue(skb, rx_ring->index);
+ skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+
+ return skb;
}
#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
@@ -678,15 +918,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
u32 bd_status;
- u16 size;
- if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
- int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
-
- /* update ENETC's consumer index */
- enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
- cleaned_cnt -= count;
- }
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
+ cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
+ cleaned_cnt);
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -695,73 +930,511 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
- size = le16_to_cpu(rxbd->r.buf_len);
- skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, ENETC_RXB_DMA_SIZE);
if (!skb)
break;
- enetc_get_offloads(rx_ring, rxbd, skb);
+ rx_byte_cnt += skb->len;
+ rx_frm_cnt++;
- cleaned_cnt++;
+ napi_gro_receive(napi, skb);
+ }
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
+ rx_ring->next_to_clean = i;
- if (unlikely(bd_status &
- ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
- dev_kfree_skb(skb);
- while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
- dma_rmb();
- bd_status = le32_to_cpu(rxbd->r.lstatus);
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
- }
+ return rx_frm_cnt;
+}
+
+static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
+ struct enetc_tx_swbd *tx_swbd,
+ int frm_len)
+{
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(txbd);
+ txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
+ txbd->buf_len = cpu_to_le16(tx_swbd->len);
+ txbd->frm_len = cpu_to_le16(frm_len);
+
+ memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
+}
+
+/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
+ * descriptors.
+ */
+static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
+{
+ struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
+ int i, k, frm_len = tmp_tx_swbd->len;
+
+ if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
+ return false;
+
+ while (unlikely(!tmp_tx_swbd->is_eof)) {
+ tmp_tx_swbd++;
+ frm_len += tmp_tx_swbd->len;
+ }
+
+ i = tx_ring->next_to_use;
+
+ for (k = 0; k < num_tx_swbd; k++) {
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
+
+ enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
+
+ /* last BD needs 'F' bit set */
+ if (xdp_tx_swbd->is_eof) {
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ txbd->flags = ENETC_TXBD_FLAGS_F;
+ }
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr,
+ struct xdp_frame *xdp_frame)
+{
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
+ struct skb_shared_info *shinfo;
+ void *data = xdp_frame->data;
+ int len = xdp_frame->len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ unsigned int f;
+ int n = 0;
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+
+ shinfo = xdp_get_shared_info_from_frame(xdp_frame);
+
+ for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
+ f++, frag++) {
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ /* Undo the DMA mapping for all fragments */
+ while (--n >= 0)
+ enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
+
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+ }
+
+ xdp_tx_arr[n - 1].is_eof = true;
+ xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
+
+ return n;
+}
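+
+/* A sketch of the array layout produced above: entry 0 maps the frame
+ * head, entries 1..nr_frags map the fragments, and only the last entry
+ * carries is_eof plus the xdp_frame pointer, so that TX completion calls
+ * xdp_return_frame() exactly once per frame.
+ */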
+
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int xdp_tx_bd_cnt, i, k;
+ int xdp_tx_frm_cnt = 0;
+
+ enetc_lock_mdio();
+
+ tx_ring = priv->xdp_tx_ring[smp_processor_id()];
- rx_ring->ndev->stats.rx_dropped++;
- rx_ring->ndev->stats.rx_errors++;
+ prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
+ for (k = 0; k < num_frames; k++) {
+ xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
+ xdp_redirect_arr,
+ frames[k]);
+ if (unlikely(xdp_tx_bd_cnt < 0))
+ break;
+
+ if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
+ xdp_tx_bd_cnt))) {
+ for (i = 0; i < xdp_tx_bd_cnt; i++)
+ enetc_unmap_tx_buff(tx_ring,
+ &xdp_redirect_arr[i]);
+ tx_ring->stats.xdp_tx_drops++;
break;
}
- /* not last BD in frame? */
- while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
- bd_status = le32_to_cpu(rxbd->r.lstatus);
- size = ENETC_RXB_DMA_SIZE;
+ xdp_tx_frm_cnt++;
+ }
- if (bd_status & ENETC_RXBD_LSTATUS_F) {
- dma_rmb();
- size = le16_to_cpu(rxbd->r.buf_len);
- }
+ if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
+ enetc_update_tx_ring_tail(tx_ring);
+
+ tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
+
+ enetc_unlock_mdio();
+
+ return xdp_tx_frm_cnt;
+}
+
+static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ struct xdp_buff *xdp_buff, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ struct skb_shared_info *shinfo;
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
- enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+ xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
+ rx_ring->buffer_offset, size, false);
- cleaned_cnt++;
+ shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ shinfo->nr_frags = 0;
+}
+
+static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct xdp_buff *xdp_buff)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
+
+ skb_frag_off_set(frag, rx_swbd->page_offset);
+ skb_frag_size_set(frag, size);
+ __skb_frag_set_page(frag, rx_swbd->page);
+
+ shinfo->nr_frags++;
+}
+
+static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i,
+ int *cleaned_cnt, struct xdp_buff *xdp_buff)
+{
+ u16 size = le16_to_cpu((*rxbd)->r.buf_len);
+
+ xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
+
+ enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = ENETC_RXB_DMA_SIZE_XDP;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
}
- rx_byte_cnt += skb->len;
+ enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+}
- enetc_process_skb(rx_ring, skb);
+/* Convert RX buffer descriptors to TX buffer descriptors. These will be
+ * recycled back into the RX ring in enetc_clean_tx_ring.
+ */
+static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
+ struct enetc_bdr *rx_ring,
+ int rx_ring_first, int rx_ring_last)
+{
+ int n = 0;
+
+ for (; rx_ring_first != rx_ring_last;
+ n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+ struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
+
+ /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
+ tx_swbd->dma = rx_swbd->dma;
+ tx_swbd->dir = rx_swbd->dir;
+ tx_swbd->page = rx_swbd->page;
+ tx_swbd->page_offset = rx_swbd->page_offset;
+ tx_swbd->len = rx_swbd->len;
+ tx_swbd->is_dma_page = true;
+ tx_swbd->is_xdp_tx = true;
+ tx_swbd->is_eof = false;
+ }
- napi_gro_receive(napi, skb);
+ /* We rely on the caller providing rx_ring_last > rx_ring_first */
+ xdp_tx_arr[n - 1].is_eof = true;
+
+ return n;
+}
+
+static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_put_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_drops++;
+}
+
+static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+
+ if (rx_swbd->page) {
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ __free_page(rx_swbd->page);
+ rx_swbd->page = NULL;
+ }
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_redirect_failures++;
+}
+
+static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit,
+ struct bpf_prog *prog)
+{
+ int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
+ struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ struct enetc_bdr *tx_ring;
+ int cleaned_cnt, i;
+ u32 xdp_act;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd, *orig_rxbd;
+ int orig_i, orig_cleaned_cnt;
+ struct xdp_buff xdp_buff;
+ struct sk_buff *skb;
+ int tmp_orig_i, err;
+ u32 bd_status;
+
+ rxbd = enetc_rxbd(rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ orig_rxbd = rxbd;
+ orig_cleaned_cnt = cleaned_cnt;
+ orig_i = i;
+
+ enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, &xdp_buff);
+
+ xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
+
+ switch (xdp_act) {
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ break;
+ case XDP_PASS:
+ rxbd = orig_rxbd;
+ cleaned_cnt = orig_cleaned_cnt;
+ i = orig_i;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
+ &i, &cleaned_cnt,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (unlikely(!skb))
+ goto out;
+
+ napi_gro_receive(napi, skb);
+ break;
+ case XDP_TX:
+ tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
+ rx_ring,
+ orig_i, i);
+
+ if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ } else {
+ tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
+ xdp_tx_frm_cnt++;
+ /* The XDP_TX enqueue was successful, so we
+ * need to scrub the RX software BDs because
+ * the ownership of the buffers no longer
+ * belongs to the RX ring, and we must prevent
+ * enetc_refill_rx_ring() from reusing
+ * rx_swbd->page.
+ */
+ while (orig_i != i) {
+ rx_ring->rx_swbd[orig_i].page = NULL;
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+ }
+ break;
+ case XDP_REDIRECT:
+ /* xdp_return_frame does not support S/G in the sense
+ * that it leaks the fragments (__xdp_return should not
+ * call page_frag_free only for the initial buffer).
+ * Until XDP_REDIRECT gains support for S/G let's keep
+ * the code structure in place, but dead. We drop the
+ * S/G frames ourselves to avoid memory leaks which
+ * would otherwise drive the kernel out of memory.
+ */
+ if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_sg++;
+ break;
+ }
+
+ tmp_orig_i = orig_i;
+
+ while (orig_i != i) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[orig_i]);
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ if (unlikely(err)) {
+ enetc_xdp_free(rx_ring, tmp_orig_i, i);
+ } else {
+ xdp_redirect_frm_cnt++;
+ rx_ring->stats.xdp_redirect++;
+ }
+ }
rx_frm_cnt++;
}
+out:
rx_ring->next_to_clean = i;
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ if (xdp_redirect_frm_cnt)
+ xdp_do_flush_map();
+
+ if (xdp_tx_frm_cnt)
+ enetc_update_tx_ring_tail(tx_ring);
+
+ if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
+ rx_ring->xdp.xdp_tx_in_flight);
+
return rx_frm_cnt;
}
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+ struct enetc_int_vector
+ *v = container_of(napi, struct enetc_int_vector, napi);
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ struct bpf_prog *prog;
+ bool complete = true;
+ int work_done;
+ int i;
+
+ enetc_lock_mdio();
+
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
+
+ prog = rx_ring->xdp.prog;
+ if (prog)
+ work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
+ else
+ work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
+ if (work_done == budget)
+ complete = false;
+ if (work_done)
+ v->rx_napi_work = true;
+
+ if (!complete) {
+ enetc_unlock_mdio();
+ return budget;
+ }
+
+ napi_complete_done(napi, work_done);
+
+ if (likely(v->rx_dim_en))
+ enetc_rx_net_dim(v);
+
+ v->rx_napi_work = false;
+
+ /* enable interrupts */
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
+
+ return work_done;
+}
+
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
@@ -836,7 +1509,7 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
int size, i;
for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+ enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
size = txr->bd_count * sizeof(union enetc_tx_bd);
@@ -953,7 +1626,7 @@ static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_skb(tx_ring, tx_swbd);
+ enetc_free_tx_frame(tx_ring, tx_swbd);
}
tx_ring->next_to_clean = 0;
@@ -973,8 +1646,8 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
if (!rx_swbd->page)
continue;
- dma_unmap_page(rx_ring->dev, rx_swbd->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
@@ -995,60 +1668,6 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
enetc_free_tx_ring(priv->tx_ring[i]);
}
-int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
-{
- int size = cbdr->bd_count * sizeof(struct enetc_cbd);
-
- cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
- GFP_KERNEL);
- if (!cbdr->bd_base)
- return -ENOMEM;
-
- /* h/w requires 128B alignment */
- if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
- dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
- return -EINVAL;
- }
-
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
-
- return 0;
-}
-
-void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
-{
- int size = cbdr->bd_count * sizeof(struct enetc_cbd);
-
- dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
- cbdr->bd_base = NULL;
-}
-
-void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
-{
- /* set CBDR cache attributes */
- enetc_wr(hw, ENETC_SICAR2,
- ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
-
- enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
- enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
- enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
-
- enetc_wr(hw, ENETC_SICBDRPIR, 0);
- enetc_wr(hw, ENETC_SICBDRCIR, 0);
-
- /* enable ring */
- enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
-
- cbdr->pir = hw->reg + ENETC_SICBDRPIR;
- cbdr->cir = hw->reg + ENETC_SICBDRCIR;
-}
-
-void enetc_clear_cbdr(struct enetc_hw *hw)
-{
- enetc_wr(hw, ENETC_SICBDRMR, 0);
-}
-
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
int *rss_table;
@@ -1108,45 +1727,22 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->bdr_int_num = cpus;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
-
- /* SI specific */
- si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
- int err;
-
- err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
- if (err)
- return err;
-
- enetc_setup_cbdr(&si->hw, &si->cbd_ring);
priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
GFP_KERNEL);
- if (!priv->cls_rules) {
- err = -ENOMEM;
- goto err_alloc_cls;
- }
+ if (!priv->cls_rules)
+ return -ENOMEM;
return 0;
-
-err_alloc_cls:
- enetc_clear_cbdr(&si->hw);
- enetc_free_cbdr(priv->dev, &si->cbd_ring);
-
- return err;
}
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
- struct enetc_si *si = priv->si;
-
- enetc_clear_cbdr(&si->hw);
- enetc_free_cbdr(priv->dev, &si->cbd_ring);
-
kfree(priv->cls_rules);
}
@@ -1199,7 +1795,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
ENETC_RTBLENR_LEN(rx_ring->bd_count));
- enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ if (rx_ring->xdp.prog)
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
+ else
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
@@ -1217,9 +1816,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
- /* update ENETC's consumer index */
- enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
+ enetc_unlock_mdio();
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
@@ -1408,6 +2007,29 @@ static int enetc_phylink_connect(struct net_device *ndev)
return 0;
}
+static void enetc_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct enetc_ndev_priv *priv;
+ struct sk_buff *skb;
+
+ priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
+
+ netif_tx_lock(priv->ndev);
+
+ clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (skb)
+ enetc_start_xmit(skb, priv->ndev);
+
+ netif_tx_unlock(priv->ndev);
+}
+
+static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+{
+ INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
+ skb_queue_head_init(&priv->tx_skbs);
+}
+
void enetc_start(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1434,6 +2056,7 @@ void enetc_start(struct net_device *ndev)
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int num_stack_tx_queues;
int err;
err = enetc_setup_irqs(priv);
@@ -1452,7 +2075,9 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_alloc_rx;
- err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
if (err)
goto err_set_queues;
@@ -1460,6 +2085,7 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_set_queues;
+ enetc_tx_onestep_tstamp_init(priv);
enetc_setup_bdrs(priv);
enetc_start(ndev);
@@ -1524,15 +2150,17 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
struct enetc_bdr *tx_ring;
+ int num_stack_tx_queues;
u8 num_tc;
int i;
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = mqprio->num_tc;
if (!num_tc) {
netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -1544,7 +2172,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
}
/* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > priv->num_tx_rings) {
+ if (num_tc > num_stack_tx_queues) {
netdev_err(ndev, "Max %d traffic classes supported\n",
priv->num_tx_rings);
return -EINVAL;
@@ -1590,6 +2218,54 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ bool is_up;
+ int i;
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ is_up = netif_running(dev);
+ if (is_up)
+ dev_close(dev);
+
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+
+ rx_ring->xdp.prog = prog;
+
+ if (prog)
+ rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
+ else
+ rx_ring->buffer_offset = ENETC_RXB_PAD;
+ }
+
+ if (is_up)
+ return dev_open(dev, extack);
+
+ return 0;
+}
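+
+/* Note: the close/open cycle above is what forces the drain; with an XDP
+ * program attached, RX buffers are seeded with XDP_PACKET_HEADROOM and
+ * the smaller ENETC_RXB_DMA_SIZE_XDP (see enetc_setup_rxbdr()), so pages
+ * mapped for the old layout cannot simply be reused.
+ */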
+
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1710,11 +2386,16 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
priv->active_offloads |= ENETC_F_TX_TSTAMP;
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ break;
default:
return -ERANGE;
}
@@ -1745,7 +2426,9 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
config.flags = 0;
- if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+ if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
@@ -1777,8 +2460,9 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- int v_tx_rings;
+ int first_xdp_tx_ring;
int i, n, err, nvec;
+ int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -1806,6 +2490,28 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ if (err) {
+ kfree(v);
+ goto fail;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ kfree(v);
+ goto fail;
+ }
+
/* init defaults for adaptive IC */
if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
v->rx_ictt = 0x1;
@@ -1820,11 +2526,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
int idx;
/* default tx ring mapping policy */
- if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
- idx = 2 * j + i; /* 2 CPUs */
- else
- idx = j + i * v_tx_rings; /* default */
-
+ idx = priv->bdr_int_num * j + i;
__set_bit(idx, &v->tx_rings_map);
bdr = &v->tx_ring[j];
bdr->index = idx;
@@ -1833,22 +2535,23 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
bdr->bd_count = priv->tx_bd_count;
priv->tx_ring[idx] = bdr;
}
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- priv->rx_ring[i] = bdr;
}
+ first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
+ priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
+
return 0;
fail:
while (i--) {
- netif_napi_del(&priv->int_vector[i]->napi);
- cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
- kfree(priv->int_vector[i]);
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+ kfree(v);
}
pci_free_irq_vectors(pdev);
@@ -1862,7 +2565,10 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
for (i = 0; i < priv->bdr_int_num; i++) {
struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
netif_napi_del(&v->napi);
cancel_work_sync(&v->rx_dim.work);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 8b380fc13314..08b283347d9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -19,12 +19,21 @@
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
struct enetc_tx_swbd {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdp_frame;
+ };
dma_addr_t dma;
+ struct page *page; /* valid only if is_xdp_tx */
+ u16 page_offset; /* valid only if is_xdp_tx */
u16 len;
+ enum dma_data_direction dir;
u8 is_dma_page:1;
u8 check_wb:1;
- u8 do_tstamp:1;
+ u8 do_twostep_tstamp:1;
+ u8 is_eof:1;
+ u8 is_xdp_tx:1;
+ u8 is_xdp_redirect:1;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
@@ -32,21 +41,45 @@ struct enetc_tx_swbd {
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+#define ENETC_RXB_DMA_SIZE_XDP \
+ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
struct enetc_rx_swbd {
dma_addr_t dma;
struct page *page;
u16 page_offset;
+ enum dma_data_direction dir;
+ u16 len;
};
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS 13
+#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
struct enetc_ring_stats {
unsigned int packets;
unsigned int bytes;
unsigned int rx_alloc_errs;
+ unsigned int xdp_drops;
+ unsigned int xdp_tx;
+ unsigned int xdp_tx_drops;
+ unsigned int xdp_redirect;
+ unsigned int xdp_redirect_failures;
+ unsigned int xdp_redirect_sg;
+ unsigned int recycles;
+ unsigned int recycle_failures;
+};
+
+struct enetc_xdp_data {
+ struct xdp_rxq_info rxq;
+ struct bpf_prog *prog;
+ int xdp_tx_in_flight;
};
-#define ENETC_RX_RING_DEFAULT_SIZE 512
-#define ENETC_TX_RING_DEFAULT_SIZE 256
+#define ENETC_RX_RING_DEFAULT_SIZE 2048
+#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
struct enetc_bdr {
@@ -71,6 +104,9 @@ struct enetc_bdr {
};
void __iomem *idr; /* Interrupt Detect Register pointer */
+ int buffer_offset;
+ struct enetc_xdp_data xdp;
+
struct enetc_ring_stats stats;
dma_addr_t bd_dma_base;
@@ -92,18 +128,28 @@ static inline int enetc_bd_unused(struct enetc_bdr *bdr)
return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}
+static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_alloc)
+ return bdr->next_to_clean - bdr->next_to_alloc - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
+}
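+
+/* A worked example of the accounting above, assuming bd_count = 8:
+ * with next_to_clean = 2 and next_to_alloc = 6, the free software BDs
+ * come out as 8 + 2 - 6 - 1 = 3; one slot is always kept as a gap,
+ * matching enetc_bd_unused().
+ */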
+
/* Control BD ring */
#define ENETC_CBDR_DEFAULT_SIZE 64
struct enetc_cbdr {
void *bd_base; /* points to Rx or Tx BD ring */
void __iomem *pir;
void __iomem *cir;
+ void __iomem *mr; /* mode register */
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
dma_addr_t bd_dma_base;
+ struct device *dma_dev;
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
@@ -119,19 +165,26 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
-static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring,
- union enetc_rx_bd *rxbd,
- int i)
+static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd **old_rxbd, int *old_index)
{
- rxbd++;
+ union enetc_rx_bd *new_rxbd = *old_rxbd;
+ int new_index = *old_index;
+
+ new_rxbd++;
+
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
- rxbd++;
+ new_rxbd++;
#endif
- if (unlikely(++i == rx_ring->bd_count))
- rxbd = rx_ring->bd_base;
- return rxbd;
+ if (unlikely(++new_index == rx_ring->bd_count)) {
+ new_rxbd = rx_ring->bd_base;
+ new_index = 0;
+ }
+
+ *old_rxbd = new_rxbd;
+ *old_index = new_index;
}
static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
@@ -184,6 +237,22 @@ static inline bool enetc_si_is_pf(struct enetc_si *si)
return !!(si->hw.port);
}
+static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
+{
+ switch (pf_pdev->devfn) {
+ case 0:
+ return 0;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 6:
+ return 3;
+ default:
+ return -1;
+ }
+}
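+
+/* Note: the devfn-to-port mapping above is sparse (devfn 6 -> port 3),
+ * presumably reflecting the controller's fixed PF layout, hence the
+ * explicit switch rather than returning devfn directly.
+ */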
+
#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
@@ -218,12 +287,20 @@ struct psfp_cap {
u32 max_psfp_meter;
};
+#define ENETC_F_TX_TSTAMP_MASK 0xff
/* TODO: more hardware offloads */
enum enetc_active_offloads {
- ENETC_F_RX_TSTAMP = BIT(0),
- ENETC_F_TX_TSTAMP = BIT(1),
- ENETC_F_QBV = BIT(2),
- ENETC_F_QCI = BIT(3),
+ /* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
+ ENETC_F_TX_TSTAMP = BIT(0),
+ ENETC_F_TX_ONESTEP_SYNC_TSTAMP = BIT(1),
+
+ ENETC_F_RX_TSTAMP = BIT(8),
+ ENETC_F_QBV = BIT(9),
+ ENETC_F_QCI = BIT(10),
+};
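+
+/* Note: the low byte of enetc_active_offloads is reserved for TX
+ * timestamp types so that the whole set can be copied into skb->cb[0]
+ * and tested against ENETC_F_TX_TSTAMP_MASK; unrelated offload flags
+ * start at BIT(8).
+ */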
+
+enum enetc_flags_bit {
+ ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
};
/* interrupt coalescing modes */
@@ -252,10 +329,11 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
- int active_offloads;
+ enum enetc_active_offloads active_offloads;
u32 speed; /* store speed for compare update pspeed */
+ struct enetc_bdr **xdp_tx_ring;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -266,6 +344,13 @@ struct enetc_ndev_priv {
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
+
+ struct bpf_prog *xdp_prog;
+
+ unsigned long flags;
+
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
};
/* Messaging */
@@ -305,15 +390,17 @@ int enetc_set_features(struct net_device *ndev,
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
/* control buffer descriptor ring (CBDR) */
-int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
-void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
-void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr);
-void enetc_clear_cbdr(struct enetc_hw *hw);
+int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
+ struct enetc_cbdr *cbdr);
+void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 201cbc362e33..073e56dcca4e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -3,9 +3,63 @@
#include "enetc.h"
-static void enetc_clean_cbdr(struct enetc_si *si)
+int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
+ struct enetc_cbdr *cbdr)
+{
+ int size = bd_count * sizeof(struct enetc_cbd);
+
+ cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+ GFP_KERNEL);
+ if (!cbdr->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+ dma_free_coherent(dev, size, cbdr->bd_base,
+ cbdr->bd_dma_base);
+ return -EINVAL;
+ }
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+ cbdr->dma_dev = dev;
+ cbdr->bd_count = bd_count;
+
+ cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+ cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+ cbdr->mr = hw->reg + ENETC_SICBDRMR;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+ enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
+ enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
+ /* enable ring */
+ enetc_wr_reg(cbdr->mr, BIT(31));
+
+ return 0;
+}
+
+void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ /* disable ring */
+ enetc_wr_reg(cbdr->mr, 0);
+
+ dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
+ cbdr->bd_dma_base);
+ cbdr->bd_base = NULL;
+ cbdr->dma_dev = NULL;
+}
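+
+/* Note: enetc_setup_cbdr() above now both allocates the ring and
+ * programs the hardware, caching dev as cbdr->dma_dev along with the
+ * register pointers; this is what lets the helpers below use
+ * ring->dma_dev instead of reaching for &si->pdev->dev.
+ */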
+
+static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
- struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd *dest_cbd;
int i, status;
@@ -15,7 +69,7 @@ static void enetc_clean_cbdr(struct enetc_si *si)
dest_cbd = ENETC_CBD(*ring, i);
status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
if (status)
- dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
+ dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
status, dest_cbd->cmd);
memset(dest_cbd, 0, sizeof(*dest_cbd));
@@ -43,7 +97,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return -EIO;
if (unlikely(!enetc_cbd_unused(ring)))
- enetc_clean_cbdr(si);
+ enetc_clean_cbdr(ring);
i = ring->next_to_use;
dest_cbd = ENETC_CBD(*ring, i);
@@ -69,7 +123,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
/* CBD may writeback data, feedback up level */
*cbd = *dest_cbd;
- enetc_clean_cbdr(si);
+ enetc_clean_cbdr(ring);
return 0;
}
@@ -117,6 +171,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
+ struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
@@ -129,10 +184,10 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
- dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
+ dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
return -ENOMEM;
}
@@ -145,9 +200,9 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
err = enetc_send_cmd(si, &cbd);
if (err)
- dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
+ dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
tmp, dma);
return err;
@@ -157,6 +212,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
+ struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
@@ -166,10 +222,10 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
+ tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
- dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
+ dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
return -ENOMEM;
}
dma_align = ALIGN(dma, RSSE_ALIGN);
@@ -189,13 +245,13 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
err = enetc_send_cmd(si, &cbd);
if (err)
- dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
+ dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);
if (read)
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
+ dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 89e558135432..ebccaf02411c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -192,10 +192,18 @@ static const struct {
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d frames",
"Rx ring %2d alloc errors",
+ "Rx ring %2d XDP drops",
+ "Rx ring %2d recycles",
+ "Rx ring %2d recycle failures",
+ "Rx ring %2d redirects",
+ "Rx ring %2d redirect failures",
+ "Rx ring %2d redirect S/G",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
"Tx ring %2d frames",
+ "Tx ring %2d XDP frames",
+ "Tx ring %2d XDP drops",
};
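
The new entries above are printf-style templates: get_strings is expected to expand each one once per ring, and the template order must mirror the order in which enetc_get_ethtool_stats() below writes the counters. A hedged sketch of that expansion (the helper name and signature are hypothetical):

static char *expand_ring_strings(char *p, const char tmpl[][ETH_GSTRING_LEN],
				 int num_tmpl, int ring)
{
	int i;

	for (i = 0; i < num_tmpl; i++) {
		snprintf(p, ETH_GSTRING_LEN, tmpl[i], ring);
		p += ETH_GSTRING_LEN;
	}
	return p;
}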
static int enetc_get_sset_count(struct net_device *ndev, int sset)
@@ -267,12 +275,21 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
- for (i = 0; i < priv->num_tx_rings; i++)
+ for (i = 0; i < priv->num_tx_rings; i++) {
data[o++] = priv->tx_ring[i]->stats.packets;
+ data[o++] = priv->tx_ring[i]->stats.xdp_tx;
+ data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+ }
for (i = 0; i < priv->num_rx_rings; i++) {
data[o++] = priv->rx_ring[i]->stats.packets;
data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+ data[o++] = priv->rx_ring[i]->stats.xdp_drops;
+ data[o++] = priv->rx_ring[i]->stats.recycles;
+ data[o++] = priv->rx_ring[i]->stats.recycle_failures;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
}
if (!enetc_si_is_pf(priv->si))
@@ -654,7 +671,8 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
#else
@@ -690,6 +708,22 @@ static int enetc_set_wol(struct net_device *dev,
return ret;
}
+static void enetc_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+static int enetc_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
static int enetc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -736,6 +770,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_ts_info = enetc_get_ts_info,
.get_wol = enetc_get_wol,
.set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 00938f7960a4..0f5f081a5baf 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -109,6 +109,7 @@ enum enetc_bdr_type {TX, RX};
/* RX BDR reg offsets */
#define ENETC_RBMR 0
#define ENETC_RBMR_BDS BIT(2)
+#define ENETC_RBMR_CM BIT(4)
#define ENETC_RBMR_VTE BIT(5)
#define ENETC_RBMR_EN BIT(31)
#define ENETC_RBSR 0x4
@@ -180,6 +181,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
#define ENETC_PSIVLAN_EN BIT(31)
#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12)
+#define ENETC_PPAUONTR 0x0410
+#define ENETC_PPAUOFFTR 0x0414
#define ENETC_PTXMBAR 0x0608
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
@@ -227,6 +230,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
+#define ENETC_PM0_PAUSE_IGN BIT(8)
#define ENETC_PM0_CMD_XGLP BIT(10)
#define ENETC_PM0_CMD_TXP BIT(11)
#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
@@ -239,6 +243,17 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM_IMDIO_BASE 0x8030
+#define ENETC_PM0_PAUSE_QUANTA 0x8054
+#define ENETC_PM0_PAUSE_THRESH 0x8064
+#define ENETC_PM1_PAUSE_QUANTA 0x9054
+#define ENETC_PM1_PAUSE_THRESH 0x9064
+
+#define ENETC_PM0_SINGLE_STEP 0x80c0
+#define ENETC_PM1_SINGLE_STEP 0x90c0
+#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
+#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
+#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
+
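
The new defines above describe the one-step PTP timestamping control word: an enable bit, a checksum-handling bit, and an 8-bit offset field shifted into bits 15:8. A sketch of how a caller might compose the value (the helper, its parameters, and the meaning of the CH bit are assumptions, not taken from this patch):

static u32 one_step_ctrl(u8 offset, bool udp)
{
	u32 val = ENETC_PM0_SINGLE_STEP_EN |
		  ENETC_SET_SINGLE_STEP_OFFSET(offset);

	if (udp)	/* assumed: also correct the UDP checksum */
		val |= ENETC_PM0_SINGLE_STEP_CH;

	return val;
}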
#define ENETC_PM0_IF_MODE 0x8300
#define ENETC_PM0_IFM_RG BIT(2)
#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
@@ -548,6 +563,7 @@ static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
/* Extension flags */
#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
+#define ENETC_TXBD_E_FLAGS_ONE_STEP_PTP BIT(1)
#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
union enetc_rx_bd {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
new file mode 100644
index 000000000000..8b356c485507
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2021 NXP Semiconductors
+ *
+ * The Integrated Endpoint Register Block (IERB) is configured by pre-boot
+ * software and is supposed to be to ENETC what an NVRAM is to a 'real' PCIe
+ * card. Upon FLR, values from the IERB are transferred to the ENETC PFs, and
+ * are read-only in the PF memory space.
+ *
+ * This driver fixes up the power-on reset values for the ENETC shared FIFO,
+ * such that the TX and RX allocations are sufficient for jumbo frames, and
+ * that intelligent FIFO dropping is enabled before the internal data
+ * structures are corrupted.
+ *
+ * Even though not all ports might be used on a given board, we are not
+ * concerned with partitioning the FIFO, because the default values configure
+ * no strict reservations, so the entire FIFO can be used by the RX of a single
+ * port, or the TX of a single port.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "enetc.h"
+#include "enetc_ierb.h"
+
+/* IERB registers */
+#define ENETC_IERB_TXMBAR(port) (((port) * 0x100) + 0x8080)
+#define ENETC_IERB_RXMBER(port) (((port) * 0x100) + 0x8090)
+#define ENETC_IERB_RXMBLR(port) (((port) * 0x100) + 0x8094)
+#define ENETC_IERB_RXBCR(port) (((port) * 0x100) + 0x80a0)
+#define ENETC_IERB_TXBCR(port) (((port) * 0x100) + 0x80a8)
+#define ENETC_IERB_FMBDTR 0xa000
+
+#define ENETC_RESERVED_FOR_ICM 1024
+
+struct enetc_ierb {
+ void __iomem *regs;
+};
+
+static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val)
+{
+ iowrite32(val, ierb->regs + offset);
+}
+
+int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev)
+{
+ struct enetc_ierb *ierb = platform_get_drvdata(pdev);
+ int port = enetc_pf_to_port(pf_pdev);
+ u16 tx_credit, rx_credit, tx_alloc;
+
+ if (port < 0)
+ return -ENODEV;
+
+ if (!ierb)
+ return -EPROBE_DEFER;
+
+ /* By default, it is recommended to set the Host Transfer Agent
+ * per port transmit byte credit to "1000 + max_frame_size/2".
+ * The power-on reset value (1800 bytes) is this formula rounded up to
+ * the nearest 100, assuming a maximum frame size of 1536 bytes.
+ */
+ tx_credit = roundup(1000 + ENETC_MAC_MAXFRM_SIZE / 2, 100);
+
+ /* Internal memory allocated for transmit buffering is guaranteed but
+ * not reserved; i.e. if the total transmit allocation is not used,
+ * then the unused portion is not left idle: it can be used for receive
+ * buffering, but it will be reclaimed, if required, from receive by
+ * intelligently dropping already stored receive frames in the internal
+ * memory to ensure that the transmit allocation is respected.
+ *
+ * PaTXMBAR must be set to a value larger than
+ * PaTXBCR + 2 * max_frame_size + 32
+ * if frame preemption is not enabled, or to
+ * 2 * PaTXBCR + 2 * p_max_frame_size (pMAC maximum frame size) +
+ * 2 * np_max_frame_size (eMAC maximum frame size) + 64
+ * if frame preemption is enabled.
+ */
+ tx_alloc = roundup(2 * tx_credit + 4 * ENETC_MAC_MAXFRM_SIZE + 64, 16);
+
+ /* Initial credits, in units of 8 bytes, to the Ingress Congestion
+ * Manager for the maximum amount of bytes the port is allocated for
+ * pending traffic.
+ * It is recommended to set the initial credits to 2 times the maximum
+ * frame size (2 frames of maximum size).
+ */
+ rx_credit = DIV_ROUND_UP(ENETC_MAC_MAXFRM_SIZE * 2, 8);
+
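
Plugging in ENETC_MAC_MAXFRM_SIZE == 1536, the value the comments above assume, the three computations work out as follows (a worked example, not part of the patch):

u16 tx_credit = roundup(1000 + 1536 / 2, 100);          /* = 1800 bytes */
u16 tx_alloc  = roundup(2 * 1800 + 4 * 1536 + 64, 16);  /* = 9808 bytes */
u16 rx_credit = DIV_ROUND_UP(2 * 1536, 8);              /* = 384, in 8-byte units */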
+ enetc_ierb_write(ierb, ENETC_IERB_TXBCR(port), tx_credit);
+ enetc_ierb_write(ierb, ENETC_IERB_TXMBAR(port), tx_alloc);
+ enetc_ierb_write(ierb, ENETC_IERB_RXBCR(port), rx_credit);
+
+ return 0;
+}
+EXPORT_SYMBOL(enetc_ierb_register_pf);
+
+static int enetc_ierb_probe(struct platform_device *pdev)
+{
+ struct enetc_ierb *ierb;
+ struct resource *res;
+ void __iomem *regs;
+
+ ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL);
+ if (!ierb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ierb->regs = regs;
+
+ /* Free buffer depletion threshold in bytes.
+ * This sets the minimum amount of free buffer memory that should be
+ * maintained in the datapath subsystem. When the amount of free
+ * buffer memory falls below this threshold, a depletion indication is
+ * asserted, which may trigger "intelligent drop" frame releases from
+ * the ingress queues in the ICM.
+ * It is recommended to set the free buffer depletion threshold to 1024
+ * bytes, since the ICM needs some FIFO memory for its own use.
+ */
+ enetc_ierb_write(ierb, ENETC_IERB_FMBDTR, ENETC_RESERVED_FOR_ICM);
+
+ platform_set_drvdata(pdev, ierb);
+
+ return 0;
+}
+
+static int enetc_ierb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id enetc_ierb_match[] = {
+ { .compatible = "fsl,ls1028a-enetc-ierb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, enetc_ierb_match);
+
+static struct platform_driver enetc_ierb_driver = {
+ .driver = {
+ .name = "fsl-enetc-ierb",
+ .of_match_table = enetc_ierb_match,
+ },
+ .probe = enetc_ierb_probe,
+ .remove = enetc_ierb_remove,
+};
+
+module_platform_driver(enetc_ierb_driver);
+
+MODULE_DESCRIPTION("NXP ENETC IERB");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
new file mode 100644
index 000000000000..b3b774e0998a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2021 NXP Semiconductors */
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#if IS_ENABLED(CONFIG_FSL_ENETC_IERB)
+
+int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev);
+
+#else
+
+static inline int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 224fc37a6757..31274325159a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -4,8 +4,10 @@
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include "enetc_ierb.h"
#include "enetc_pf.h"
#define ENETC_DRV_NAME_STR "ENETC PF driver"
@@ -129,16 +131,20 @@ static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
}
static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
- u32 *hash)
+ unsigned long hash)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
if (type == UC) {
- enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
- enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err),
+ lower_32_bits(hash));
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx),
+ upper_32_bits(hash));
} else { /* MC */
- enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
- enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err),
+ lower_32_bits(hash));
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx),
+ upper_32_bits(hash));
}
}
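
The filter hash is now carried as a single 64-bit bitmap and split across the two 32-bit filter registers with lower_32_bits()/upper_32_bits(). A small sketch of the split, with hypothetical bit positions (assuming a 64-bit unsigned long, as on the arm64 LS1028A):

unsigned long hash = 0;
u32 lo, hi;

set_bit(3, &hash);		/* lands in bit 3 of the HFR0 register */
set_bit(40, &hash);		/* lands in bit 8 of the HFR1 register */

lo = lower_32_bits(hash);	/* 0x00000008 */
hi = upper_32_bits(hash);	/* 0x00000100 */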
@@ -182,7 +188,7 @@ static void enetc_sync_mac_filters(struct enetc_pf *pf)
if (i == UC)
enetc_clear_mac_flt_entry(si, pos);
- enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
+ enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table);
}
}
@@ -248,10 +254,10 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
}
static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- u32 *hash)
+ unsigned long hash)
{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
+ enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
}
static int enetc_vid_hash_idx(unsigned int vid)
@@ -279,7 +285,7 @@ static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
}
}
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
+ enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
}
static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
@@ -386,23 +392,54 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
return 0;
}
-static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
{
- unsigned char mac_addr[MAX_ADDR_LEN];
- struct enetc_pf *pf = enetc_si_priv(si);
- struct enetc_hw *hw = &si->hw;
- int i;
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_hw *hw = &pf->si->hw;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
- /* check MAC addresses for PF and all VFs, if any is 0 set it ro rand */
- for (i = 0; i < pf->total_vfs + 1; i++) {
- enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
- if (!is_zero_ether_addr(mac_addr))
- continue;
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
eth_random_addr(mac_addr);
- dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
- i, mac_addr);
- enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
+
+ return 0;
+}
+
+static int enetc_setup_mac_addresses(struct device_node *np,
+ struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
}
+
+ return 0;
}
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
@@ -483,7 +520,6 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
- enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -558,9 +594,6 @@ static void enetc_configure_port(struct enetc_pf *pf)
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
- /* fix-up primary MAC addresses, if not set already */
- enetc_port_setup_primary_mac_address(pf->si);
-
/* enforce VLAN promisc mode for all SIs */
pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
@@ -703,6 +736,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_pf_set_features,
.ndo_do_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
+ .ndo_bpf = enetc_setup_bpf,
+ .ndo_xdp_xmit = enetc_xdp_xmit,
};
static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -979,7 +1014,12 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
int duplex, bool tx_pause, bool rx_pause)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
struct enetc_ndev_priv *priv;
+ u32 rbmr, cmd_cfg;
+ int idx;
priv = netdev_priv(pf->si->ndev);
if (priv->active_offloads & ENETC_F_QBV)
@@ -987,9 +1027,60 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
if (!phylink_autoneg_inband(mode) &&
phy_interface_mode_is_rgmii(interface))
- enetc_force_rgmii_mac(&pf->si->hw, speed, duplex);
+ enetc_force_rgmii_mac(hw, speed, duplex);
+
+ /* Flow control */
+ for (idx = 0; idx < priv->num_rx_rings; idx++) {
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+
+ if (tx_pause)
+ rbmr |= ENETC_RBMR_CM;
+ else
+ rbmr &= ~ENETC_RBMR_CM;
- enetc_mac_enable(&pf->si->hw, true);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or an equivalent
+ * amount of smaller frames) have accumulated in the FIFO waiting to
+ * be DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
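
For a sense of scale: a PAUSE quantum is 512 bit times per IEEE 802.3, so at 1 Gbps one quantum lasts 512 ns. Under that assumption the values programmed above translate roughly as follows (illustrative arithmetic, not from the patch):

/*
 *   init_quanta      = 0xffff      -> a little over 33 ms of requested pause
 *   refresh_quanta   = 0xffff / 2  -> a follow-up PAUSE roughly every 16.8 ms
 *   pause_on_thresh  = 3 * 1536    -> start pausing at 4608 FIFO bytes
 *   pause_off_thresh = 1 * 1536    -> stop pausing below 1536 FIFO bytes
 */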
+ enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
+ enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
+ enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
+
+ cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+
+ if (rx_pause)
+ cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
+ else
+ cmd_cfg |= ENETC_PM0_PAUSE_IGN;
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(hw, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1081,24 +1172,28 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
return err;
}
-static void enetc_init_unused_port(struct enetc_si *si)
+static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
- struct device *dev = &si->pdev->dev;
- struct enetc_hw *hw = &si->hw;
- int err;
+ struct device_node *node = pdev->dev.of_node;
+ struct platform_device *ierb_pdev;
+ struct device_node *ierb_node;
- si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
- err = enetc_alloc_cbdr(dev, &si->cbd_ring);
- if (err)
- return;
+ /* Don't register with the IERB if the PF itself is disabled */
+ if (!node || !of_device_is_available(node))
+ return 0;
+
+ ierb_node = of_find_compatible_node(NULL, NULL,
+ "fsl,ls1028a-enetc-ierb");
+ if (!ierb_node || !of_device_is_available(ierb_node))
+ return -ENODEV;
- enetc_setup_cbdr(hw, &si->cbd_ring);
+ ierb_pdev = of_find_device_by_node(ierb_node);
+ of_node_put(ierb_node);
- enetc_init_port_rfs_memory(si);
- enetc_init_port_rss_memory(si);
+ if (!ierb_pdev)
+ return -EPROBE_DEFER;
- enetc_clear_cbdr(hw);
- enetc_free_cbdr(dev, &si->cbd_ring);
+ return enetc_ierb_register_pf(ierb_pdev, pdev);
}
static int enetc_pf_probe(struct pci_dev *pdev,
@@ -1111,6 +1206,14 @@ static int enetc_pf_probe(struct pci_dev *pdev,
struct enetc_pf *pf;
int err;
+ err = enetc_pf_register_with_ierb(pdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err)
+ dev_warn(&pdev->dev,
+ "Could not register with IERB driver: %pe, please update the device tree\n",
+ ERR_PTR(err));
+
err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
if (err) {
dev_err(&pdev->dev, "PCI probing failed\n");
@@ -1124,8 +1227,24 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_map_pf_space;
}
+ err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
+ &si->cbd_ring);
+ if (err)
+ goto err_setup_cbdr;
+
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
if (node && !of_device_is_available(node)) {
- enetc_init_unused_port(si);
dev_info(&pdev->dev, "device is disabled, skipping\n");
err = -ENODEV;
goto err_device_disabled;
@@ -1135,6 +1254,10 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf->si = si;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ err = enetc_setup_mac_addresses(node, pf);
+ if (err)
+ goto err_setup_mac_addresses;
+
enetc_configure_port(pf);
enetc_get_si_caps(si);
@@ -1158,18 +1281,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
- err = enetc_init_port_rfs_memory(si);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
- goto err_init_port_rfs;
- }
-
- err = enetc_init_port_rss_memory(si);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
- goto err_init_port_rss;
- }
-
err = enetc_configure_si(priv);
if (err) {
dev_err(&pdev->dev, "Failed to configure SI\n");
@@ -1205,15 +1316,18 @@ err_phylink_create:
err_mdiobus_create:
enetc_free_msix(priv);
err_config_si:
-err_init_port_rss:
-err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
+err_init_port_rss:
+err_init_port_rfs:
err_device_disabled:
+err_setup_mac_addresses:
+ enetc_teardown_cbdr(&si->cbd_ring);
+err_setup_cbdr:
err_map_pf_space:
enetc_pci_remove(pdev);
@@ -1239,6 +1353,7 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_msix(priv);
enetc_free_si_resources(priv);
+ enetc_teardown_cbdr(&si->cbd_ring);
free_netdev(si->ndev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index a9aee219fb58..af699f2ad095 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -455,11 +455,6 @@ static struct enetc_psfp epsfp = {
static LIST_HEAD(enetc_block_cb_list);
-static inline int enetc_get_port(struct enetc_ndev_priv *priv)
-{
- return priv->si->pdev->devfn & 0x7;
-}
-
/* Stream Identity Entry Set Descriptor */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_streamid *sid,
@@ -504,7 +499,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf = &cbd.sid_set;
/* Only one port supported for one entry, set itself */
- si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
+ si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
si_conf->id_type = 1;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -529,7 +524,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf->en = 0x80;
si_conf->stream_handle = cpu_to_le32(sid->handle);
- si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
+ si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
si_conf->id_type = sid->filtertype;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -591,7 +586,8 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
}
sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
- sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv));
+ sfi_config->input_ports =
+ cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
/* The priority value which may be matched against the
* frame’s priority value to determine a match for this entry.
@@ -1221,6 +1217,11 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
+ if (entryp->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
+ err = -EOPNOTSUPP;
+ goto free_sfi;
+ }
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
@@ -1557,10 +1558,10 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
switch (f->command) {
case FLOW_BLOCK_BIND:
- set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
break;
case FLOW_BLOCK_UNBIND:
- clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
if (!epsfp.dev_bitmap)
clean_psfp_all();
break;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 9b755a84c2d6..03090ba7e226 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -165,6 +165,11 @@ static int enetc_vf_probe(struct pci_dev *pdev,
enetc_init_si_rings_params(priv);
+ err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
+ &si->cbd_ring);
+ if (err)
+ goto err_setup_cbdr;
+
err = enetc_alloc_si_resources(priv);
if (err) {
dev_err(&pdev->dev, "SI resource alloc failed\n");
@@ -197,6 +202,8 @@ err_config_si:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
+ enetc_teardown_cbdr(&si->cbd_ring);
+err_setup_cbdr:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
@@ -216,6 +223,7 @@ static void enetc_vf_remove(struct pci_dev *pdev)
enetc_free_msix(priv);
enetc_free_si_resources(priv);
+ enetc_teardown_cbdr(&si->cbd_ring);
free_netdev(si->ndev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3db882322b2b..f2065f9d02e6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -38,6 +38,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -1665,6 +1666,7 @@ static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
+ int ret;
/*
* try to get mac address in following order:
@@ -1680,9 +1682,9 @@ static void fec_get_mac(struct net_device *ndev)
if (!is_valid_ether_addr(iap)) {
struct device_node *np = fep->pdev->dev.of_node;
if (np) {
- const char *mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
- iap = (unsigned char *) mac;
+ ret = of_get_mac_address(np, tmpaddr);
+ if (!ret)
+ iap = tmpaddr;
}
}
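
This hunk, and the similar ones below, convert callers to the new of_get_mac_address() prototype, which fills a caller-supplied buffer and returns 0 on success instead of returning a pointer or ERR_PTR. A minimal usage sketch of the new API:

u8 addr[ETH_ALEN];
int err;

err = of_get_mac_address(np, addr);	/* 0 on success */
if (err == -EPROBE_DEFER)
	return err;			/* e.g. NVMEM provider not ready yet */
if (err)
	eth_random_addr(addr);		/* fall back, as several callers here do */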
@@ -2048,6 +2050,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
+ phy_dev->mac_managed_pm = 1;
+
phy_attached_info(phy_dev);
return 0;
@@ -2479,6 +2483,9 @@ static void fec_enet_get_strings(struct net_device *netdev,
memcpy(data + i * ETH_GSTRING_LEN,
fec_stats[i].name, ETH_GSTRING_LEN);
break;
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
}
}
@@ -2487,6 +2494,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(fec_stats);
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
default:
return -EOPNOTSUPP;
}
@@ -2738,6 +2747,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_wol = fec_enet_set_wol,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .self_test = net_selftest,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -3864,6 +3874,7 @@ static int __maybe_unused fec_resume(struct device *dev)
netif_device_attach(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
+ phy_init_hw(ndev->phydev);
phy_start(ndev->phydev);
}
rtnl_unlock();
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b3bad429e03b..02c47658a215 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -813,7 +813,6 @@ static int mpc52xx_fec_probe(struct platform_device *op)
const u32 *prop;
int prop_size;
struct device_node *np = op->dev.of_node;
- const char *mac_addr;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
@@ -891,10 +890,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*
* First try to read MAC address from DT
*/
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(ndev->dev_addr, mac_addr);
- } else {
+ rv = of_get_mac_address(np, ndev->dev_addr);
+ if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
/*
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 901749a7a318..46ecb42f2ef8 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -605,7 +605,6 @@ static int mac_probe(struct platform_device *_of_dev)
struct platform_device *of_dev;
struct resource res;
struct mac_priv_s *priv;
- const u8 *mac_addr;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
@@ -723,11 +722,9 @@ static int mac_probe(struct platform_device *_of_dev)
priv->cell_index = (u8)val;
/* Get the MAC address */
- mac_addr = of_get_mac_address(mac_node);
- if (IS_ERR(mac_addr))
+ err = of_get_mac_address(mac_node, mac_dev->addr);
+ if (err)
dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
- else
- ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -853,7 +850,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0)
dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
- if (!IS_ERR(mac_addr))
+ if (!is_zero_ether_addr(mac_dev->addr))
dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 78e008b81374..6ee325ad35c5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -918,7 +918,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
const u32 *data;
struct clk *clk;
int err;
- const u8 *mac_addr;
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
@@ -1006,9 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- mac_addr = of_get_mac_address(ofdev->dev.of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
+ of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3ec4d9fddd52..f2945abdb041 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -175,10 +175,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
/* Program the RIR0 reg with the required distribution */
- if (priv->poll_mode == GFAR_SQ_POLLING)
- gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
- else /* GFAR_MQ_POLLING */
- gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
+ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
}
/* Restore PROMISC mode */
@@ -521,29 +518,9 @@ static int gfar_parse_group(struct device_node *np,
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
- u32 rxq_mask, txq_mask;
- int ret;
-
+ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
-
- ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
- if (!ret) {
- grp->rx_bit_map = rxq_mask ?
- rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
- }
-
- ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
- if (!ret) {
- grp->tx_bit_map = txq_mask ?
- txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
- }
-
- if (priv->poll_mode == GFAR_SQ_POLLING) {
- /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
- grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
- grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
- }
} else {
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
@@ -640,7 +617,6 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
- const void *mac_addr;
int err = 0, i;
phy_interface_t interface;
struct net_device *dev = NULL;
@@ -650,18 +626,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
u32 stash_len = 0;
u32 stash_idx = 0;
unsigned int num_tx_qs, num_rx_qs;
- unsigned short mode, poll_mode;
+ unsigned short mode;
if (!np)
return -ENODEV;
- if (of_device_is_compatible(np, "fsl,etsec2")) {
+ if (of_device_is_compatible(np, "fsl,etsec2"))
mode = MQ_MG_MODE;
- poll_mode = GFAR_SQ_POLLING;
- } else {
+ else
mode = SQ_SG_MODE;
- poll_mode = GFAR_SQ_POLLING;
- }
if (mode == SQ_SG_MODE) {
num_tx_qs = 1;
@@ -677,22 +650,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -EINVAL;
}
- if (poll_mode == GFAR_SQ_POLLING) {
- num_tx_qs = num_grps; /* one txq per int group */
- num_rx_qs = num_grps; /* one rxq per int group */
- } else { /* GFAR_MQ_POLLING */
- u32 tx_queues, rx_queues;
- int ret;
-
- /* parse the num of HW tx and rx queues */
- ret = of_property_read_u32(np, "fsl,num_tx_queues",
- &tx_queues);
- num_tx_qs = ret ? 1 : tx_queues;
-
- ret = of_property_read_u32(np, "fsl,num_rx_queues",
- &rx_queues);
- num_rx_qs = ret ? 1 : rx_queues;
- }
+ num_tx_qs = num_grps; /* one txq per int group */
+ num_rx_qs = num_grps; /* one rxq per int group */
}
if (num_tx_qs > MAX_TX_QS) {
@@ -718,7 +677,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->ndev = dev;
priv->mode = mode;
- priv->poll_mode = poll_mode;
priv->num_tx_queues = num_tx_qs;
netif_set_real_num_rx_queues(dev, num_rx_qs);
@@ -782,11 +740,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
- mac_addr = of_get_mac_address(np);
-
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->dev_addr, mac_addr);
- } else {
+ err = of_get_mac_address(np, dev->dev_addr);
+ if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
}
@@ -2695,106 +2650,6 @@ static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
return 0;
}
-static int gfar_poll_rx(struct napi_struct *napi, int budget)
-{
- struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi_rx);
- struct gfar_private *priv = gfargrp->priv;
- struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_rx_q *rx_queue = NULL;
- int work_done = 0, work_done_per_q = 0;
- int i, budget_per_q = 0;
- unsigned long rstat_rxf;
- int num_act_queues;
-
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived
- */
- gfar_write(&regs->ievent, IEVENT_RX_MASK);
-
- rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
-
- num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
- if (num_act_queues)
- budget_per_q = budget/num_act_queues;
-
- for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- /* skip queue if not active */
- if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
- continue;
-
- rx_queue = priv->rx_queue[i];
- work_done_per_q =
- gfar_clean_rx_ring(rx_queue, budget_per_q);
- work_done += work_done_per_q;
-
- /* finished processing this queue */
- if (work_done_per_q < budget_per_q) {
- /* clear active queue hw indication */
- gfar_write(&regs->rstat,
- RSTAT_CLEAR_RXF0 >> i);
- num_act_queues--;
-
- if (!num_act_queues)
- break;
- }
- }
-
- if (!num_act_queues) {
- u32 imask;
- napi_complete_done(napi, work_done);
-
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
-
- spin_lock_irq(&gfargrp->grplock);
- imask = gfar_read(&regs->imask);
- imask |= IMASK_RX_DEFAULT;
- gfar_write(&regs->imask, imask);
- spin_unlock_irq(&gfargrp->grplock);
- }
-
- return work_done;
-}
-
-static int gfar_poll_tx(struct napi_struct *napi, int budget)
-{
- struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi_tx);
- struct gfar_private *priv = gfargrp->priv;
- struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_tx_q *tx_queue = NULL;
- int has_tx_work = 0;
- int i;
-
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived
- */
- gfar_write(&regs->ievent, IEVENT_TX_MASK);
-
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
- }
-
- if (!has_tx_work) {
- u32 imask;
- napi_complete(napi);
-
- spin_lock_irq(&gfargrp->grplock);
- imask = gfar_read(&regs->imask);
- imask |= IMASK_TX_DEFAULT;
- gfar_write(&regs->imask, imask);
- spin_unlock_irq(&gfargrp->grplock);
- }
-
- return 0;
-}
-
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
@@ -3352,17 +3207,10 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
- if (priv->poll_mode == GFAR_SQ_POLLING) {
- netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx_sq, 2);
- } else {
- netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx, GFAR_DEV_WEIGHT);
- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx, 2);
- }
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+ netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 8ced783f5302..5ea47df93e5e 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -909,22 +909,6 @@ enum {
MQ_MG_MODE
};
-/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
- * The driver supports a single pair of RX/Tx queues
- * per interrupt group (Rx/Tx int line). MQ_MG mode
- * devices have 2 interrupt groups, so the device will
- * have a total of 2 Tx and 2 Rx queues in this case.
- * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
- * The driver supports all the 8 Rx and Tx HW queues
- * each queue mapped by the Device Tree to one of
- * the 2 interrupt groups. This mode implies significant
- * processing overhead (CPU and controller level).
- */
-enum gfar_poll_mode {
- GFAR_SQ_POLLING = 0,
- GFAR_MQ_POLLING
-};
-
/*
* Per TX queue stats
*/
@@ -1105,7 +1089,6 @@ struct gfar_private {
unsigned long state;
unsigned short mode;
- unsigned short poll_mode;
unsigned int num_tx_queues;
unsigned int num_rx_queues;
unsigned int num_grps;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ef4e2febeb5b..e0936510fa34 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3562,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
- const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
SPEED_10, SPEED_10, SPEED_10,
@@ -3733,9 +3732,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
goto err_free_netdev;
}
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(dev->dev_addr, mac_addr);
+ of_get_mac_address(np, dev->dev_addr);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 0901fa6853ca..5fb05cf36b49 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -14,9 +14,9 @@ static void gve_get_drvinfo(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
- strlcpy(info->driver, "gve", sizeof(info->driver));
- strlcpy(info->version, gve_version_str, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "gve", sizeof(info->driver));
+ strscpy(info->version, gve_version_str, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}
static void gve_set_msglevel(struct net_device *netdev, u32 value)
@@ -388,7 +388,7 @@ static int gve_set_channels(struct net_device *netdev,
gve_get_channels(netdev, &old_settings);
- /* Changing combined is not allowed allowed */
+ /* Changing combined is not allowed */
if (cmd->combined_count != old_settings.combined_count)
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 57c3bc4f7089..3c4db4a6b431 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -772,7 +772,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hisi_femac_priv *priv;
struct phy_device *phy;
- const char *mac_addr;
int ret;
ndev = alloc_etherdev(sizeof(*priv));
@@ -842,10 +841,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
(unsigned long)phy->phy_id,
phy_modes(phy->interface));
- mac_addr = of_get_mac_address(node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
- if (!is_valid_ether_addr(ndev->dev_addr)) {
+ ret = of_get_mac_address(node, ndev->dev_addr);
+ if (ret) {
eth_hw_addr_random(ndev);
dev_warn(dev, "using random MAC address %pM\n",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 8b2bf85039f1..c1aae0fca5e9 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1098,7 +1098,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hix5hd2_priv *priv;
struct mii_bus *bus;
- const char *mac_addr;
int ret;
ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
@@ -1220,10 +1219,8 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_phy_node;
}
- mac_addr = of_get_mac_address(node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
- if (!is_valid_ether_addr(ndev->dev_addr)) {
+ ret = of_get_mac_address(node, ndev->dev_addr);
+ if (ret) {
eth_hw_addr_random(ndev);
netdev_warn(ndev, "using random MAC address %pM\n",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 6ab9458302e1..2b7db1c22321 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -414,10 +414,6 @@ enum hnae_media_type {
* get ring bd number limit
* get_pauseparam()
* get tx and rx of pause frame use
- * set_autoneg()
- * set auto autonegotiation of pause frame use
- * get_autoneg()
- * get auto autonegotiation of pause frame use
* set_pauseparam()
* set tx and rx of pause frame use
* get_coalesce_usecs()
@@ -487,8 +483,6 @@ struct hnae_ae_ops {
u32 *uplimit);
void (*get_pauseparam)(struct hnae_handle *handle,
u32 *auto_neg, u32 *rx_en, u32 *tx_en);
- int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
- int (*get_autoneg)(struct hnae_handle *handle);
int (*set_pauseparam)(struct hnae_handle *handle,
u32 auto_neg, u32 rx_en, u32 tx_en);
void (*get_coalesce_usecs)(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b98244f75ab9..c615fbf9094e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -487,13 +487,6 @@ static void hns_ae_get_pauseparam(struct hnae_handle *handle,
hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
}
-static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
-{
- assert(handle);
-
- return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
-}
-
static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
@@ -502,17 +495,6 @@ static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
hns_mac_set_promisc(mac_cb, (u8)!!en);
}
-static int hns_ae_get_autoneg(struct hnae_handle *handle)
-{
- u32 auto_neg;
-
- assert(handle);
-
- hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
-
- return auto_neg;
-}
-
static int hns_ae_set_pauseparam(struct hnae_handle *handle,
u32 autoneg, u32 rx_en, u32 tx_en)
{
@@ -648,7 +630,7 @@ static void hns_ae_update_stats(struct hnae_handle *handle,
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
- u64 rx_missed_errors = 0;
+ u64 rx_missed_errors;
dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
if (!dsaf_dev)
@@ -965,8 +947,6 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.set_loopback = hns_ae_config_loopback,
.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
.get_pauseparam = hns_ae_get_pauseparam,
- .set_autoneg = hns_ae_set_autoneg,
- .get_autoneg = hns_ae_get_autoneg,
.set_pauseparam = hns_ae_set_pauseparam,
.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
.get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 7fb7a419607d..f387a859a201 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -130,14 +130,6 @@ static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
}
-static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG,
- GMAC_PAUSE_EN_RX_FDFC_B, !!newval);
-}
-
static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -179,14 +171,6 @@ static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
}
-static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
- GMAC_DUPLEX_TYPE_B, !!newval);
-}
-
static void hns_gmac_get_duplex_type(void *mac_drv,
enum hns_gmac_duplex_mdoe *duplex_mode)
{
@@ -687,17 +671,14 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data)
static void hns_gmac_get_strings(u32 stringset, u8 *data)
{
- char *buff = (char *)data;
+ u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
- snprintf(buff, ETH_GSTRING_LEN, "%s",
- g_gmac_stats_string[i].desc);
- buff = buff + ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
+ ethtool_sprintf(&buff, g_gmac_stats_string[i].desc);
}
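
ethtool_sprintf() formats one string into the ethtool strings buffer and advances the cursor by ETH_GSTRING_LEN, which is what the removed snprintf/pointer-bump pairs did by hand. A rough open-coded equivalent (a sketch; see include/linux/ethtool.h for the real helper):

static void strings_emit(u8 **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf((char *)*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	*data += ETH_GSTRING_LEN;
}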
static int hns_gmac_get_sset_count(int stringset)
@@ -741,8 +722,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->set_an_mode = hns_gmac_config_an_mode;
mac_drv->config_loopback = hns_gmac_config_loopback;
mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
- mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
- mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
mac_drv->get_info = hns_gmac_get_info;
mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 4a448138b4ec..f4cf569a2599 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1202,7 +1202,7 @@ void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
void hns_set_led_opt(struct hns_mac_cb *mac_cb)
{
- int nic_data = 0;
+ int nic_data;
int txpkts, rxpkts;
txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 3278bf471ddf..8943ffab4418 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -364,12 +364,8 @@ struct mac_driver {
void (*config_max_frame_length)(void *mac_drv, u16 newval);
/*config PAD and CRC enable */
void (*config_pad_and_crc)(void *mac_drv, u8 newval);
- /* config duplex mode*/
- void (*config_half_duplex)(void *mac_drv, u8 newval);
/*config tx pause time,if pause_time is zero,disable tx pause enable*/
void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
- /*config rx pause enable*/
- void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
/* config rx mode for promiscuous*/
void (*set_promiscuous)(void *mac_drv, u8 enable);
void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 87d3db4666df..c2a60612f503 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1613,7 +1613,7 @@ int hns_dsaf_set_mac_uc_entry(
struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry)
{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_drv_tbl_tcam_key mac_key;
struct dsaf_tbl_tcam_ucast_cfg mac_data;
struct dsaf_drv_priv *priv =
@@ -1679,7 +1679,7 @@ int hns_dsaf_rm_mac_addr(
struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry)
{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_tbl_tcam_ucast_cfg mac_data;
struct dsaf_drv_tbl_tcam_key mac_key;
@@ -1751,7 +1751,7 @@ static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry)
{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_drv_tbl_tcam_key mac_key;
struct dsaf_drv_tbl_tcam_key mask_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
@@ -1861,7 +1861,7 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
u8 in_port_num, u8 *addr)
{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_drv_tbl_tcam_key mac_key;
struct dsaf_drv_priv *priv =
(struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
@@ -1910,7 +1910,7 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry)
{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_drv_tbl_tcam_key mac_key;
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
@@ -2264,7 +2264,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
*/
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
{
- u32 i = 0;
+ u32 i;
u32 j;
u32 *p = data;
u32 reg_tmp;
@@ -2768,7 +2768,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
struct dsaf_drv_mac_single_dest_entry mask_entry;
struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_drv_tbl_tcam_key mac_key;
struct hns_mac_cb *mac_cb;
u8 addr[ETH_ALEN] = {0};
@@ -2870,7 +2870,7 @@ static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ u16 entry_index;
struct dsaf_drv_tbl_tcam_key mac_key;
u8 addr[ETH_ALEN] = {0};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 173d6966c1a3..325e81d30cfd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -686,7 +686,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
obj_args[0].integer.type = ACPI_TYPE_INTEGER;
obj_args[0].integer.value = mac_cb->mac_id;
obj_args[1].integer.type = ACPI_TYPE_INTEGER;
- obj_args[1].integer.value = !!en;
+ obj_args[1].integer.value = en;
argv4.type = ACPI_TYPE_PACKAGE;
argv4.package.count = 2;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index d0f8b1fff333..ff03cafccb66 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -462,33 +462,22 @@ int hns_ppe_get_regs_count(void)
*/
void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
{
- char *buff = (char *)data;
int index = ppe_cb->index;
-
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index);
- buff = buff + ETH_GSTRING_LEN;
-
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index);
+ u8 *buff = data;
+
+ ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index);
+
+ ethtool_sprintf(&buff, "ppe%d_tx_bd", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index);
}
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
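The conversion above replaces each snprintf()-plus-manual-advance pair with ethtool_sprintf(), which formats into the current ETH_GSTRING_LEN slot and then advances the cursor by one whole slot itself. A minimal user-space sketch of that contract (ETH_GSTRING_LEN is 32 in the ethtool ABI; sketch_ethtool_sprintf is a stand-in for illustration, not the kernel implementation):

/* Sketch of the observable ethtool_sprintf() semantics: format into
 * the current fixed-size string slot, then advance the cursor one
 * slot regardless of the printed length.
 */
#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* from uapi/linux/ethtool.h */

static void sketch_ethtool_sprintf(unsigned char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf((char *)*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	*data += ETH_GSTRING_LEN;	/* one fixed-size slot per string */
}

int main(void)
{
	unsigned char strings[3 * ETH_GSTRING_LEN];
	unsigned char *buff = strings;

	sketch_ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", 0);
	sketch_ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", 0);
	sketch_ethtool_sprintf(&buff, "ppe%d_tx_bd", 0);

	printf("%s / %s\n", (char *)strings, (char *)strings + ETH_GSTRING_LEN);
	return 0;
}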
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index b6c8910cf7ba..5d5dc6942232 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -929,69 +929,42 @@ int hns_rcb_get_ring_regs_count(void)
*/
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
- char *buff = (char *)data;
+ u8 *buff = data;
if (stringset != ETH_SS_STATS)
return;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
- buff = buff + ETH_GSTRING_LEN;
-
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
- buff = buff + ETH_GSTRING_LEN;
-
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
- buff = buff + ETH_GSTRING_LEN;
-
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
+ ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_bytes", index);
+ ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index);
+ ethtool_sprintf(&buff, "tx_ring%d_io_err", index);
+ ethtool_sprintf(&buff, "tx_ring%d_sw_err", index);
+ ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index);
+ ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index);
+ ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index);
+
+ ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_bytes", index);
+ ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index);
+ ethtool_sprintf(&buff, "rx_ring%d_io_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_sw_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index);
+ ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index);
+ ethtool_sprintf(&buff, "rx_ring%d_len_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_l2_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index);
}
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
@@ -1001,7 +974,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
u32 reg_tmp;
u32 reg_num_tmp;
- u32 i = 0;
+ u32 i;
/*rcb common registers */
regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
@@ -1072,7 +1045,7 @@ void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
u32 *regs = data;
struct ring_pair_cb *ring_pair
= container_of(queue, struct ring_pair_cb, q);
- u32 i = 0;
+ u32 i;
/*rcb ring registers */
regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 7e3609ce112a..be52acd448f9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -267,19 +267,6 @@ static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
}
/**
- *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac
- *@mac_drv: mac driver
- *@enable:enable rx pause param
- */
-static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
- XGMAC_PAUSE_CTL_RX_B, !!enable);
-}
-
-/**
*hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac
*@mac_drv: mac driver
*@enable:enable tx pause param
@@ -495,7 +482,7 @@ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
*/
static void hns_xgmac_get_regs(void *mac_drv, void *data)
{
- u32 i = 0;
+ u32 i;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
u32 *regs = data;
u64 qtmp;
@@ -758,16 +745,14 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
*/
static void hns_xgmac_get_strings(u32 stringset, u8 *data)
{
- char *buff = (char *)data;
+ u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
- snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc);
- buff = buff + ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
+ ethtool_sprintf(&buff, g_xgmac_stats_string[i].desc);
}
/**
@@ -814,9 +799,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->set_an_mode = NULL;
mac_drv->config_loopback = NULL;
mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
- mac_drv->config_half_duplex = NULL;
- mac_drv->set_rx_ignore_pause_frames =
- hns_xgmac_set_rx_ignore_pause_frames;
mac_drv->mac_free = hns_xgmac_free;
mac_drv->adjust_link = NULL;
mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c66a7a51198e..5e349c0bdecc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -872,7 +872,7 @@ out:
static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int num = 0;
+ int num;
bool rx_stopped;
hns_update_rx_rate(ring);
@@ -1235,7 +1235,7 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
{
int cpu;
- /* Diffrent irq banlance between 16core and 32core.
+ /* Different irq balance between 16core and 32core.
* The cpu mask set by ring index according to the ring flag
* which indicate the ring is tx or rx.
*/
@@ -1592,7 +1592,7 @@ static void hns_disable_serdes_lb(struct net_device *ndev)
* which buffer size is 4096.
* 2. we set the chip serdes loopback and set rss indirection to the ring.
* 3. construct 64-bytes ip broadcast packages, wait the associated rx ring
- * recieving all packages and it will fetch new descriptions.
+ * receiving all packages and it will fetch new descriptions.
* 4. recover to the original state.
*
*@ndev: net device
@@ -1621,7 +1621,7 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
if (!org_indir)
return -ENOMEM;
- /* store the orginal indirection */
+ /* store the original indirection */
ops->get_rss(h, org_indir, NULL, NULL);
cur_indir = kzalloc(indir_size, GFP_KERNEL);
@@ -1881,7 +1881,7 @@ static void hns_nic_set_rx_mode(struct net_device *ndev)
static void hns_nic_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
- int idx = 0;
+ int idx;
u64 tx_bytes = 0;
u64 rx_bytes = 0;
u64 tx_pkts = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a6e3f07caf99..da48c05435ea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -17,7 +17,6 @@
#define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */
#define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */
#define HNS_LED_FC_REG 16 /* LED Function Control Reg. */
-#define HNS_LED_PC_REG 17 /* LED Polarity Control Reg. */
#define HNS_LED_FORCE_ON 9
#define HNS_LED_FORCE_OFF 8
@@ -480,7 +479,7 @@ static int __lb_run_test(struct net_device *ndev,
#define NIC_LB_TEST_NO_MEM_ERR 1
#define NIC_LB_TEST_TX_CNT_ERR 2
#define NIC_LB_TEST_RX_CNT_ERR 3
-#define NIC_LB_TEST_RX_PKG_ERR 4
+
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int i, j, lc, good_cnt, ret_val = 0;
@@ -895,7 +894,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- char *buff = (char *)data;
+ u8 *buff = data;
if (!h->dev->ops->get_strings) {
netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
@@ -903,74 +902,45 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
if (stringset == ETH_SS_TEST) {
- if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) {
- memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC],
- ETH_GSTRING_LEN);
- buff += ETH_GSTRING_LEN;
- }
- memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
- ETH_GSTRING_LEN);
- buff += ETH_GSTRING_LEN;
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ ethtool_sprintf(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
+ ethtool_sprintf(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
if ((netdev->phydev) && (!netdev->phydev->is_c45))
- memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
- ETH_GSTRING_LEN);
+ ethtool_sprintf(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
} else {
- snprintf(buff, ETH_GSTRING_LEN, "rx_packets");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_packets");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_bytes");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_bytes");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_dropped");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_dropped");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "multicast");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "collisions");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "rx_compressed");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "tx_compressed");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped");
- buff = buff + ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped");
- buff = buff + ETH_GSTRING_LEN;
-
- snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout");
- buff = buff + ETH_GSTRING_LEN;
-
- h->dev->ops->get_strings(h, stringset, (u8 *)buff);
+ ethtool_sprintf(&buff, "rx_packets");
+ ethtool_sprintf(&buff, "tx_packets");
+ ethtool_sprintf(&buff, "rx_bytes");
+ ethtool_sprintf(&buff, "tx_bytes");
+ ethtool_sprintf(&buff, "rx_errors");
+ ethtool_sprintf(&buff, "tx_errors");
+ ethtool_sprintf(&buff, "rx_dropped");
+ ethtool_sprintf(&buff, "tx_dropped");
+ ethtool_sprintf(&buff, "multicast");
+ ethtool_sprintf(&buff, "collisions");
+ ethtool_sprintf(&buff, "rx_over_errors");
+ ethtool_sprintf(&buff, "rx_crc_errors");
+ ethtool_sprintf(&buff, "rx_frame_errors");
+ ethtool_sprintf(&buff, "rx_fifo_errors");
+ ethtool_sprintf(&buff, "rx_missed_errors");
+ ethtool_sprintf(&buff, "tx_aborted_errors");
+ ethtool_sprintf(&buff, "tx_carrier_errors");
+ ethtool_sprintf(&buff, "tx_fifo_errors");
+ ethtool_sprintf(&buff, "tx_heartbeat_errors");
+ ethtool_sprintf(&buff, "rx_length_errors");
+ ethtool_sprintf(&buff, "tx_window_errors");
+ ethtool_sprintf(&buff, "rx_compressed");
+ ethtool_sprintf(&buff, "tx_compressed");
+ ethtool_sprintf(&buff, "netdev_rx_dropped");
+ ethtool_sprintf(&buff, "netdev_tx_dropped");
+
+ ethtool_sprintf(&buff, "netdev_tx_timeout");
+
+ h->dev->ops->get_strings(h, stringset, buff);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 33defa4c180a..a2c17af57fde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -172,4 +172,7 @@ struct hclgevf_mbx_arq_ring {
(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+
+/* PF immediately pushes link status to VFs when the link status changes */
+#define HCLGE_MBX_PUSH_LINK_STATUS_EN BIT(0)
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index e9e60a935f40..1d2189047781 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -90,6 +90,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_HW_PAD_B,
HNAE3_DEV_SUPPORT_STASH_B,
HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
+ HNAE3_DEV_SUPPORT_PAUSE_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -134,6 +135,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_dev_stash_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps)
+#define hnae3_dev_pause_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, (hdev)->ae_dev->caps)
+
#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
@@ -470,8 +474,9 @@ struct hnae3_ae_dev {
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
- void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
- void (*flr_done)(struct hnae3_ae_dev *ae_dev);
+ void (*reset_prepare)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
+ void (*reset_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
@@ -575,7 +580,7 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
- int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
+ int (*reset_queue)(struct hnae3_handle *handle);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
@@ -608,8 +613,6 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd);
int (*del_fd_entry)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd);
- void (*del_all_fd_entries)(struct hnae3_handle *handle,
- bool clear_list);
int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd);
int (*get_fd_rule_info)(struct hnae3_handle *handle,
@@ -649,6 +652,10 @@ struct hnae3_ae_ops {
int (*del_cls_flower)(struct hnae3_handle *handle,
struct flow_cls_offload *cls_flower);
bool (*cls_flower_active)(struct hnae3_handle *handle);
+ int (*get_phy_link_ksettings)(struct hnae3_handle *handle,
+ struct ethtool_link_ksettings *cmd);
+ int (*set_phy_link_ksettings)(struct hnae3_handle *handle,
+ const struct ethtool_link_ksettings *cmd);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index dd11c57027bb..9d702bd0c7c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -362,6 +362,11 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n",
test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ?
"yes" : "no");
+ dev_info(&h->pdev->dev, "support PAUSE: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps) ?
+ "yes" : "no");
+ dev_info(&h->pdev->dev, "support imp-controlled PHY: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, caps) ? "yes" : "no");
}
static void hns3_dbg_dev_specs(struct hnae3_handle *h)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index bf4302a5cf95..c21dd11baed9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -210,7 +210,6 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
* Rl defines rate of interrupts i.e. number of interrupts-per-second
* GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
*/
-
if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
!tqp_vector->rx_group.coal.adapt_enable)
/* According to the hardware, the range of rl_reg is
@@ -695,7 +694,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
- u16 *mss, u32 *type_cs_vlan_tso)
+ u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
u32 l4_offset, hdr_len;
union l3_hdr_info l3;
@@ -751,6 +750,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
(__force __wsum)htonl(l4_paylen));
}
+ *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;
+
/* find the txbd field values */
*paylen_fdop_ol4cs = skb->len - hdr_len;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
@@ -883,7 +884,6 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*ol_type_vlan_len_msec,
HNS3_TXD_OL3T_S,
HNS3_OL3T_IPV4_NO_CSUM);
-
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
HNS3_OL3T_IPV6);
@@ -1078,7 +1078,8 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
}
static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, struct hns3_desc *desc)
+ struct sk_buff *skb, struct hns3_desc *desc,
+ struct hns3_desc_cb *desc_cb)
{
u32 ol_type_vlan_len_msec = 0;
u32 paylen_ol4cs = skb->len;
@@ -1107,6 +1108,8 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
1);
}
+ desc_cb->send_bytes = skb->len;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ol4_proto, il4_proto;
@@ -1142,7 +1145,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
- &type_cs_vlan_tso);
+ &type_cs_vlan_tso, &desc_cb->send_bytes);
if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_tso_err++;
@@ -1277,31 +1280,29 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
}
static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
- u8 max_non_tso_bd_num)
+ u8 max_non_tso_bd_num, unsigned int bd_num,
+ unsigned int recursion_level)
{
+#define HNS3_MAX_RECURSION_LEVEL 24
+
struct sk_buff *frag_skb;
- unsigned int bd_num = 0;
/* If the total len is within the max bd limit */
- if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
+ !skb_has_frag_list(skb) &&
skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
return skb_shinfo(skb)->nr_frags + 1U;
- /* The below case will always be linearized, return
- * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
- */
- if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len >
- HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
- return HNS3_MAX_TSO_BD_NUM + 1U;
+ if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
+ return UINT_MAX;
bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
-
if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
return bd_num;
skb_walk_frags(skb, frag_skb) {
- bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
+ bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
+ bd_num, recursion_level + 1);
if (bd_num > HNS3_MAX_TSO_BD_NUM)
return bd_num;
}
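The rewrite threads bd_num and recursion_level through the call chain so fraglist skbs are counted recursively instead of in a single-level walk, with UINT_MAX returned as a sentinel once HNS3_MAX_RECURSION_LEVEL is exceeded. A runnable sketch of the same bounded-recursion pattern on a toy fragment list (structure and names are illustrative, not the driver's):

/* Accumulate a descriptor count while walking nested fragment lists;
 * return UINT_MAX as a sentinel once the nesting depth passes the
 * cap, so the caller can fail cleanly instead of recursing forever.
 */
#include <limits.h>
#include <stddef.h>

#define MAX_RECURSION_LEVEL 24

struct frag_node {
	unsigned int nr_bds;		/* descriptors this node needs */
	struct frag_node *frag_list;	/* nested fragments, or NULL */
	struct frag_node *next;		/* sibling in the current list */
};

static unsigned int count_bds(const struct frag_node *node,
			      unsigned int bd_num,
			      unsigned int recursion_level)
{
	const struct frag_node *frag;

	if (recursion_level >= MAX_RECURSION_LEVEL)
		return UINT_MAX;	/* sentinel: nesting too deep */

	bd_num += node->nr_bds;

	for (frag = node->frag_list; frag; frag = frag->next) {
		bd_num = count_bds(frag, bd_num, recursion_level + 1);
		if (bd_num == UINT_MAX)
			return bd_num;	/* propagate the sentinel */
	}

	return bd_num;
}

int main(void)
{
	struct frag_node inner = { .nr_bds = 2 };
	struct frag_node outer = { .nr_bds = 3, .frag_list = &inner };

	return count_bds(&outer, 0, 0) == 5 ? 0 : 1;
}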
@@ -1361,6 +1362,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
size[i] = skb_frag_size(&shinfo->frags[i]);
}
+static int hns3_skb_linearize(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u8 max_non_tso_bd_num,
+ unsigned int bd_num)
+{
+ /* 'bd_num == UINT_MAX' means the skb's fraglist has a
+ * recursion level of over HNS3_MAX_RECURSION_LEVEL.
+ */
+ if (bd_num == UINT_MAX) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.over_max_recursion++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ /* The skb->len has exceeded the hw limitation, linearization
+ * will not help.
+ */
+ if (skb->len > HNS3_MAX_TSO_SIZE ||
+ (!skb_is_gso(skb) && skb->len >
+ HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.hw_limitation++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ if (__skb_linearize(skb)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct net_device *netdev,
struct sk_buff *skb)
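Each failure path in hns3_skb_linearize() bumps a per-ring counter between u64_stats_update_begin() and u64_stats_update_end(), the seqcount-based protocol that lets 32-bit readers fetch consistent 64-bit values. A hedged sketch of the matching reader side, mirroring the fetch/retry loop hns3_nic_get_stats64() already uses further down (kernel-context fragment, not standalone):

/* Reader side of the u64_stats protocol around the new counters; the
 * writer side is the update_begin()/update_end() pairs above. On
 * 64-bit builds these compile away to plain loads; on 32-bit they
 * retry if a writer raced the read.
 */
unsigned int start;
u64 over_rec, hw_lim;

do {
	start = u64_stats_fetch_begin_irq(&ring->syncp);
	over_rec = ring->stats.over_max_recursion;
	hw_lim = ring->stats.hw_limitation;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));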
@@ -1370,7 +1408,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
unsigned int bd_num;
- bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
+ bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
if (unlikely(bd_num > max_non_tso_bd_num)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
!hns3_skb_need_linearized(skb, bd_size, bd_num,
@@ -1379,16 +1417,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
- if (__skb_linearize(skb))
+ if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
+ bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
- if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
- (!skb_is_gso(skb) &&
- bd_num > max_non_tso_bd_num)) {
- trace_hns3_over_max_bd(skb);
- return -ENOMEM;
- }
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
@@ -1412,6 +1445,10 @@ out:
return bd_num;
}
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+
return -EBUSY;
}
@@ -1459,6 +1496,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
struct sk_buff *skb, enum hns_desc_type type)
{
unsigned int size = skb_headlen(skb);
+ struct sk_buff *frag_skb;
int i, ret, bd_num = 0;
if (size) {
@@ -1483,6 +1521,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
bd_num += ret;
}
+ skb_walk_frags(skb, frag_skb) {
+ ret = hns3_fill_skb_to_desc(ring, frag_skb,
+ DESC_TYPE_FRAGLIST_SKB);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
return bd_num;
}
@@ -1511,16 +1558,20 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct netdev_queue *dev_queue;
int pre_ntu, next_to_use_head;
- struct sk_buff *frag_skb;
- int bd_num = 0;
bool doorbell;
int ret;
/* Hardware can only handle short frames above 32 bytes */
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
return NETDEV_TX_OK;
}
@@ -1530,15 +1581,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
if (unlikely(ret <= 0)) {
if (ret == -EBUSY) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
hns3_tx_doorbell(ring, 0, true);
return NETDEV_TX_BUSY;
- } else if (ret == -ENOMEM) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
}
hns3_rl_err(netdev, "xmit error: %d!\n", ret);
@@ -1547,25 +1591,19 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
- ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+ desc_cb);
if (unlikely(ret < 0))
goto fill_err;
+ /* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
+ * zero, which is unlikely, and 'ret > 0' is the number of tx
+ * descriptors that need to be notified to the hw.
+ */
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
- if (unlikely(ret < 0))
+ if (unlikely(ret <= 0))
goto fill_err;
- bd_num += ret;
-
- skb_walk_frags(skb, frag_skb) {
- ret = hns3_fill_skb_to_desc(ring, frag_skb,
- DESC_TYPE_FRAGLIST_SKB);
- if (unlikely(ret < 0))
- goto fill_err;
-
- bd_num += ret;
- }
-
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
@@ -1574,9 +1612,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
- doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
netdev_xmit_more());
- hns3_tx_doorbell(ring, bd_num, doorbell);
+ hns3_tx_doorbell(ring, ret, doorbell);
return NETDEV_TX_OK;
@@ -1748,11 +1786,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_drop += ring->stats.tx_l4_proto_err;
tx_drop += ring->stats.tx_l2l3l4_err;
tx_drop += ring->stats.tx_tso_err;
+ tx_drop += ring->stats.over_max_recursion;
+ tx_drop += ring->stats.hw_limitation;
tx_errors += ring->stats.sw_err_cnt;
tx_errors += ring->stats.tx_vlan_err;
tx_errors += ring->stats.tx_l4_proto_err;
tx_errors += ring->stats.tx_l2l3l4_err;
tx_errors += ring->stats.tx_tso_err;
+ tx_errors += ring->stats.over_max_recursion;
+ tx_errors += ring->stats.hw_limitation;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -2323,6 +2365,32 @@ static void hns3_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
+static int __maybe_unused hns3_suspend(struct device *dev)
+{
+ struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
+
+ if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
+ dev_info(dev, "Begin to suspend.\n");
+ if (ae_dev->ops && ae_dev->ops->reset_prepare)
+ ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused hns3_resume(struct device *dev)
+{
+ struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
+
+ if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
+ dev_info(dev, "Begin to resume.\n");
+ if (ae_dev->ops && ae_dev->ops->reset_done)
+ ae_dev->ops->reset_done(ae_dev);
+ }
+
+ return 0;
+}
+
static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -2381,8 +2449,8 @@ static void hns3_reset_prepare(struct pci_dev *pdev)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "FLR prepare\n");
- if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
- ae_dev->ops->flr_prepare(ae_dev);
+ if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
+ ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET);
}
static void hns3_reset_done(struct pci_dev *pdev)
@@ -2390,8 +2458,8 @@ static void hns3_reset_done(struct pci_dev *pdev)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "FLR done\n");
- if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
- ae_dev->ops->flr_done(ae_dev);
+ if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done)
+ ae_dev->ops->reset_done(ae_dev);
}
static const struct pci_error_handlers hns3_err_handler = {
@@ -2401,12 +2469,15 @@ static const struct pci_error_handlers hns3_err_handler = {
.reset_done = hns3_reset_done,
};
+static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);
+
static struct pci_driver hns3_driver = {
.name = hns3_driver_name,
.id_table = hns3_pci_tbl,
.probe = hns3_probe,
.remove = hns3_remove,
.shutdown = hns3_shutdown,
+ .driver.pm = &hns3_pm_ops,
.sriov_configure = hns3_pci_sriov_configure,
.err_handler = &hns3_err_handler,
};
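SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops whose system-sleep hooks point at the two callbacks, and the __maybe_unused annotations keep the functions warning-free when CONFIG_PM_SLEEP is off (the macro then references neither). A hedged sketch of the same wiring on a hypothetical PCI driver, omitting the unrelated pci_driver fields:

/* Minimal PM wiring sketch under the same assumptions as the patch:
 * only system suspend/resume is handled, runtime PM is untouched.
 */
static int __maybe_unused toy_suspend(struct device *dev)
{
	/* quiesce the hardware, e.g. prepare a function-level reset */
	return 0;
}

static int __maybe_unused toy_resume(struct device *dev)
{
	/* re-initialize the hardware once the reset has completed */
	return 0;
}

static SIMPLE_DEV_PM_OPS(toy_pm_ops, toy_suspend, toy_resume);

static struct pci_driver toy_driver = {
	.name      = "toy",
	.driver.pm = &toy_pm_ops,
	/* probe/remove/id_table omitted from this sketch */
};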
@@ -2691,8 +2762,12 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
break;
desc_cb = &ring->desc_cb[ntc];
- (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
- (*bytes) += desc_cb->length;
+
+ if (desc_cb->type == DESC_TYPE_SKB) {
+ (*pkts)++;
+ (*bytes) += desc_cb->send_bytes;
+ }
+
/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
hns3_free_buffer_detach(ring, ntc, budget);
@@ -2965,7 +3040,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
HNS3_RXD_L3ID_S);
l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
HNS3_RXD_L4ID_S);
-
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
if ((l3_type == HNS3_L3_TYPE_IPV4 ||
l3_type == HNS3_L3_TYPE_IPV6) &&
@@ -3295,7 +3369,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
if (!skb) {
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
/* Check valid BD */
if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
return -ENXIO;
@@ -3557,7 +3630,6 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
hns3_for_each_ring(ring, tqp_vector->rx_group) {
int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
hns3_rx_skb);
-
if (rx_cleaned >= rx_budget)
clean_complete = false;
@@ -3704,7 +3776,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
- struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int ret;
@@ -3736,6 +3807,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
}
for (i = 0; i < priv->vector_num; i++) {
+ struct hnae3_ring_chain_node vector_ring_chain;
+
tqp_vector = &priv->tqp_vector[i];
tqp_vector->rx_group.total_bytes = 0;
@@ -4024,7 +4097,6 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
hns3_buf_size2type(ring->buf_size));
hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
ring->desc_num / 8 - 1);
-
} else {
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
(u32)dma);
@@ -4143,14 +4215,6 @@ static void hns3_uninit_phy(struct net_device *netdev)
h->ae_algo->ops->mac_disconnect_phy(h);
}
-static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
-{
- struct hnae3_handle *h = hns3_get_handle(netdev);
-
- if (h->ae_algo->ops->del_all_fd_entries)
- h->ae_algo->ops->del_all_fd_entries(h, clear_list);
-}
-
static int hns3_client_start(struct hnae3_handle *handle)
{
if (!handle->ae_algo->ops->client_start)
@@ -4337,8 +4401,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_nic_uninit_irq(priv);
- hns3_del_all_fd_rules(netdev, true);
-
hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
@@ -4472,11 +4534,11 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
int i, j;
int ret;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- ret = h->ae_algo->ops->reset_queue(h, i);
- if (ret)
- return ret;
+ ret = h->ae_algo->ops->reset_queue(h);
+ if (ret)
+ return ret;
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
hns3_init_ring_hw(&priv->ring[i]);
/* We need to clear tx ring here because self test will
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d069b04ee587..daa04aeb0942 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -298,7 +298,12 @@ struct hns3_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
- u32 page_offset;
+
+ union {
+ u32 page_offset; /* for rx */
+ u32 send_bytes; /* for tx */
+ };
+
u32 length; /* length of the buffer */
u16 reuse_flag;
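Stashing the GSO-adjusted byte count per descriptor matters because byte queue limits require the bytes reported at completion time to equal exactly what __netdev_tx_sent_queue() was told at transmit time, and the skb is gone by then; the union reuses the rx-only page_offset slot so struct hns3_desc_cb does not grow. A hedged kernel-style fragment of the pairing (gso_adjusted_len() is a hypothetical helper, names are illustrative):

/* transmit side: record the wire byte count alongside the descriptor */
cb->send_bytes = gso_adjusted_len(skb);		/* hypothetical helper */
doorbell = __netdev_tx_sent_queue(txq, cb->send_bytes,
				  netdev_xmit_more());

/* completion side: report the identical value back to BQL */
if (cb->type == DESC_TYPE_SKB) {
	pkts++;
	bytes += cb->send_bytes;		/* not cb->length */
}
netdev_tx_completed_queue(txq, pkts, bytes);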
@@ -376,6 +381,8 @@ struct ring_stats {
u64 tx_l4_proto_err;
u64 tx_l2l3l4_err;
u64 tx_tso_err;
+ u64 over_max_recursion;
+ u64 hw_limitation;
};
struct {
u64 rx_pkts;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index adcec4ea7cb9..b48faf769b1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
HNS3_TQP_STAT("tso_err", tx_tso_err),
+ HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
+ HNS3_TQP_STAT("hw_limitation", hw_limitation),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -307,7 +309,7 @@ out:
}
/**
- * hns3_nic_self_test - self test
+ * hns3_self_test - self test
* @ndev: net device
* @eth_test: test cmd
* @data: test result
@@ -642,6 +644,10 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
+ return;
if (h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
@@ -652,6 +658,10 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
+ return -EOPNOTSUPP;
netif_dbg(h, drv, netdev,
"set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
@@ -692,6 +702,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -722,7 +733,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
case HNAE3_MEDIA_TYPE_COPPER:
cmd->base.port = PORT_TP;
- if (!netdev->phydev)
+ if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) &&
+ ops->get_phy_link_ksettings)
+ ops->get_phy_link_ksettings(h, cmd);
+ else if (!netdev->phydev)
hns3_get_ksettings(h, cmd);
else
phy_ethtool_ksettings_get(netdev->phydev, cmd);
@@ -815,6 +829,9 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
return -EINVAL;
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ } else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) &&
+ ops->set_phy_link_ksettings) {
+ return ops->set_phy_link_ksettings(handle, cmd);
}
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 1bd0ddfaec4d..76a482456f1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -353,7 +353,10 @@ static void hclge_set_default_capability(struct hclge_dev *hdev)
set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
+ }
}
static void hclge_parse_capability(struct hclge_dev *hdev,
@@ -363,7 +366,6 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
u32 caps;
caps = __le32_to_cpu(cmd->caps[0]);
-
if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
@@ -378,6 +380,12 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B))
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B))
+ set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
+ set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
}
static __le32 hclge_build_api_caps(void)
@@ -467,6 +475,8 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
req->compat = cpu_to_le32(compat);
return hclge_cmd_send(&hdev->hw, &desc, 1);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 057dda735492..c6fc22e29581 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -127,7 +127,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
- HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
+ HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
/* PFC/Pause commands */
@@ -243,6 +243,7 @@ enum hclge_opcode_type {
HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204,
+ HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
@@ -303,6 +304,10 @@ enum hclge_opcode_type {
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
+
+ /* PHY command */
+ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
+ HCLGE_OPC_PHY_REG = 0x7026,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -384,6 +389,8 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_CAP_FEC_B = 13,
+ HCLGE_CAP_PAUSE_B = 14,
};
enum HCLGE_API_CAP_BITS {
@@ -499,8 +506,6 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RD_LEN_BYTES 16
#define HCLGE_CFG_RD_LEN_UNIT 4
-#define HCLGE_CFG_VMDQ_S 0
-#define HCLGE_CFG_VMDQ_M GENMASK(7, 0)
#define HCLGE_CFG_TC_NUM_S 8
#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8)
#define HCLGE_CFG_TQP_DESC_N_S 16
@@ -943,10 +948,16 @@ struct hclge_reset_tqp_queue_cmd {
#define HCLGE_CFG_RESET_MAC_B 3
#define HCLGE_CFG_RESET_FUNC_B 7
+#define HCLGE_CFG_RESET_RCB_B 1
struct hclge_reset_cmd {
u8 mac_func_reset;
u8 fun_reset_vfid;
- u8 rsv[22];
+ u8 fun_reset_rcb;
+ u8 rsv;
+ __le16 fun_reset_rcb_vqid_start;
+ __le16 fun_reset_rcb_vqid_num;
+ u8 fun_reset_rcb_return_status;
+ u8 rsv1[15];
};
#define HCLGE_PF_RESET_DONE_BIT BIT(0)
@@ -958,9 +969,10 @@ struct hclge_pf_rst_done_cmd {
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
-#define HCLGE_CMD_SERDES_DONE_B BIT(0)
-#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
-struct hclge_serdes_lb_cmd {
+#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3)
+#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0)
+#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1)
+struct hclge_common_lb_cmd {
u8 mask;
u8 enable;
u8 result;
@@ -1075,6 +1087,19 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8];
};
+#define HCLGE_FD_USER_DEF_OFT_S 0
+#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0)
+#define HCLGE_FD_USER_DEF_EN_B 15
+struct hclge_fd_user_def_cfg_cmd {
+ __le16 ol2_cfg;
+ __le16 l2_cfg;
+ __le16 ol3_cfg;
+ __le16 l3_cfg;
+ __le16 ol4_cfg;
+ __le16 l4_cfg;
+ u8 rsv[12];
+};
+
struct hclge_get_m7_bd_cmd {
__le32 bd_num;
u8 rsv[20];
@@ -1096,6 +1121,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
#define HCLGE_LINK_EVENT_REPORT_EN_B 0
#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
+#define HCLGE_PHY_IMP_EN_B 2
struct hclge_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
@@ -1137,6 +1163,36 @@ struct hclge_dev_specs_1_cmd {
u8 rsv1[18];
};
+#define HCLGE_PHY_LINK_SETTING_BD_NUM 2
+
+struct hclge_phy_link_ksetting_0_cmd {
+ __le32 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 eth_tp_mdix;
+ u8 eth_tp_mdix_ctrl;
+ u8 port;
+ u8 transceiver;
+ u8 phy_address;
+ u8 rsv;
+ __le32 supported;
+ __le32 advertising;
+ __le32 lp_advertising;
+};
+
+struct hclge_phy_link_ksetting_1_cmd {
+ u8 master_slave_cfg;
+ u8 master_slave_state;
+ u8 rsv[22];
+};
+
+struct hclge_phy_reg_cmd {
+ __le16 reg_addr;
+ u8 rsv0[2];
+ __le16 reg_val;
+ u8 rsv1[18];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 6b1d197df881..85d306459e36 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1541,18 +1541,17 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
}
}
-static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
- const char *cmd_buf)
+static void hclge_dbg_dump_loopback(struct hclge_dev *hdev)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
- struct hclge_serdes_lb_cmd *req_serdes;
+ struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
- req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;
+ req_common = (struct hclge_common_lb_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
@@ -1569,27 +1568,33 @@ static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
dev_info(&hdev->pdev->dev, "app loopback: %s\n",
loopback_en ? "on" : "off");
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed to dump serdes loopback status, ret = %d\n",
+ "failed to dump common loopback status, ret = %d\n",
ret);
return;
}
- loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
loopback_en ? "on" : "off");
- loopback_en = req_serdes->enable &
+ loopback_en = req_common->enable &
HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
loopback_en ? "on" : "off");
- if (phydev)
+ if (phydev) {
dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
phydev->loopback_enabled ? "on" : "off");
+ } else if (hnae3_dev_phy_imp_supported(hdev)) {
+ loopback_en = req_common->enable &
+ HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
+ loopback_en ? "on" : "off");
+ }
}
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
@@ -1772,7 +1777,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_mac_tnl_status(hdev);
} else if (strncmp(cmd_buf, DUMP_LOOPBACK,
strlen(DUMP_LOOPBACK)) == 0) {
- hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
+ hclge_dbg_dump_loopback(hdev);
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 0ca7f1b984bf..d25291916b31 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -865,13 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
}
/* configure TM QCN hw errors */
- ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
- if (ret) {
- dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
- return ret;
- }
-
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
if (en)
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
@@ -1497,7 +1491,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
status = le32_to_cpu(desc[0].data[0]);
-
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
dev_err(dev, "ROCEE RAS AXI rresp error\n");
@@ -1647,7 +1640,6 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
-
if (status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
ae_dev->hw_err_reset_req = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index b0dbe6dcaa7b..c296ab64fb0a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -62,7 +62,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
-static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
@@ -70,6 +70,7 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+static void hclge_sync_fd_table(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -384,36 +385,62 @@ static const struct key_info meta_data_key_info[] = {
};
static const struct key_info tuple_key_info[] = {
- { OUTER_DST_MAC, 48},
- { OUTER_SRC_MAC, 48},
- { OUTER_VLAN_TAG_FST, 16},
- { OUTER_VLAN_TAG_SEC, 16},
- { OUTER_ETH_TYPE, 16},
- { OUTER_L2_RSV, 16},
- { OUTER_IP_TOS, 8},
- { OUTER_IP_PROTO, 8},
- { OUTER_SRC_IP, 32},
- { OUTER_DST_IP, 32},
- { OUTER_L3_RSV, 16},
- { OUTER_SRC_PORT, 16},
- { OUTER_DST_PORT, 16},
- { OUTER_L4_RSV, 32},
- { OUTER_TUN_VNI, 24},
- { OUTER_TUN_FLOW_ID, 8},
- { INNER_DST_MAC, 48},
- { INNER_SRC_MAC, 48},
- { INNER_VLAN_TAG_FST, 16},
- { INNER_VLAN_TAG_SEC, 16},
- { INNER_ETH_TYPE, 16},
- { INNER_L2_RSV, 16},
- { INNER_IP_TOS, 8},
- { INNER_IP_PROTO, 8},
- { INNER_SRC_IP, 32},
- { INNER_DST_IP, 32},
- { INNER_L3_RSV, 16},
- { INNER_SRC_PORT, 16},
- { INNER_DST_PORT, 16},
- { INNER_L4_RSV, 32},
+ { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
+ { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
+ { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
+ { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
+ { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
+ { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
+ { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
+ { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
+ { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
+ { INNER_DST_MAC, 48, KEY_OPT_MAC,
+ offsetof(struct hclge_fd_rule, tuples.dst_mac),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
+ { INNER_SRC_MAC, 48, KEY_OPT_MAC,
+ offsetof(struct hclge_fd_rule, tuples.src_mac),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
+ { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
+ offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
+ { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+ { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.ether_proto),
+ offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
+ { INNER_L2_RSV, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.l2_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
+ { INNER_IP_TOS, 8, KEY_OPT_U8,
+ offsetof(struct hclge_fd_rule, tuples.ip_tos),
+ offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
+ { INNER_IP_PROTO, 8, KEY_OPT_U8,
+ offsetof(struct hclge_fd_rule, tuples.ip_proto),
+ offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
+ { INNER_SRC_IP, 32, KEY_OPT_IP,
+ offsetof(struct hclge_fd_rule, tuples.src_ip),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
+ { INNER_DST_IP, 32, KEY_OPT_IP,
+ offsetof(struct hclge_fd_rule, tuples.dst_ip),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
+ { INNER_L3_RSV, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.l3_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
+ { INNER_SRC_PORT, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.src_port),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
+ { INNER_DST_PORT, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.dst_port),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
+ { INNER_L4_RSV, 32, KEY_OPT_LE32,
+ offsetof(struct hclge_fd_rule, tuples.l4_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
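Each tuple entry now records a key type plus the offsetof() positions of its value and mask inside struct hclge_fd_rule (-1 where a tuple has no backing field), so one table-driven routine can extract any tuple instead of a hand-written per-field switch. A runnable user-space sketch of that pattern with illustrative struct and field names:

/* Table-driven tuple extraction: copy value and mask out of the rule
 * struct via offsets recorded in a key table, rejecting entries that
 * carry the -1 "no backing field" sentinel.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_rule {
	uint16_t src_port, src_port_mask;
	uint8_t  ip_tos,   ip_tos_mask;
};

struct key_info {
	const char *name;
	size_t len;			/* field width in bytes */
	int val_off, mask_off;		/* -1 if not extractable */
};

static const struct key_info keys[] = {
	{ "src_port", sizeof(uint16_t),
	  offsetof(struct toy_rule, src_port),
	  offsetof(struct toy_rule, src_port_mask) },
	{ "ip_tos", sizeof(uint8_t),
	  offsetof(struct toy_rule, ip_tos),
	  offsetof(struct toy_rule, ip_tos_mask) },
};

static int get_tuple(const struct toy_rule *rule, int idx,
		     void *val, void *mask)
{
	const struct key_info *k = &keys[idx];

	if (k->val_off < 0 || k->mask_off < 0)
		return -1;		/* tuple not backed by the rule */

	memcpy(val,  (const char *)rule + k->val_off,  k->len);
	memcpy(mask, (const char *)rule + k->mask_off, k->len);
	return 0;
}

int main(void)
{
	struct toy_rule r = { .src_port = 443, .src_port_mask = 0xffff };
	uint16_t v, m;

	if (!get_tuple(&r, 0, &v, &m))
		printf("src_port %u/%#x\n", v, m);
	return 0;
}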
@@ -526,7 +553,6 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
int ret;
ret = hclge_mac_query_reg_num(hdev, &desc_num);
-
/* The firmware supports the new statistics acquisition method */
if (!ret)
ret = hclge_mac_update_stats_complete(hdev, desc_num);
@@ -751,12 +777,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
- hdev->hw.mac.phydev->drv->set_loopback) {
+ if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) ||
+ hnae3_dev_phy_imp_supported(hdev)) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
-
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
@@ -1150,8 +1176,10 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
+ if (hnae3_dev_pause_supported(hdev))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
@@ -1163,8 +1191,11 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
hclge_convert_setting_kr(mac, speed_ability);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
+
+ if (hnae3_dev_pause_supported(hdev))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
@@ -1193,10 +1224,13 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
}
+ if (hnae3_dev_pause_supported(hdev)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ }
+
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
@@ -1256,9 +1290,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
- cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_VMDQ_M,
- HCLGE_CFG_VMDQ_S);
cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
@@ -1475,7 +1506,7 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
"Running kdump kernel. Using minimal resources\n");
/* minimal queue pairs equals to the number of vports */
- hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ hdev->num_tqps = hdev->num_req_vfs + 1;
hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
@@ -1490,7 +1521,6 @@ static int hclge_configure(struct hclge_dev *hdev)
if (ret)
return ret;
- hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
hdev->vf_rss_size_max = cfg.vf_rss_size_max;
hdev->pf_rss_size_max = cfg.pf_rss_size_max;
@@ -1741,7 +1771,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
struct hclge_vport *vport = hdev->vport;
u16 i, num_vport;
- num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ num_vport = hdev->num_req_vfs + 1;
for (i = 0; i < num_vport; i++) {
int ret;
@@ -1783,7 +1813,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
int ret;
/* We need to alloc a vport for main NIC of PF */
- num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ num_vport = hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
@@ -2159,7 +2189,6 @@ static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
COMPENSATE_HALF_MPS_NUM * half_mps;
min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
-
if (rx_priv < min_rx_priv)
return false;
@@ -2188,7 +2217,7 @@ static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
* @buf_alloc: pointer to buffer calculation data
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculation successful, negative: fail
*/
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
@@ -2851,15 +2880,36 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
return hclge_get_mac_link_status(hdev, link_status);
}
+static void hclge_push_link_status(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
+ vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
+ continue;
+
+ ret = hclge_push_vf_link_status(vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to push link status to vf%u, ret = %d\n",
+ i, ret);
+ }
+ }
+}
+
static void hclge_update_link_status(struct hclge_dev *hdev)
{
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
- struct hnae3_handle *rhandle;
- struct hnae3_handle *handle;
int state;
int ret;
- int i;
if (!client)
return;
@@ -2874,25 +2924,24 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
if (state != hdev->hw.mac.link) {
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- handle = &hdev->vport[i].nic;
- client->ops->link_status_change(handle, state);
- hclge_config_mac_tnl_int(hdev, state);
- rhandle = &hdev->vport[i].roce;
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle,
- state);
- }
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, state);
+
hdev->hw.mac.link = state;
+ hclge_push_link_status(hdev);
}
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
-static void hclge_update_port_capability(struct hclge_mac *mac)
+static void hclge_update_port_capability(struct hclge_dev *hdev,
+ struct hclge_mac *mac)
{
- /* update fec ability by speed */
- hclge_convert_setting_fec(mac);
+ if (hnae3_dev_fec_supported(hdev))
+ /* update fec ability by speed */
+ hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
* read from configuration can help to deal with it
@@ -2984,6 +3033,141 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
return 0;
}
+static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_phy_link_ksetting_0_cmd *req0;
+ struct hclge_phy_link_ksetting_1_cmd *req1;
+ u32 supported, advertising, lp_advertising;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+ true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get phy link ksetting, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+ cmd->base.autoneg = req0->autoneg;
+ cmd->base.speed = le32_to_cpu(req0->speed);
+ cmd->base.duplex = req0->duplex;
+ cmd->base.port = req0->port;
+ cmd->base.transceiver = req0->transceiver;
+ cmd->base.phy_address = req0->phy_address;
+ cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
+ cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
+ supported = le32_to_cpu(req0->supported);
+ advertising = le32_to_cpu(req0->advertising);
+ lp_advertising = le32_to_cpu(req0->lp_advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
+
+ req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+ cmd->base.master_slave_cfg = req1->master_slave_cfg;
+ cmd->base.master_slave_state = req1->master_slave_state;
+
+ return 0;
+}
+
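A note on the conversion helpers used above: ethtool's legacy interface kept the supported/advertising masks in a single u32, and ethtool_convert_legacy_u32_to_link_mode() widens such a mask into the link-mode bitmap, so only the low 32 mode bits can be expressed this way. A minimal userspace model of the widening (the bitmap width below is an illustrative assumption, not the kernel's __ETHTOOL_LINK_MODE_MASK_NBITS):

#include <stdint.h>
#include <stdio.h>

#define LINK_MODE_NBITS	92	/* assumed width, for illustration only */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	((LINK_MODE_NBITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* widen a legacy u32 mask into a link-mode bitmap; bits 0..31 map 1:1 */
static void legacy_u32_to_link_mode(unsigned long *dst, uint32_t legacy)
{
	for (unsigned int i = 0; i < BITMAP_LONGS; i++)
		dst[i] = 0;
	dst[0] = legacy;	/* legacy modes occupy the low 32 bits */
}

int main(void)
{
	unsigned long modes[BITMAP_LONGS];

	legacy_u32_to_link_mode(modes, 0x0000f02f);	/* sample mask */
	printf("modes[0] = %#lx\n", modes[0]);
	return 0;
}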
+static int
+hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_phy_link_ksetting_0_cmd *req0;
+ struct hclge_phy_link_ksetting_1_cmd *req1;
+ struct hclge_dev *hdev = vport->back;
+ u32 advertising;
+ int ret;
+
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+ false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+ false);
+
+ req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+ req0->autoneg = cmd->base.autoneg;
+ req0->speed = cpu_to_le32(cmd->base.speed);
+ req0->duplex = cmd->base.duplex;
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ req0->advertising = cpu_to_le32(advertising);
+ req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+ req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+ req1->master_slave_cfg = cmd->base.master_slave_cfg;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set phy link ksettings, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hdev->hw.mac.autoneg = cmd->base.autoneg;
+ hdev->hw.mac.speed = cmd->base.speed;
+ hdev->hw.mac.duplex = cmd->base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
+
+ return 0;
+}
+
+static int hclge_update_tp_port_info(struct hclge_dev *hdev)
+{
+ struct ethtool_link_ksettings cmd;
+ int ret;
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+ ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.autoneg = cmd.base.autoneg;
+ hdev->hw.mac.speed = cmd.base.speed;
+ hdev->hw.mac.duplex = cmd.base.duplex;
+
+ return 0;
+}
+
+static int hclge_tp_port_init(struct hclge_dev *hdev)
+{
+ struct ethtool_link_ksettings cmd;
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+ cmd.base.autoneg = hdev->hw.mac.autoneg;
+ cmd.base.speed = hdev->hw.mac.speed;
+ cmd.base.duplex = hdev->hw.mac.duplex;
+ linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
+
+ return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
+}
+
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -2992,7 +3176,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
/* get the port info from SFP cmd if not copper port */
if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
- return 0;
+ return hclge_update_tp_port_info(hdev);
/* if IMP does not support getting SFP/qSFP info, return directly */
if (!hdev->support_sfp_query)
@@ -3012,7 +3196,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
- hclge_update_port_capability(mac);
+ hclge_update_port_capability(hdev, mac);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
@@ -3085,14 +3269,24 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int link_state_old;
+ int ret;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
- return 0;
+ ret = hclge_push_vf_link_status(vport);
+ if (ret) {
+ vport->vf_info.link_state = link_state_old;
+ dev_err(&hdev->pdev->dev,
+ "failed to push vf%d link status, ret = %d\n", vf, ret);
+ }
+
+ return ret;
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
@@ -3197,7 +3391,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
* caused this event. Therefore, we will do the following for now:
* 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
* have a deferred type of reset to be used.
- * 2. Schedule the reset serivce task.
+ * 2. Schedule the reset service task.
* 3. When service task receives HNAE3_UNKNOWN_RESET type it
* will fetch the correct type of reset. This would be done
* by first decoding the types of errors.
@@ -3325,8 +3519,9 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hnae3_client *client = hdev->nic_client;
- u16 i;
+ int ret;
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
return 0;
@@ -3334,27 +3529,20 @@ int hclge_notify_client(struct hclge_dev *hdev,
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- struct hnae3_handle *handle = &hdev->vport[i].nic;
- int ret;
-
- ret = client->ops->reset_notify(handle, type);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "notify nic client failed %d(%d)\n", type, ret);
- return ret;
- }
- }
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
- return 0;
+ return ret;
}
static int hclge_notify_roce_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
+ struct hnae3_handle *handle = &hdev->vport[0].roce;
struct hnae3_client *client = hdev->roce_client;
int ret;
- u16 i;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
return 0;
@@ -3362,17 +3550,10 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- struct hnae3_handle *handle = &hdev->vport[i].roce;
-
- ret = client->ops->reset_notify(handle, type);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "notify roce client failed %d(%d)",
- type, ret);
- return ret;
- }
- }
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+ type, ret);
return ret;
}
@@ -3440,7 +3621,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
int i;
- for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
int ret;
@@ -3521,14 +3702,12 @@ void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type)
{
struct hnae3_client *client = hdev->nic_client;
- u16 i;
if (!client || !client->ops->process_hw_error ||
!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
return;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
- client->ops->process_hw_error(&hdev->vport[i].nic, type);
+ client->ops->process_hw_error(&hdev->vport[0].nic, type);
}
static void hclge_handle_imp_error(struct hclge_dev *hdev)
@@ -3794,6 +3973,21 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
}
+static void hclge_update_reset_level(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+
+ /* if default_reset_request has a higher level reset request,
+ * it should be handled as soon as possible, since some errors
+ * need this kind of reset to be fixed.
+ */
+ reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (reset_level != HNAE3_NONE_RESET)
+ set_bit(reset_level, &hdev->reset_request);
+}
+
static int hclge_set_rst_done(struct hclge_dev *hdev)
{
struct hclge_pf_rst_done_cmd *req;
@@ -3881,8 +4075,6 @@ static int hclge_reset_prepare(struct hclge_dev *hdev)
static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- enum hnae3_reset_type reset_level;
int ret;
hdev->rst_stats.hw_reset_done_cnt++;
@@ -3926,14 +4118,7 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->rst_stats.reset_done_cnt++;
clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
- /* if default_reset_request has a higher level reset request,
- * it should be handled as soon as possible. since some errors
- * need this kind of reset to fix.
- */
- reset_level = hclge_get_reset_level(ae_dev,
- &hdev->default_reset_request);
- if (reset_level != HNAE3_NONE_RESET)
- set_bit(reset_level, &hdev->reset_request);
+ hclge_update_reset_level(hdev);
return 0;
}
@@ -4094,6 +4279,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
hclge_update_link_status(hdev);
hclge_sync_mac_table(hdev);
hclge_sync_promisc_mode(hdev);
+ hclge_sync_fd_table(hdev);
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -4738,58 +4924,44 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hdev->vport;
- int i, j;
+ struct hclge_vport *vport = &hdev->vport[0];
+ int i;
- for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport[j].rss_indirection_tbl[i] =
- i % vport[j].alloc_rss_size;
- }
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
}
static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- struct hclge_vport *vport = hdev->vport;
+ int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ struct hclge_vport *vport = &hdev->vport[0];
+ u16 *rss_ind_tbl;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- u16 *rss_ind_tbl;
-
- vport[i].rss_tuple_sets.ipv4_tcp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv4_udp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv4_sctp_en =
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport[i].rss_tuple_sets.ipv4_fragment_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv6_tcp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv6_udp_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_tuple_sets.ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport[i].rss_tuple_sets.ipv6_fragment_en =
- HCLGE_RSS_INPUT_TUPLE_OTHER;
-
- vport[i].rss_algo = rss_algo;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
+ vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
+ vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport->rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGE_RSS_INPUT_TUPLE_SCTP;
+ vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+
+ vport->rss_algo = rss_algo;
+
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
- vport[i].rss_indirection_tbl = rss_ind_tbl;
- memcpy(vport[i].rss_hash_key, hclge_hash_key,
- HCLGE_RSS_KEY_SIZE);
- }
+ vport->rss_indirection_tbl = rss_ind_tbl;
+ memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
hclge_rss_indir_init_cfg(hdev);
@@ -4995,6 +5167,285 @@ static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
}
+static void hclge_sync_fd_state(struct hclge_dev *hdev)
+{
+ if (hlist_empty(&hdev->fd_rule_list))
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+}
+
+static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
+{
+ if (!test_bit(location, hdev->fd_bmap)) {
+ set_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num++;
+ }
+}
+
+static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
+{
+ if (test_bit(location, hdev->fd_bmap)) {
+ clear_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num--;
+ }
+}
+
+static void hclge_fd_free_node(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hclge_sync_fd_state(hdev);
+}
+
+static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
+ struct hclge_fd_rule *old_rule,
+ struct hclge_fd_rule *new_rule,
+ enum HCLGE_FD_NODE_STATE state)
+{
+ switch (state) {
+ case HCLGE_FD_TO_ADD:
+ case HCLGE_FD_ACTIVE:
+ /* 1) if the new state is TO_ADD, just replace the old rule
+ * with the same location, no matter its state, because the
+ * new rule will be configured to the hardware.
+ * 2) if the new state is ACTIVE, it means the new rule
+ * has been configured to the hardware, so just replace
+ * the old rule node with the same location.
+ * 3) in both cases, no new node is added to the list, so it's
+ * unnecessary to update the rule number and fd_bmap.
+ */
+ new_rule->rule_node.next = old_rule->rule_node.next;
+ new_rule->rule_node.pprev = old_rule->rule_node.pprev;
+ memcpy(old_rule, new_rule, sizeof(*old_rule));
+ kfree(new_rule);
+ break;
+ case HCLGE_FD_DELETED:
+ hclge_fd_dec_rule_cnt(hdev, old_rule->location);
+ hclge_fd_free_node(hdev, old_rule);
+ break;
+ case HCLGE_FD_TO_DEL:
+ /* if the new request is TO_DEL and the old rule exists:
+ * 1) if the old rule's state is TO_DEL, nothing needs to be
+ * done, because rules are deleted by location and the rest of
+ * the rule content is irrelevant.
+ * 2) if the old rule's state is ACTIVE, change it to TO_DEL so
+ * the rule is deleted when the periodic task runs.
+ * 3) if the old rule's state is TO_ADD, the rule has not been
+ * added to the hardware yet, so just delete the rule node from
+ * fd_rule_list directly.
+ */
+ if (old_rule->state == HCLGE_FD_TO_ADD) {
+ hclge_fd_dec_rule_cnt(hdev, old_rule->location);
+ hclge_fd_free_node(hdev, old_rule);
+ return;
+ }
+ old_rule->state = HCLGE_FD_TO_DEL;
+ break;
+ }
+}
+
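The update logic above reads most naturally as a small state machine over one rule node. A standalone model of the transitions the comments describe (names abbreviated; this is a sketch, not the driver code):

#include <stdbool.h>
#include <stdio.h>

enum fd_state { FD_TO_ADD, FD_TO_DEL, FD_ACTIVE, FD_DELETED };

/* returns true when the old node should be freed, per the rules above */
static bool fd_resolve(enum fd_state old_state, enum fd_state request,
		       enum fd_state *out)
{
	switch (request) {
	case FD_TO_ADD:
	case FD_ACTIVE:
		*out = request;		/* replace the content in place */
		return false;
	case FD_DELETED:
		return true;		/* already removed from hardware */
	case FD_TO_DEL:
		if (old_state == FD_TO_ADD)
			return true;	/* never reached the hardware */
		*out = FD_TO_DEL;	/* let the periodic task delete it */
		return false;
	}
	return false;
}

int main(void)
{
	enum fd_state s = FD_ACTIVE;
	bool freed = fd_resolve(s, FD_TO_DEL, &s);

	printf("free=%d new_state=%d\n", freed, s);
	return 0;
}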
+static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
+ u16 location,
+ struct hclge_fd_rule **parent)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ /* record the parent node, used to keep the nodes in fd_rule_list
+ * in ascending order.
+ */
+ *parent = rule;
+ }
+
+ return NULL;
+}
+
+/* insert fd rule node in ascending order according to rule->location */
+static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
+ struct hclge_fd_rule *rule,
+ struct hclge_fd_rule *parent)
+{
+ INIT_HLIST_NODE(&rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&rule->rule_node, hlist);
+}
+
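hclge_find_fd_rule() and hclge_fd_insert_rule_node() together implement a sorted singly linked list keyed by location, with the lookup recording the insert-after parent. A self-contained userspace model of the same pattern:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int location;
	struct node *next;
};

/* find a node by location; on miss, *parent is the insert-after node */
static struct node *find(struct node *head, unsigned int location,
			 struct node **parent)
{
	for (struct node *n = head; n; n = n->next) {
		if (n->location == location)
			return n;
		if (n->location > location)
			return NULL;	/* list is sorted: passed the slot */
		*parent = n;
	}
	return NULL;
}

/* insert keeping ascending order, using the parent from find() */
static struct node *insert(struct node **head, unsigned int location)
{
	struct node *parent = NULL, *n;

	if (find(*head, location, &parent))
		return NULL;			/* duplicate location */
	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->location = location;
	if (parent) {
		n->next = parent->next;
		parent->next = n;
	} else {
		n->next = *head;
		*head = n;
	}
	return n;
}

int main(void)
{
	struct node *head = NULL;

	insert(&head, 5);
	insert(&head, 2);
	insert(&head, 9);
	for (struct node *n = head; n; n = n->next)
		printf("%u ", n->location);	/* prints: 2 5 9 */
	printf("\n");
	return 0;
}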
+static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
+ struct hclge_fd_user_def_cfg *cfg)
+{
+ struct hclge_fd_user_def_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
+
+ req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
+
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
+ req->ol2_cfg = cpu_to_le16(data);
+
+ data = 0;
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
+ req->ol3_cfg = cpu_to_le16(data);
+
+ data = 0;
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
+ req->ol4_cfg = cpu_to_le16(data);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set fd user def data, ret= %d\n", ret);
+ return ret;
+}
+
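hnae3_set_bit()/hnae3_set_field() are mask-and-shift helpers, so each OLx word above packs one enable bit plus an offset field into 16 bits. A standalone sketch of the packing; the bit positions below are illustrative assumptions, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

/* assumed layout for illustration: bit 0 = enable, bits 1..9 = offset */
#define USER_DEF_EN_B	0
#define USER_DEF_OFT_S	1
#define USER_DEF_OFT_M	(0x1ffu << USER_DEF_OFT_S)

static uint16_t pack_user_def(int enable, unsigned int offset)
{
	uint16_t data = 0;

	data |= (enable ? 1u : 0u) << USER_DEF_EN_B;
	data |= (offset << USER_DEF_OFT_S) & USER_DEF_OFT_M;
	return data;
}

int main(void)
{
	printf("%#x\n", pack_user_def(1, 42));	/* enabled, offset 42 */
	return 0;
}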
+static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
+{
+ int ret;
+
+ if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
+ return;
+
+ if (!locked)
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
+ if (ret)
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+
+ if (!locked)
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hlist_head *hlist = &hdev->fd_rule_list;
+ struct hclge_fd_rule *fd_rule, *parent = NULL;
+ struct hclge_fd_user_def_info *info, *old_info;
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return 0;
+
+ /* valid layers start from 1, so subtract 1 to index the cfg array */
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ info = &rule->ep.user_def;
+
+ if (!cfg->ref_cnt || cfg->offset == info->offset)
+ return 0;
+
+ if (cfg->ref_cnt > 1)
+ goto error;
+
+ fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
+ if (fd_rule) {
+ old_info = &fd_rule->ep.user_def;
+ if (info->layer == old_info->layer)
+ return 0;
+ }
+
+error:
+ dev_err(&hdev->pdev->dev,
+ "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
+ info->layer + 1);
+ return -ENOSPC;
+}
+
+static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return;
+
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ if (!cfg->ref_cnt) {
+ cfg->offset = rule->ep.user_def.offset;
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ }
+ cfg->ref_cnt++;
+}
+
+static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return;
+
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ if (!cfg->ref_cnt)
+ return;
+
+ cfg->ref_cnt--;
+ if (!cfg->ref_cnt) {
+ cfg->offset = 0;
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ }
+}
+
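The two refcount helpers above follow a first-user/last-user scheme: the first rule on a layer claims the offset and marks the config dirty, the last one releases it. A standalone model:

#include <stdio.h>

struct user_def_cfg {
	unsigned int ref_cnt;
	unsigned int offset;
	int dirty;	/* models HCLGE_STATE_FD_USER_DEF_CHANGED */
};

static void cfg_get(struct user_def_cfg *cfg, unsigned int offset)
{
	if (!cfg->ref_cnt) {	/* first user claims the offset */
		cfg->offset = offset;
		cfg->dirty = 1;
	}
	cfg->ref_cnt++;
}

static void cfg_put(struct user_def_cfg *cfg)
{
	if (!cfg->ref_cnt)
		return;
	if (!--cfg->ref_cnt) {	/* last user releases the offset */
		cfg->offset = 0;
		cfg->dirty = 1;
	}
}

int main(void)
{
	struct user_def_cfg cfg = { 0 };

	cfg_get(&cfg, 8);
	cfg_get(&cfg, 8);
	cfg_put(&cfg);
	printf("ref=%u offset=%u dirty=%d\n", cfg.ref_cnt, cfg.offset,
	       cfg.dirty);
	return 0;
}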
+static void hclge_update_fd_list(struct hclge_dev *hdev,
+ enum HCLGE_FD_NODE_STATE state, u16 location,
+ struct hclge_fd_rule *new_rule)
+{
+ struct hlist_head *hlist = &hdev->fd_rule_list;
+ struct hclge_fd_rule *fd_rule, *parent = NULL;
+
+ fd_rule = hclge_find_fd_rule(hlist, location, &parent);
+ if (fd_rule) {
+ hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
+ if (state == HCLGE_FD_ACTIVE)
+ hclge_fd_inc_user_def_refcnt(hdev, new_rule);
+ hclge_sync_fd_user_def_cfg(hdev, true);
+
+ hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
+ return;
+ }
+
+ /* it's unlikely to fail here, because we have checked that the
+ * rule exists before.
+ */
+ if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to delete fd rule %u, it's inexistent\n",
+ location);
+ return;
+ }
+
+ hclge_fd_inc_user_def_refcnt(hdev, new_rule);
+ hclge_sync_fd_user_def_cfg(hdev, true);
+
+ hclge_fd_insert_rule_node(hlist, new_rule, parent);
+ hclge_fd_inc_rule_cnt(hdev, new_rule->location);
+
+ if (state == HCLGE_FD_TO_ADD) {
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+ }
+}
+
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
struct hclge_get_fd_mode_cmd *req;
@@ -5073,6 +5524,17 @@ static int hclge_set_fd_key_config(struct hclge_dev *hdev,
return ret;
}
+static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
+{
+ struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hclge_fd_set_user_def_cmd(hdev, cfg);
+}
+
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
@@ -5113,9 +5575,12 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If use max 400bit key, we can support tuples for ether type */
- if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
key_cfg->tuple_active |=
BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
+ }
/* roce_type is used to filter roce frames
* dst_vport is used to specify the rule
@@ -5224,96 +5689,57 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
struct hclge_fd_rule *rule)
{
+ int offset, moffset, ip_offset;
+ enum HCLGE_FD_KEY_OPT key_opt;
u16 tmp_x_s, tmp_y_s;
u32 tmp_x_l, tmp_y_l;
+ u8 *p = (u8 *)rule;
int i;
- if (rule->unused_tuple & tuple_bit)
+ if (rule->unused_tuple & BIT(tuple_bit))
return true;
- switch (tuple_bit) {
- case BIT(INNER_DST_MAC):
- for (i = 0; i < ETH_ALEN; i++) {
- calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
- rule->tuples_mask.dst_mac[i]);
- calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
- rule->tuples_mask.dst_mac[i]);
- }
+ key_opt = tuple_key_info[tuple_bit].key_opt;
+ offset = tuple_key_info[tuple_bit].offset;
+ moffset = tuple_key_info[tuple_bit].moffset;
- return true;
- case BIT(INNER_SRC_MAC):
- for (i = 0; i < ETH_ALEN; i++) {
- calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
- rule->tuples_mask.src_mac[i]);
- calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
- rule->tuples_mask.src_mac[i]);
- }
+ switch (key_opt) {
+ case KEY_OPT_U8:
+ calc_x(*key_x, p[offset], p[moffset]);
+ calc_y(*key_y, p[offset], p[moffset]);
return true;
- case BIT(INNER_VLAN_TAG_FST):
- calc_x(tmp_x_s, rule->tuples.vlan_tag1,
- rule->tuples_mask.vlan_tag1);
- calc_y(tmp_y_s, rule->tuples.vlan_tag1,
- rule->tuples_mask.vlan_tag1);
+ case KEY_OPT_LE16:
+ calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+ calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
return true;
- case BIT(INNER_ETH_TYPE):
- calc_x(tmp_x_s, rule->tuples.ether_proto,
- rule->tuples_mask.ether_proto);
- calc_y(tmp_y_s, rule->tuples.ether_proto,
- rule->tuples_mask.ether_proto);
- *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
- *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
-
- return true;
- case BIT(INNER_IP_TOS):
- calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
- calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
-
- return true;
- case BIT(INNER_IP_PROTO):
- calc_x(*key_x, rule->tuples.ip_proto,
- rule->tuples_mask.ip_proto);
- calc_y(*key_y, rule->tuples.ip_proto,
- rule->tuples_mask.ip_proto);
-
- return true;
- case BIT(INNER_SRC_IP):
- calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
- rule->tuples_mask.src_ip[IPV4_INDEX]);
- calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
- rule->tuples_mask.src_ip[IPV4_INDEX]);
+ case KEY_OPT_LE32:
+ calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+ calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
- case BIT(INNER_DST_IP):
- calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
- rule->tuples_mask.dst_ip[IPV4_INDEX]);
- calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
- rule->tuples_mask.dst_ip[IPV4_INDEX]);
- *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
- *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
-
- return true;
- case BIT(INNER_SRC_PORT):
- calc_x(tmp_x_s, rule->tuples.src_port,
- rule->tuples_mask.src_port);
- calc_y(tmp_y_s, rule->tuples.src_port,
- rule->tuples_mask.src_port);
- *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
- *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+ case KEY_OPT_MAC:
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
+ p[moffset + i]);
+ calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
+ p[moffset + i]);
+ }
return true;
- case BIT(INNER_DST_PORT):
- calc_x(tmp_x_s, rule->tuples.dst_port,
- rule->tuples_mask.dst_port);
- calc_y(tmp_y_s, rule->tuples.dst_port,
- rule->tuples_mask.dst_port);
- *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
- *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+ case KEY_OPT_IP:
+ ip_offset = IPV4_INDEX * sizeof(u32);
+ calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
+ *(u32 *)(&p[moffset + ip_offset]));
+ calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
+ *(u32 *)(&p[moffset + ip_offset]));
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
default:
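The table-driven rewrite above replaces the per-tuple switch: each tuple_key_info entry records a key option plus the byte offsets of the value and its mask inside struct hclge_fd_rule, so a single u8 pointer walk serves every field. A self-contained sketch of the offsetof-driven lookup; the x/y encoding shown is one common TCAM convention assumed for illustration, since the driver's calc_x()/calc_y() macros are defined outside this hunk:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rule {
	uint8_t ip_tos, ip_proto;
	uint8_t ip_tos_mask, ip_proto_mask;
};

struct tuple_info {
	const char *name;
	size_t offset;		/* value offset inside struct rule */
	size_t moffset;		/* mask offset inside struct rule */
};

static const struct tuple_info tuple_key_info[] = {
	{ "ip_tos",   offsetof(struct rule, ip_tos),
		      offsetof(struct rule, ip_tos_mask) },
	{ "ip_proto", offsetof(struct rule, ip_proto),
		      offsetof(struct rule, ip_proto_mask) },
};

/* assumed x/y convention: x holds matched 1-bits, y matched 0-bits */
static void convert_u8(const struct rule *rule, const struct tuple_info *t,
		       uint8_t *key_x, uint8_t *key_y)
{
	const uint8_t *p = (const uint8_t *)rule;
	uint8_t v = p[t->offset], m = p[t->moffset];

	*key_x = v & m;
	*key_y = ~v & m;
}

int main(void)
{
	struct rule r = { .ip_tos = 0x2e, .ip_tos_mask = 0xff };
	uint8_t x, y;

	for (size_t i = 0; i < 2; i++) {
		convert_u8(&r, &tuple_key_info[i], &x, &y);
		printf("%s: x=%#x y=%#x\n", tuple_key_info[i].name, x, y);
	}
	return 0;
}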
@@ -5401,12 +5827,12 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
for (i = 0 ; i < MAX_TUPLE; i++) {
bool tuple_valid;
- u32 check_tuple;
tuple_size = tuple_key_info[i].key_length / 8;
- check_tuple = key_cfg->tuple_active & BIT(i);
+ if (!(key_cfg->tuple_active & BIT(i)))
+ continue;
- tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+ tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
cur_key_y, rule);
if (tuple_valid) {
cur_key_x += tuple_size;
@@ -5537,8 +5963,7 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
if (!spec || !unused_tuple)
return -EINVAL;
- *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
/* check whether src/dst ip address used */
if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
@@ -5553,8 +5978,8 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
if (!spec->pdst)
*unused_tuple |= BIT(INNER_DST_PORT);
- if (spec->tclass)
- return -EOPNOTSUPP;
+ if (!spec->tclass)
+ *unused_tuple |= BIT(INNER_IP_TOS);
return 0;
}
@@ -5566,7 +5991,7 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
return -EINVAL;
*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
@@ -5578,8 +6003,8 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
if (!spec->l4_proto)
*unused_tuple |= BIT(INNER_IP_PROTO);
- if (spec->tclass)
- return -EOPNOTSUPP;
+ if (!spec->tclass)
+ *unused_tuple |= BIT(INNER_IP_TOS);
if (spec->l4_4_bytes)
return -EOPNOTSUPP;
@@ -5649,9 +6074,98 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
return 0;
}
+static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
+{
+ switch (flow_type) {
+ case ETHER_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L2;
+ *unused_tuple &= ~BIT(INNER_L2_RSV);
+ break;
+ case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L3;
+ *unused_tuple &= ~BIT(INNER_L3_RSV);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L4;
+ *unused_tuple &= ~BIT(INNER_L4_RSV);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
+{
+ return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
+}
+
+static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
+{
+ u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ u16 data, offset, data_mask, offset_mask;
+ int ret;
+
+ info->layer = HCLGE_FD_USER_DEF_NONE;
+ *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
+
+ if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
+ return 0;
+
+ /* user-def data from ethtool is a 64 bit value: bits 0~15 are used
+ * for data, and bits 32~47 are used for the offset.
+ */
+ data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
+ data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
+ offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
+ offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
+
+ if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
+ dev_err(&hdev->pdev->dev,
+ "user-def offset[%u] should be no more than %u\n",
+ offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
+ return -EINVAL;
+ }
+
+ if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
+ dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "unsupported flow type for user-def bytes, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ info->data = data;
+ info->data_mask = data_mask;
+ info->offset = offset;
+
+ return 0;
+}
+
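As the comment above notes, ethtool carries the user-def field as two big-endian 32-bit words, with bits 0~15 holding the match data and bits 32~47 the byte offset. A standalone decoder mirroring that split:

#include <arpa/inet.h>	/* ntohl()/htonl() */
#include <stdint.h>
#include <stdio.h>

#define USER_DEF_DATA	0xffffu
#define USER_DEF_OFFSET	0xffffu

/* data[0] holds the high word (offset), data[1] the low word (data) */
static void parse_user_def(const uint32_t be_data[2],
			   uint16_t *data, uint16_t *offset)
{
	*offset = ntohl(be_data[0]) & USER_DEF_OFFSET;
	*data = ntohl(be_data[1]) & USER_DEF_DATA;
}

int main(void)
{
	uint32_t raw[2] = { htonl(12), htonl(0xbeef) };	/* offset 12, data 0xbeef */
	uint16_t data, offset;

	parse_user_def(raw, &data, &offset);
	printf("offset=%u data=%#x\n", offset, data);
	return 0;
}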
static int hclge_fd_check_spec(struct hclge_dev *hdev,
struct ethtool_rx_flow_spec *fs,
- u32 *unused_tuple)
+ u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
{
u32 flow_type;
int ret;
@@ -5664,11 +6178,9 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
return -EINVAL;
}
- if ((fs->flow_type & FLOW_EXT) &&
- (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
- dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
- return -EOPNOTSUPP;
- }
+ ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
+ if (ret)
+ return ret;
flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
switch (flow_type) {
@@ -5720,217 +6232,194 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}
-static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule, u8 ip_proto)
{
- struct hclge_fd_rule *rule = NULL;
- struct hlist_node *node2;
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
- spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= location)
- break;
- }
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
- spin_unlock_bh(&hdev->fd_rule_lock);
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
- return rule && rule->location == location;
-}
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
-/* make sure being called after lock up with fd_rule_lock */
-static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
- struct hclge_fd_rule *new_rule,
- u16 location,
- bool is_add)
-{
- struct hclge_fd_rule *rule = NULL, *parent = NULL;
- struct hlist_node *node2;
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
- if (is_add && !new_rule)
- return -EINVAL;
-
- hlist_for_each_entry_safe(rule, node2,
- &hdev->fd_rule_list, rule_node) {
- if (rule->location >= location)
- break;
- parent = rule;
- }
-
- if (rule && rule->location == location) {
- hlist_del(&rule->rule_node);
- kfree(rule);
- hdev->hclge_fd_rule_num--;
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
- if (!is_add) {
- if (!hdev->hclge_fd_rule_num)
- hdev->fd_active_type = HCLGE_FD_RULE_NONE;
- clear_bit(location, hdev->fd_bmap);
+ rule->tuples.ip_proto = ip_proto;
+ rule->tuples_mask.ip_proto = 0xFF;
+}
- return 0;
- }
- } else if (!is_add) {
- dev_err(&hdev->pdev->dev,
- "delete fail, rule %u is inexistent\n",
- location);
- return -EINVAL;
- }
+static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
- INIT_HLIST_NODE(&new_rule->rule_node);
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
- if (parent)
- hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
- else
- hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
- set_bit(location, hdev->fd_bmap);
- hdev->hclge_fd_rule_num++;
- hdev->fd_active_type = new_rule->rule_type;
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
- return 0;
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
}
-static int hclge_fd_get_tuple(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs,
- struct hclge_fd_rule *rule)
+static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule, u8 ip_proto)
{
- u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
+ IPV6_SIZE);
- switch (flow_type) {
- case SCTP_V4_FLOW:
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- rule->tuples.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
-
- rule->tuples.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
-
- rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
- rule->tuples_mask.src_port =
- be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+ be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
+ IPV6_SIZE);
- rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
- rule->tuples_mask.dst_port =
- be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
- rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
- rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
- rule->tuples.ether_proto = ETH_P_IP;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
- break;
- case IP_USER_FLOW:
- rule->tuples.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+ rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
- rule->tuples.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[IPV4_INDEX] =
- be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+ rule->tuples.ip_proto = ip_proto;
+ rule->tuples_mask.ip_proto = 0xFF;
+}
- rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
- rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
+ IPV6_SIZE);
- rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
- rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+ be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
+ IPV6_SIZE);
- rule->tuples.ether_proto = ETH_P_IP;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
+ rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
- be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+}
- rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
- rule->tuples_mask.src_port =
- be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
- rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
- rule->tuples_mask.dst_port =
- be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
- rule->tuples.ether_proto = ETH_P_IPV6;
- rule->tuples_mask.ether_proto = 0xFFFF;
+ rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
+}
+static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
+ struct hclge_fd_rule *rule)
+{
+ switch (info->layer) {
+ case HCLGE_FD_USER_DEF_L2:
+ rule->tuples.l2_user_def = info->data;
+ rule->tuples_mask.l2_user_def = info->data_mask;
break;
- case IPV6_USER_FLOW:
- be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
-
- be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
-
- rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
- rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
-
- rule->tuples.ether_proto = ETH_P_IPV6;
- rule->tuples_mask.ether_proto = 0xFFFF;
-
+ case HCLGE_FD_USER_DEF_L3:
+ rule->tuples.l3_user_def = info->data;
+ rule->tuples_mask.l3_user_def = info->data_mask;
break;
- case ETHER_FLOW:
- ether_addr_copy(rule->tuples.src_mac,
- fs->h_u.ether_spec.h_source);
- ether_addr_copy(rule->tuples_mask.src_mac,
- fs->m_u.ether_spec.h_source);
-
- ether_addr_copy(rule->tuples.dst_mac,
- fs->h_u.ether_spec.h_dest);
- ether_addr_copy(rule->tuples_mask.dst_mac,
- fs->m_u.ether_spec.h_dest);
-
- rule->tuples.ether_proto =
- be16_to_cpu(fs->h_u.ether_spec.h_proto);
- rule->tuples_mask.ether_proto =
- be16_to_cpu(fs->m_u.ether_spec.h_proto);
-
+ case HCLGE_FD_USER_DEF_L4:
+ rule->tuples.l4_user_def = (u32)info->data << 16;
+ rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ rule->ep.user_def = *info;
+}
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule,
+ struct hclge_fd_user_def_info *info)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
switch (flow_type) {
case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- rule->tuples.ip_proto = IPPROTO_SCTP;
- rule->tuples_mask.ip_proto = 0xFF;
+ hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
break;
case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- rule->tuples.ip_proto = IPPROTO_TCP;
- rule->tuples_mask.ip_proto = 0xFF;
+ hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
break;
case UDP_V4_FLOW:
+ hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
+ break;
+ case IP_USER_FLOW:
+ hclge_fd_get_ip4_tuple(hdev, fs, rule);
+ break;
+ case SCTP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
+ break;
+ case TCP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
+ break;
case UDP_V6_FLOW:
- rule->tuples.ip_proto = IPPROTO_UDP;
- rule->tuples_mask.ip_proto = 0xFF;
+ hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
break;
- default:
+ case IPV6_USER_FLOW:
+ hclge_fd_get_ip6_tuple(hdev, fs, rule);
break;
+ case ETHER_FLOW:
+ hclge_fd_get_ether_tuple(hdev, fs, rule);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
if (fs->flow_type & FLOW_EXT) {
rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ hclge_fd_get_user_def_tuple(info, rule);
}
if (fs->flow_type & FLOW_MAC_EXT) {
@@ -5941,33 +6430,53 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
return 0;
}
-/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
int ret;
- if (!rule) {
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ return ret;
+
+ return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+}
+
+static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ if (hdev->fd_active_type != rule->rule_type &&
+ (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
+ hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
dev_err(&hdev->pdev->dev,
- "The flow director rule is NULL\n");
+ "mode conflict(new type %d, active type %d), please delete existent rules first\n",
+ rule->rule_type, hdev->fd_active_type);
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EINVAL;
}
- /* it will never fail here, so needn't to check return value */
- hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+ ret = hclge_fd_check_user_def_refcnt(hdev, rule);
+ if (ret)
+ goto out;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ ret = hclge_clear_arfs_rules(hdev);
if (ret)
- goto clear_rule;
+ goto out;
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ ret = hclge_fd_config_rule(hdev, rule);
if (ret)
- goto clear_rule;
+ goto out;
- return 0;
+ rule->state = HCLGE_FD_ACTIVE;
+ hdev->fd_active_type = rule->rule_type;
+ hclge_update_fd_list(hdev, rule->state, rule->location, rule);
-clear_rule:
- hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+out:
+ spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
@@ -5979,11 +6488,48 @@ static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}
+static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
+ u16 *vport_id, u8 *action, u16 *queue_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ *action = HCLGE_FD_ACTION_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ u16 tqps;
+
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%u) > max vf num (%u)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+ *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = hdev->vport[vf].nic.kinfo.num_tqps;
+
+ if (ring >= tqps) {
+ dev_err(&hdev->pdev->dev,
+ "Error: queue id (%u) > max tqp num (%u)\n",
+ ring, tqps - 1);
+ return -EINVAL;
+ }
+
+ *action = HCLGE_FD_ACTION_SELECT_QUEUE;
+ *queue_id = ring;
+ }
+
+ return 0;
+}
+
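The new helper centralizes decoding of ethtool's 64-bit ring_cookie: RX_CLS_FLOW_DISC selects the drop action, otherwise the low 32 bits carry the ring id and the next 8 bits the VF id (the ethtool_get_flow_spec_ring()/ethtool_get_flow_spec_ring_vf() layout from the uapi header). A standalone decoder using that layout:

#include <stdint.h>
#include <stdio.h>

#define RX_CLS_FLOW_DISC	0xffffffffffffffffULL
#define RING_MASK		0x00000000ffffffffULL
#define RING_VF_MASK		0x000000ff00000000ULL
#define RING_VF_OFF		32

static void parse_ring_cookie(uint64_t cookie)
{
	if (cookie == RX_CLS_FLOW_DISC) {
		printf("action: drop\n");
		return;
	}
	printf("vf=%u ring=%u\n",
	       (unsigned int)((cookie & RING_VF_MASK) >> RING_VF_OFF),
	       (unsigned int)(cookie & RING_MASK));
}

int main(void)
{
	parse_ring_cookie(RX_CLS_FLOW_DISC);
	parse_ring_cookie(((uint64_t)2 << RING_VF_OFF) | 7);	/* vf 2, ring 7 */
	return 0;
}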
static int hclge_add_fd_entry(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_user_def_info info;
u16 dst_vport_id = 0, q_index = 0;
struct ethtool_rx_flow_spec *fs;
struct hclge_fd_rule *rule;
@@ -6003,51 +6549,22 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -EOPNOTSUPP;
}
- if (hclge_is_cls_flower_active(handle)) {
- dev_err(&hdev->pdev->dev,
- "please delete all exist cls flower rules first\n");
- return -EINVAL;
- }
-
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
- ret = hclge_fd_check_spec(hdev, fs, &unused);
+ ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
if (ret)
return ret;
- if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
- action = HCLGE_FD_ACTION_DROP_PACKET;
- } else {
- u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
- u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- u16 tqps;
-
- if (vf > hdev->num_req_vfs) {
- dev_err(&hdev->pdev->dev,
- "Error: vf id (%u) > max vf num (%u)\n",
- vf, hdev->num_req_vfs);
- return -EINVAL;
- }
-
- dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
- tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
-
- if (ring >= tqps) {
- dev_err(&hdev->pdev->dev,
- "Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
- return -EINVAL;
- }
-
- action = HCLGE_FD_ACTION_SELECT_QUEUE;
- q_index = ring;
- }
+ ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
+ &action, &q_index);
+ if (ret)
+ return ret;
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
- ret = hclge_fd_get_tuple(hdev, fs, rule);
+ ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
if (ret) {
kfree(rule);
return ret;
@@ -6061,15 +6578,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
rule->action = action;
rule->rule_type = HCLGE_FD_EP_ACTIVE;
- /* to avoid rule conflict, when user configure rule by ethtool,
- * we need to clear all arfs rules
- */
- spin_lock_bh(&hdev->fd_rule_lock);
- hclge_clear_arfs_rules(handle);
-
- ret = hclge_fd_config_rule(hdev, rule);
-
- spin_unlock_bh(&hdev->fd_rule_lock);
+ ret = hclge_add_fd_entry_common(hdev, rule);
+ if (ret)
+ kfree(rule);
return ret;
}
@@ -6090,32 +6601,30 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
return -EINVAL;
- if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
- !hclge_fd_rule_exist(hdev, fs->location)) {
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
+ !test_bit(fs->location, hdev->fd_bmap)) {
dev_err(&hdev->pdev->dev,
"Delete fail, rule %u is inexistent\n", fs->location);
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -ENOENT;
}
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
NULL, false);
if (ret)
- return ret;
+ goto out;
- spin_lock_bh(&hdev->fd_rule_lock);
- ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+ hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
+out:
spin_unlock_bh(&hdev->fd_rule_lock);
-
return ret;
}
-/* make sure being called after lock up with fd_rule_lock */
-static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
- bool clear_list)
+static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
+ bool clear_list)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
u16 location;
@@ -6123,6 +6632,8 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;
+ spin_lock_bh(&hdev->fd_rule_lock);
+
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -6139,6 +6650,14 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
+{
+ hclge_clear_fd_rules_in_list(hdev, true);
+ hclge_fd_disable_user_def(hdev);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6147,7 +6666,6 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
- int ret;
/* Return ok here, because reset error handling will check this
* return value. If error is returned here, the reset process will
@@ -6162,25 +6680,11 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (!ret)
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
-
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Restore rule %u failed, remove it\n",
- rule->location);
- clear_bit(rule->location, hdev->fd_bmap);
- hlist_del(&rule->rule_node);
- kfree(rule);
- hdev->hclge_fd_rule_num--;
- }
+ if (rule->state == HCLGE_FD_ACTIVE)
+ rule->state = HCLGE_FD_TO_ADD;
}
-
- if (hdev->hclge_fd_rule_num)
- hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
-
spin_unlock_bh(&hdev->fd_rule_lock);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
return 0;
}
@@ -6268,6 +6772,10 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
IPV6_SIZE);
+ spec->tclass = rule->tuples.ip_tos;
+ spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
spec->psrc = cpu_to_be16(rule->tuples.src_port);
spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
0 : cpu_to_be16(rule->tuples_mask.src_port);
@@ -6295,6 +6803,10 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
cpu_to_be32_array(spec_mask->ip6dst,
rule->tuples_mask.dst_ip, IPV6_SIZE);
+ spec->tclass = rule->tuples.ip_tos;
+ spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
spec->l4_proto = rule->tuples.ip_proto;
spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
0 : rule->tuples_mask.ip_proto;
@@ -6322,6 +6834,24 @@ static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}
+static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
+ HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
+ fs->h_ext.data[0] = 0;
+ fs->h_ext.data[1] = 0;
+ fs->m_ext.data[0] = 0;
+ fs->m_ext.data[1] = 0;
+ } else {
+ fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
+ fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
+ fs->m_ext.data[0] =
+ cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
+ fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
+ }
+}
+
static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
@@ -6330,6 +6860,8 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
fs->m_ext.vlan_tci =
rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
+
+ hclge_fd_get_user_def_info(fs, rule);
}
if (fs->flow_type & FLOW_MAC_EXT) {
@@ -6441,6 +6973,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return -EMSGSIZE;
}
+ if (rule->state == HCLGE_FD_TO_DEL)
+ continue;
+
rule_locs[cnt] = rule->location;
cnt++;
}
@@ -6500,6 +7035,7 @@ static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
rule->action = 0;
rule->vf_id = 0;
rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ rule->state = HCLGE_FD_TO_ADD;
if (tuples->ether_proto == ETH_P_IP) {
if (tuples->ip_proto == IPPROTO_TCP)
rule->flow_type = TCP_V4_FLOW;
@@ -6522,9 +7058,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
- u16 tmp_queue_id;
u16 bit_id;
- int ret;
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
@@ -6560,34 +7094,19 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOMEM;
}
- set_bit(bit_id, hdev->fd_bmap);
rule->location = bit_id;
rule->arfs.flow_id = flow_id;
rule->queue_id = queue_id;
hclge_fd_build_arfs_rule(&new_tuples, rule);
- ret = hclge_fd_config_rule(hdev, rule);
-
- spin_unlock_bh(&hdev->fd_rule_lock);
-
- if (ret)
- return ret;
-
- return rule->location;
+ hclge_update_fd_list(hdev, rule->state, rule->location, rule);
+ hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
+ } else if (rule->queue_id != queue_id) {
+ rule->queue_id = queue_id;
+ rule->state = HCLGE_FD_TO_ADD;
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
}
-
spin_unlock_bh(&hdev->fd_rule_lock);
-
- if (rule->queue_id == queue_id)
- return rule->location;
-
- tmp_queue_id = rule->queue_id;
- rule->queue_id = queue_id;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret) {
- rule->queue_id = tmp_queue_id;
- return ret;
- }
-
return rule->location;
}
@@ -6597,7 +7116,6 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_fd_rule *rule;
struct hlist_node *node;
- HLIST_HEAD(del_list);
spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
@@ -6605,34 +7123,51 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
return;
}
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->state != HCLGE_FD_ACTIVE)
+ continue;
if (rps_may_expire_flow(handle->netdev, rule->queue_id,
rule->arfs.flow_id, rule->location)) {
- hlist_del_init(&rule->rule_node);
- hlist_add_head(&rule->rule_node, &del_list);
- hdev->hclge_fd_rule_num--;
- clear_bit(rule->location, hdev->fd_bmap);
+ rule->state = HCLGE_FD_TO_DEL;
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
}
}
spin_unlock_bh(&hdev->fd_rule_lock);
-
- hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
- kfree(rule);
- }
#endif
}
/* must be called with fd_rule_lock held */
-static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
+ return 0;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ switch (rule->state) {
+ case HCLGE_FD_TO_DEL:
+ case HCLGE_FD_ACTIVE:
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ if (ret)
+ return ret;
+ fallthrough;
+ case HCLGE_FD_TO_ADD:
+ hclge_fd_dec_rule_cnt(hdev, rule->location);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ break;
+ default:
+ break;
+ }
+ }
+ hclge_sync_fd_state(hdev);
- if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
- hclge_del_all_fd_entries(handle, true);
#endif
+ return 0;
}
static void hclge_get_cls_key_basic(const struct flow_rule *flow,
@@ -6814,12 +7349,6 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
- if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
- dev_err(&hdev->pdev->dev,
- "please remove all exist fd rules via ethtool first\n");
- return -EINVAL;
- }
-
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -6832,8 +7361,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
return -ENOMEM;
ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
- if (ret)
- goto err;
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
rule->action = HCLGE_FD_ACTION_SELECT_TC;
rule->cls_flower.tc = tc;
@@ -6842,22 +7373,10 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
rule->cls_flower.cookie = cls_flower->cookie;
rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
- spin_lock_bh(&hdev->fd_rule_lock);
- hclge_clear_arfs_rules(handle);
-
- ret = hclge_fd_config_rule(hdev, rule);
-
- spin_unlock_bh(&hdev->fd_rule_lock);
-
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to add cls flower rule, ret = %d\n", ret);
- goto err;
- }
+ ret = hclge_add_fd_entry_common(hdev, rule);
+ if (ret)
+ kfree(rule);
- return 0;
-err:
- kfree(rule);
return ret;
}
@@ -6894,25 +7413,66 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
NULL, false);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to delete cls flower rule %u, ret = %d\n",
- rule->location, ret);
spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
- ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to delete cls flower rule %u in list, ret = %d\n",
- rule->location, ret);
- spin_unlock_bh(&hdev->fd_rule_lock);
- return ret;
+ hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
+static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret = 0;
+
+ if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
+ return;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+ switch (rule->state) {
+ case HCLGE_FD_TO_ADD:
+ ret = hclge_fd_config_rule(hdev, rule);
+ if (ret)
+ goto out;
+ rule->state = HCLGE_FD_ACTIVE;
+ break;
+ case HCLGE_FD_TO_DEL:
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ if (ret)
+ goto out;
+ hclge_fd_dec_rule_cnt(hdev, rule->location);
+ hclge_fd_free_node(hdev, rule);
+ break;
+ default:
+ break;
+ }
}
+out:
+ if (ret)
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+
spin_unlock_bh(&hdev->fd_rule_lock);
+}
- return 0;
+static void hclge_sync_fd_table(struct hclge_dev *hdev)
+{
+ if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
+ bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+
+ hclge_clear_fd_rules_in_list(hdev, clear_list);
+ }
+
+ hclge_sync_fd_user_def_cfg(hdev, false);
+
+ hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
}
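hclge_sync_fd_list consumes HCLGE_STATE_FD_TBL_CHANGED up front and re-arms it when any hardware write fails, so a partially applied table is simply retried on the next service-task pass. A sketch of that consume-and-rearm idiom, using C11 atomics in place of the kernel's test_and_clear_bit/set_bit (an assumption made only to keep the example self-contained):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool tbl_changed = true;

    /* returns false to simulate a transient hardware error */
    static bool program_one_rule(int i) { return i != 1; }

    static void sync_fd_list(int nrules)
    {
        /* consume the flag; nothing to do if nobody set it */
        if (!atomic_exchange(&tbl_changed, false))
            return;

        for (int i = 0; i < nrules; i++)
            if (!program_one_rule(i)) {
                /* re-arm so the next service pass retries */
                atomic_store(&tbl_changed, true);
                return;
            }
    }

    int main(void)
    {
        sync_fd_list(3);
        printf("retry pending: %d\n", (int)atomic_load(&tbl_changed));
        return 0;
    }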
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
@@ -6952,18 +7512,15 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- bool clear;
hdev->fd_en = enable;
- clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
- if (!enable) {
- spin_lock_bh(&hdev->fd_rule_lock);
- hclge_del_all_fd_entries(handle, clear);
- spin_unlock_bh(&hdev->fd_rule_lock);
- } else {
+ if (!enable)
+ set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
+ else
hclge_restore_fd_entries(handle);
- }
+
+ hclge_task_schedule(hdev, 0);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -7124,19 +7681,19 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
enum hnae3_loop loop_mode)
{
-#define HCLGE_SERDES_RETRY_MS 10
-#define HCLGE_SERDES_RETRY_NUM 100
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
- struct hclge_serdes_lb_cmd *req;
+ struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
int ret, i = 0;
u8 loop_mode_b;
- req = (struct hclge_serdes_lb_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+ req = (struct hclge_common_lb_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
switch (loop_mode) {
case HNAE3_LOOP_SERIAL_SERDES:
@@ -7145,9 +7702,12 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
case HNAE3_LOOP_PARALLEL_SERDES:
loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
break;
+ case HNAE3_LOOP_PHY:
+ loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+ break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported serdes loopback mode %d\n", loop_mode);
+ "unsupported common loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
@@ -7161,39 +7721,39 @@ static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "serdes loopback set fail, ret = %d\n", ret);
+ "common loopback set fail, ret = %d\n", ret);
return ret;
}
do {
- msleep(HCLGE_SERDES_RETRY_MS);
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+ msleep(HCLGE_COMMON_LB_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "serdes loopback get, ret = %d\n", ret);
+ "common loopback get, ret = %d\n", ret);
return ret;
}
- } while (++i < HCLGE_SERDES_RETRY_NUM &&
- !(req->result & HCLGE_CMD_SERDES_DONE_B));
+ } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
- if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
- dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+ if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
return -EBUSY;
- } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+ } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
return -EIO;
}
return ret;
}
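The renamed common-loopback helper keeps the same completion protocol: poll the result register up to 100 times at 10 ms intervals, then distinguish a timeout (-EBUSY) from a firmware-reported failure (-EIO). A compact user-space model of that bounded poll; the register read and bit values are stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define RETRY_NUM   100
    #define RETRY_MS    10
    #define DONE_BIT    0x1
    #define SUCCESS_BIT 0x2

    /* stand-in for re-reading the command result from firmware */
    static unsigned int read_result(int tick)
    {
        return tick > 3 ? (DONE_BIT | SUCCESS_BIT) : 0;
    }

    static int wait_loopback_done(void)
    {
        unsigned int res = 0;

        for (int i = 0; i < RETRY_NUM; i++) {
            usleep(RETRY_MS * 1000);
            res = read_result(i);
            if (res & DONE_BIT)
                break;
        }
        if (!(res & DONE_BIT))
            return -EBUSY; /* timed out waiting for firmware */
        if (!(res & SUCCESS_BIT))
            return -EIO;   /* firmware completed but reported failure */
        return 0;
    }

    int main(void) { printf("ret = %d\n", wait_loopback_done()); return 0; }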
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
enum hnae3_loop loop_mode)
{
int ret;
- ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
+ ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
if (ret)
return ret;
@@ -7242,8 +7802,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
struct phy_device *phydev = hdev->hw.mac.phydev;
int ret;
- if (!phydev)
+ if (!phydev) {
+ if (hnae3_dev_phy_imp_supported(hdev))
+ return hclge_set_common_loopback(hdev, en,
+ HNAE3_LOOP_PHY);
return -ENOTSUPP;
+ }
if (en)
ret = hclge_enable_phy_loopback(hdev, phydev);
@@ -7265,13 +7829,12 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
- int stream_id, bool enable)
+static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
+ u16 stream_id, bool enable)
{
struct hclge_desc desc;
struct hclge_cfg_com_tqp_queue_cmd *req =
(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
- int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id);
@@ -7279,20 +7842,30 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
if (enable)
req->enable |= 1U << HCLGE_TQP_ENABLE_B;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Tqp enable fail, status =%d.\n", ret);
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int ret;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7314,7 +7887,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
break;
case HNAE3_LOOP_SERIAL_SERDES:
case HNAE3_LOOP_PARALLEL_SERDES:
- ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+ ret = hclge_set_common_loopback(hdev, en, loop_mode);
break;
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
@@ -7329,14 +7902,12 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
if (ret)
return ret;
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- ret = hclge_tqp_enable(hdev, i, 0, en);
- if (ret)
- return ret;
- }
+ ret = hclge_tqp_enable(handle, en);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
+ en ? "enable" : "disable", ret);
- return 0;
+ return ret;
}
static int hclge_set_default_loopback(struct hclge_dev *hdev)
@@ -7347,11 +7918,11 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
+ ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
if (ret)
return ret;
- return hclge_cfg_serdes_loopback(hdev, false,
+ return hclge_cfg_common_loopback(hdev, false,
HNAE3_LOOP_PARALLEL_SERDES);
}
@@ -7423,11 +7994,10 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
spin_lock_bh(&hdev->fd_rule_lock);
- hclge_clear_arfs_rules(handle);
+ hclge_clear_arfs_rules(hdev);
spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
@@ -7440,8 +8010,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
return;
}
- for (i = 0; i < handle->kinfo.num_tqps; i++)
- hclge_reset_tqp(handle, i);
+ hclge_reset_tqp(handle);
hclge_config_mac_tnl_int(hdev, false);
@@ -7891,7 +8460,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
/* if the mac addr is already in the mac list, no need to add a new
* one into it, just check the mac addr state, convert it to a new
- * new state, or just remove it, or do nothing.
+ * state, or just remove it, or do nothing.
*/
mac_node = hclge_find_mac_node(list, addr);
if (mac_node) {
@@ -8080,7 +8649,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-
/* if already overflow, not to print each time */
if (status == -ENOSPC &&
!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
@@ -8129,7 +8697,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
else
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-
} else if (status == -ENOENT) {
status = 0;
}
@@ -8564,7 +9131,7 @@ static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
return true;
vf_idx += HCLGE_VF_VPORT_START_NUM;
- for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+ for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
if (i != vf_idx &&
ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
return true;
@@ -8758,6 +9325,29 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return 0;
}
+static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hdev->hw.mac.phy_addr;
+ /* this command reads phy id and register at the same time */
+ fallthrough;
+ case SIOCGMIIREG:
+ data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
+ return 0;
+
+ case SIOCSMIIREG:
+ return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
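With an IMP-managed PHY there is no phydev, so hclge_mii_ioctl services the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests directly and user space needs no changes. A minimal reader using the same ioctls (the interface name is a placeholder):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/mii.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ifreq ifr;
        /* the mii data overlays the ifreq union, as in mii-tool */
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder */

        if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) { /* fills mii->phy_id */
            mii->reg_num = MII_BMSR;             /* basic status register */
            if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                printf("phy %u BMSR=0x%04x\n", mii->phy_id, mii->val_out);
        }
        close(fd);
        return 0;
    }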
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
int cmd)
{
@@ -8765,7 +9355,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_dev *hdev = vport->back;
if (!hdev->hw.mac.phydev)
- return -EOPNOTSUPP;
+ return hclge_mii_ioctl(hdev, ifr, cmd);
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
@@ -8922,8 +9512,7 @@ static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
+ bool is_kill, u16 vlan)
{
struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_desc desc[2];
@@ -8989,8 +9578,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && !vlan_id)
return 0;
- ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
- proto);
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %u vport vlan filter config fail, ret =%d.\n",
@@ -9440,7 +10028,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
hclge_restore_mac_table_common(vport);
hclge_restore_vport_vlan_table(vport);
set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
-
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
}
@@ -9796,7 +10384,7 @@ out:
return ret;
}
-static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
+static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
bool enable)
{
struct hclge_reset_tqp_queue_cmd *req;
@@ -9852,94 +10440,114 @@ u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
return tqp->index;
}
-int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int reset_try_times = 0;
+ u16 reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ u16 i;
- queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
-
- ret = hclge_tqp_enable(hdev, queue_id, 0, false);
- if (ret) {
- dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
- return ret;
- }
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ queue_gid = hclge_covert_handle_qid_global(handle, i);
+ ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to send reset tqp cmd, ret = %d\n",
+ ret);
+ return ret;
+ }
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Send reset tqp cmd fail, ret = %d\n", ret);
- return ret;
- }
+ while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+ reset_status = hclge_get_reset_status(hdev, queue_gid);
+ if (reset_status)
+ break;
- while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
- if (reset_status)
- break;
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
+ }
- /* Wait for tqp hw reset */
- usleep_range(1000, 1200);
- }
+ if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "wait for tqp hw reset timeout\n");
+ return -ETIME;
+ }
- if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
- dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
- return ret;
+ ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to deassert soft reset, ret = %d\n",
+ ret);
+ return ret;
+ }
+ reset_try_times = 0;
}
-
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Deassert the soft reset fail, ret = %d\n", ret);
-
- return ret;
+ return 0;
}
-void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
+static int hclge_reset_rcb(struct hnae3_handle *handle)
{
- struct hnae3_handle *handle = &vport->nic;
+#define HCLGE_RESET_RCB_NOT_SUPPORT 0U
+#define HCLGE_RESET_RCB_SUCCESS 1U
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int reset_try_times = 0;
- int reset_status;
+ struct hclge_reset_cmd *req;
+ struct hclge_desc desc;
+ u8 return_status;
u16 queue_gid;
int ret;
- if (queue_id >= handle->kinfo.num_tqps) {
- dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
- queue_id);
- return;
- }
+ queue_gid = hclge_covert_handle_qid_global(handle, 0);
- queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ req = (struct hclge_reset_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+ hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
+ req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
+ req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Send reset tqp cmd fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev,
+ "failed to send rcb reset cmd, ret = %d\n", ret);
+ return ret;
}
- while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
- if (reset_status)
- break;
+ return_status = req->fun_reset_rcb_return_status;
+ if (return_status == HCLGE_RESET_RCB_SUCCESS)
+ return 0;
- /* Wait for tqp hw reset */
- usleep_range(1000, 1200);
+ if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
+ dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
+ return_status);
+ return -EIO;
}
- if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
- dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
- return;
+ /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
+ * again to reset all tqps
+ */
+ return hclge_reset_tqp_cmd(handle);
+}
+
+int hclge_reset_tqp(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* only need to disable PF's tqp */
+ if (!vport->vport_id) {
+ ret = hclge_tqp_enable(handle, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to disable tqp, ret = %d\n", ret);
+ return ret;
+ }
}
- ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Deassert the soft reset fail, ret = %d\n", ret);
+ return hclge_reset_rcb(handle);
}
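hclge_reset_rcb attempts the batched RCB reset first and only falls back to the per-queue soft-reset commands when firmware reports the opcode unsupported, so one code path serves both old and new firmware. The shape of that capability fallback, with modeled return codes rather than the real command layer:

    #include <errno.h>
    #include <stdio.h>

    enum { RCB_NOT_SUPPORT = 0, RCB_SUCCESS = 1 };

    static int fw_reset_rcb(void) { return RCB_NOT_SUPPORT; } /* old fw */
    static int reset_tqps_one_by_one(void) { return 0; }

    static int reset_queues(void)
    {
        int status = fw_reset_rcb();

        if (status == RCB_SUCCESS)
            return 0;
        if (status != RCB_NOT_SUPPORT)
            return -EIO; /* firmware tried and failed */
        /* opcode unsupported: fall back to per-queue soft reset */
        return reset_tqps_one_by_one();
    }

    int main(void) { printf("ret = %d\n", reset_queues()); return 0; }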
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
@@ -10012,9 +10620,10 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct phy_device *phydev = hdev->hw.mac.phydev;
+ u8 media_type = hdev->hw.mac.media_type;
- *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
+ *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
+ hclge_get_autoneg(handle) : 0;
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
*rx_en = 0;
@@ -10060,7 +10669,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- if (phydev) {
+ if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
fc_autoneg = hclge_get_autoneg(handle);
if (auto_neg != fc_autoneg) {
dev_info(&hdev->pdev->dev,
@@ -10079,7 +10688,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_record_user_pauseparam(hdev, rx_en, tx_en);
- if (!auto_neg)
+ if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
if (phydev)
@@ -10181,7 +10790,6 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
- dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
@@ -10296,39 +10904,35 @@ static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
- struct hclge_vport *vport;
- int i, ret;
-
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- vport = &hdev->vport[i];
+ struct hclge_vport *vport = &hdev->vport[0];
+ int ret;
- switch (client->type) {
- case HNAE3_CLIENT_KNIC:
- hdev->nic_client = client;
- vport->nic.client = client;
- ret = hclge_init_nic_client_instance(ae_dev, vport);
- if (ret)
- goto clear_nic;
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ vport->nic.client = client;
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_nic;
- ret = hclge_init_roce_client_instance(ae_dev, vport);
- if (ret)
- goto clear_roce;
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
- break;
- case HNAE3_CLIENT_ROCE:
- if (hnae3_dev_roce_supported(hdev)) {
- hdev->roce_client = client;
- vport->roce.client = client;
- }
+ break;
+ case HNAE3_CLIENT_ROCE:
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ vport->roce.client = client;
+ }
- ret = hclge_init_roce_client_instance(ae_dev, vport);
- if (ret)
- goto clear_roce;
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
- break;
- default:
- return -EINVAL;
- }
+ break;
+ default:
+ return -EINVAL;
}
return 0;
@@ -10347,32 +10951,27 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
- struct hclge_vport *vport;
- int i;
+ struct hclge_vport *vport = &hdev->vport[0];
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- vport = &hdev->vport[i];
- if (hdev->roce_client) {
- clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- msleep(HCLGE_WAIT_RESET_DONE);
-
- hdev->roce_client->ops->uninit_instance(&vport->roce,
- 0);
- hdev->roce_client = NULL;
- vport->roce.client = NULL;
- }
- if (client->type == HNAE3_CLIENT_ROCE)
- return;
- if (hdev->nic_client && client->ops->uninit_instance) {
- clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- msleep(HCLGE_WAIT_RESET_DONE);
-
- client->ops->uninit_instance(&vport->nic, 0);
- hdev->nic_client = NULL;
- vport->nic.client = NULL;
- }
+ if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ }
+ if (client->type == HNAE3_CLIENT_ROCE)
+ return;
+ if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
}
}
@@ -10491,10 +11090,11 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_delayed_work_sync(&hdev->service_task);
}
-static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
{
-#define HCLGE_FLR_RETRY_WAIT_MS 500
-#define HCLGE_FLR_RETRY_CNT 5
+#define HCLGE_RESET_RETRY_WAIT_MS 500
+#define HCLGE_RESET_RETRY_CNT 5
struct hclge_dev *hdev = ae_dev->priv;
int retry_cnt = 0;
@@ -10503,30 +11103,32 @@ static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
retry:
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = HNAE3_FLR_RESET;
+ hdev->reset_type = rst_type;
ret = hclge_reset_prepare(hdev);
if (ret || hdev->reset_pending) {
- dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ dev_err(&hdev->pdev->dev, "failed to prepare for reset, ret=%d\n",
ret);
if (hdev->reset_pending ||
- retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
+ retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
dev_err(&hdev->pdev->dev,
"reset_pending:0x%lx, retry_cnt:%d\n",
hdev->reset_pending, retry_cnt);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
- msleep(HCLGE_FLR_RETRY_WAIT_MS);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
goto retry;
}
}
- /* disable misc vector before FLR done */
+ /* disable misc vector before reset done */
hclge_enable_vector(&hdev->misc_vector, false);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- hdev->rst_stats.flr_rst_cnt++;
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ hdev->rst_stats.flr_rst_cnt++;
}
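Generalizing the FLR hooks into reset_prepare keeps the bounded retry: if preparation fails or a reset is still pending, the handler drops reset_sem, sleeps 500 ms, and tries again up to five times. A simplified model of that loop (the semaphore and state bits are elided):

    #include <stdio.h>
    #include <unistd.h>

    #define RESET_RETRY_WAIT_MS 500
    #define RESET_RETRY_CNT     5

    /* fails on the first two attempts to exercise the retry path */
    static int reset_prepare(int attempt) { return attempt < 2 ? -1 : 0; }

    int main(void)
    {
        int retry_cnt = 0;

    retry:
        /* the driver takes reset_sem and sets RST_HANDLING here */
        if (reset_prepare(retry_cnt) && retry_cnt++ < RESET_RETRY_CNT) {
            /* release the semaphore, back off, then try again */
            usleep(RESET_RETRY_WAIT_MS * 1000);
            goto retry;
        }
        printf("prepared after %d retries\n", retry_cnt);
        return 0;
    }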
-static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
int ret;
@@ -10637,7 +11239,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_msi_irq_uninit;
- if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
+ !hnae3_dev_phy_imp_supported(hdev)) {
ret = hclge_mac_mdio_config(hdev);
if (ret)
goto err_msi_irq_uninit;
@@ -11030,6 +11633,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_tp_port_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
+ ret);
+ return ret;
+ }
+
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) {
dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
@@ -11120,6 +11730,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
hclge_uninit_mac_table(hdev);
+ hclge_del_all_fd_entries(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
@@ -11379,7 +11990,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE 1
#define REG_NUM_REMAIN_MASK 3
-#define BD_LIST_MAX_NUM 30
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
@@ -11473,15 +12083,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
int data_len_per_desc, bd_num, i;
- int bd_num_list[BD_LIST_MAX_NUM];
+ int *bd_num_list;
u32 data_len;
int ret;
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get dfx reg bd num fail, status is %d.\n", ret);
- return ret;
+ goto out;
}
data_len_per_desc = sizeof_field(struct hclge_desc, data);
@@ -11492,6 +12106,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
}
+out:
+ kfree(bd_num_list);
return ret;
}
@@ -11499,16 +12115,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
int bd_num, bd_num_max, buf_len, i;
- int bd_num_list[BD_LIST_MAX_NUM];
struct hclge_desc *desc_src;
+ int *bd_num_list;
u32 *reg = data;
int ret;
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get dfx reg bd num fail, status is %d.\n", ret);
- return ret;
+ goto out;
}
bd_num_max = bd_num_list[0];
@@ -11517,8 +12137,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
buf_len = sizeof(*desc_src) * bd_num_max;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src)
- return -ENOMEM;
+ if (!desc_src) {
+ ret = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
@@ -11534,6 +12156,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
}
kfree(desc_src);
+out:
+ kfree(bd_num_list);
return ret;
}
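Both DFX helpers replace the fixed 30-entry bd_num_list stack array with a buffer sized by the actual register-type count, freed through a single out: label on every path. The same unwind idiom in user-space form, with malloc standing in for kcalloc:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int query_bd_nums(int *list, int n)
    {
        for (int i = 0; i < n; i++)
            list[i] = i + 1; /* pretend firmware reported these */
        return 0;
    }

    static int get_reg_len(int type_num, int *len)
    {
        int *bd_num_list;
        int ret;

        bd_num_list = calloc(type_num, sizeof(int));
        if (!bd_num_list)
            return -ENOMEM;

        ret = query_bd_nums(bd_num_list, type_num);
        if (ret)
            goto out; /* one exit path frees the buffer */

        *len = 0;
        for (int i = 0; i < type_num; i++)
            *len += bd_num_list[i];
    out:
        free(bd_num_list);
        return ret;
    }

    int main(void)
    {
        int len = 0;

        printf("ret = %d, len = %d\n", get_reg_len(4, &len), len);
        return 0;
    }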
@@ -11877,8 +12501,8 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
- .flr_prepare = hclge_flr_prepare,
- .flr_done = hclge_flr_done,
+ .reset_prepare = hclge_reset_prepare_general,
+ .reset_done = hclge_reset_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -11943,7 +12567,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_link_mode = hclge_get_link_mode,
.add_fd_entry = hclge_add_fd_entry,
.del_fd_entry = hclge_del_fd_entry,
- .del_all_fd_entries = hclge_del_all_fd_entries,
.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
.get_fd_rule_info = hclge_get_fd_rule_info,
.get_fd_all_rules = hclge_get_all_rules,
@@ -11971,6 +12594,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.add_cls_flower = hclge_add_cls_flower,
.del_cls_flower = hclge_del_cls_flower,
.cls_flower_active = hclge_is_cls_flower_active,
+ .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
+ .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 19d7f28773f3..ff1d47308c2d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -223,6 +223,9 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL,
+ HCLGE_STATE_FD_TBL_CHANGED,
+ HCLGE_STATE_FD_CLEAR_ALL,
+ HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_MAX
};
@@ -345,7 +348,6 @@ struct hclge_tc_info {
};
struct hclge_cfg {
- u8 vmdq_vport_num;
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
@@ -536,6 +538,9 @@ enum HCLGE_FD_TUPLE {
MAX_TUPLE,
};
+#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \
+ (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV))
+
enum HCLGE_FD_META_DATA {
PACKET_TYPE_ID,
IP_FRAGEMENT,
@@ -548,9 +553,21 @@ enum HCLGE_FD_META_DATA {
MAX_META_DATA,
};
+enum HCLGE_FD_KEY_OPT {
+ KEY_OPT_U8,
+ KEY_OPT_LE16,
+ KEY_OPT_LE32,
+ KEY_OPT_MAC,
+ KEY_OPT_IP,
+ KEY_OPT_VNI,
+};
+
struct key_info {
u8 key_type;
u8 key_length; /* use bit as unit */
+ enum HCLGE_FD_KEY_OPT key_opt;
+ int offset;
+ int moffset;
};
#define MAX_KEY_LENGTH 400
@@ -558,6 +575,11 @@ struct key_info {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
+#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000
+#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0)
+
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
@@ -580,6 +602,33 @@ enum HCLGE_FD_ACTION {
HCLGE_FD_ACTION_SELECT_TC,
};
+enum HCLGE_FD_NODE_STATE {
+ HCLGE_FD_TO_ADD,
+ HCLGE_FD_TO_DEL,
+ HCLGE_FD_ACTIVE,
+ HCLGE_FD_DELETED,
+};
+
+enum HCLGE_FD_USER_DEF_LAYER {
+ HCLGE_FD_USER_DEF_NONE,
+ HCLGE_FD_USER_DEF_L2,
+ HCLGE_FD_USER_DEF_L3,
+ HCLGE_FD_USER_DEF_L4,
+};
+
+#define HCLGE_FD_USER_DEF_LAYER_NUM 3
+struct hclge_fd_user_def_cfg {
+ u16 ref_cnt;
+ u16 offset;
+};
+
+struct hclge_fd_user_def_info {
+ enum HCLGE_FD_USER_DEF_LAYER layer;
+ u16 data;
+ u16 data_mask;
+ u16 offset;
+};
+
struct hclge_fd_key_cfg {
u8 key_sel;
u8 inner_sipv6_word_en;
@@ -596,6 +645,7 @@ struct hclge_fd_cfg {
u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
+ struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM];
};
#define IPV4_INDEX 3
@@ -612,6 +662,9 @@ struct hclge_fd_rule_tuples {
u16 dst_port;
u16 vlan_tag1;
u16 ether_proto;
+ u16 l2_user_def;
+ u16 l3_user_def;
+ u32 l4_user_def;
u8 ip_tos;
u8 ip_proto;
};
@@ -630,11 +683,15 @@ struct hclge_fd_rule {
struct {
u16 flow_id; /* only used for arfs */
} arfs;
+ struct {
+ struct hclge_fd_user_def_info user_def;
+ } ep;
};
u16 queue_id;
u16 vf_id;
u16 location;
enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+ enum HCLGE_FD_NODE_STATE state;
u8 action;
};
@@ -753,7 +810,6 @@ struct hclge_dev {
struct hclge_rst_stats rst_stats;
struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
- u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
@@ -997,8 +1053,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
-int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
-void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
+int hclge_reset_tqp(struct hnae3_handle *handle);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
@@ -1034,4 +1089,5 @@ void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
+int hclge_push_vf_link_status(struct hclge_vport *vport);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 51a36e74f088..5512ffe0a149 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -490,16 +490,14 @@ static void hclge_get_vf_media_type(struct hclge_vport *vport,
resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}
-static int hclge_get_link_info(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP 1U
#define HCLGE_VF_LINK_STATE_DOWN 0U
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[8];
- u8 dest_vfid;
+ u8 msg_data[9];
u16 duplex;
/* mac.link can only be 0 or 1 */
@@ -520,11 +518,11 @@ static int hclge_get_link_info(struct hclge_vport *vport,
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
- dest_vfid = mbx_req->mbx_src_vfid;
+ msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;
/* send this requested info to VF */
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+ HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}
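The push variant widens msg_data to nine bytes: a u16 link status, a u32 speed, a u16 duplex, then one flag byte carrying HCLGE_MBX_PUSH_LINK_STATUS_EN so the VF can stop polling. A sketch of packing and unpacking that layout (the flag value is assumed, and host byte order is used on both sides since PF and VF share the machine):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PUSH_LINK_STATUS_EN 0x1 /* assumed flag value */

    static void pack_link_msg(uint8_t msg[9], uint16_t link,
                              uint32_t speed, uint16_t duplex)
    {
        memcpy(&msg[0], &link, sizeof(link));
        memcpy(&msg[2], &speed, sizeof(speed));
        memcpy(&msg[6], &duplex, sizeof(duplex));
        msg[8] = PUSH_LINK_STATUS_EN;
    }

    int main(void)
    {
        uint8_t msg[9];
        uint16_t link;
        uint32_t speed;

        pack_link_msg(msg, 1, 25000, 1);
        memcpy(&link, &msg[0], sizeof(link));
        memcpy(&speed, &msg[2], sizeof(speed));
        printf("link=%u speed=%u flag=0x%x\n", link, speed, msg[8]);
        return 0;
    }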
static void hclge_get_link_mode(struct hclge_vport *vport,
@@ -550,14 +548,32 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
-static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_RESET_ALL_QUEUE_DONE 1U
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id;
+ int ret;
memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
+ resp_msg->len = sizeof(u8);
+
+ /* the PF resets all of the VF's queues at a time, so it is unnecessary
+ * to reset queues when queue_id > 0; just return success.
+ */
+ if (queue_id > 0)
+ return 0;
- hclge_reset_vf_queue(vport, queue_id);
+ ret = hclge_reset_tqp(handle);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);
+
+ return ret;
}
static int hclge_reset_vf(struct hclge_vport *vport)
@@ -776,14 +792,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_get_vf_tcinfo(vport, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_get_link_info(vport, req);
+ ret = hclge_push_vf_link_status(vport);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to inform link stat to VF, ret = %d\n",
ret);
break;
case HCLGE_MBX_QUEUE_RESET:
- hclge_mbx_reset_vf_queue(vport, req);
+ ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
break;
case HCLGE_MBX_RESET:
ret = hclge_reset_vf(vport);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index e89820702540..08e88d9422cd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -268,3 +268,42 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
+
+u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+{
+ struct hclge_phy_reg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true);
+
+ req = (struct hclge_phy_reg_cmd *)desc.data;
+ req->reg_addr = cpu_to_le16(reg_addr);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to read phy reg, ret = %d.\n", ret);
+
+ return le16_to_cpu(req->reg_val);
+}
+
+int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
+{
+ struct hclge_phy_reg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false);
+
+ req = (struct hclge_phy_reg_cmd *)desc.data;
+ req->reg_addr = cpu_to_le16(reg_addr);
+ req->reg_val = cpu_to_le16(val);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to write phy reg, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index dd9a1218a7b0..fd0e20190b90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -9,5 +9,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
+u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 151afd1f0688..ebb962bad451 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -631,13 +631,12 @@ static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
return sum;
}
-static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
u16 vport_max_rss_size;
u16 max_rss_size;
- u8 i;
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
@@ -665,19 +664,18 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
- /* if user not set rss, the rss_size should compare with the
- * valid msi numbers to ensure one to one map between tqp and
- * irq as default.
- */
- if (!kinfo->req_rss_size)
- max_rss_size = min_t(u16, max_rss_size,
- (hdev->num_nic_msi - 1) /
- kinfo->tc_info.num_tc);
-
/* Set to the maximum specification value (max_rss_size). */
kinfo->rss_size = max_rss_size;
}
+}
+
+static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u8 i;
+ hclge_tm_update_kinfo_rss_size(vport);
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 46700c427849..d8c5c5810b99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -349,7 +349,6 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
u32 caps;
caps = __le32_to_cpu(cmd->caps[0]);
-
if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 8a37a22a176b..c6dc11b32aa7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -223,11 +223,14 @@ struct hclgevf_rss_indirection_table_cmd {
};
#define HCLGEVF_RSS_TC_OFFSET_S 0
-#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
#define HCLGEVF_RSS_TC_SIZE_S 12
-#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
+#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
+
struct hclgevf_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e295d359e912..0db51ef15ef6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -497,7 +497,6 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
link_state =
test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
-
if (link_state != hdev->hw.mac.link) {
client->ops->link_status_change(handle, !!link_state);
if (rclient && rclient->ops->link_status_change)
@@ -707,6 +706,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
(tc_valid[i] & 0x1));
hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
+ tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
+ 0x1);
hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
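tc_size still lives in bits 14:12, but newer hardware allows sizes past 7, so bit 11 now carries the fourth bit (tc_size >> 3). Encoding with the masks from hclgevf_cmd.h, spelled out numerically so the example stands alone:

    #include <stdint.h>
    #include <stdio.h>

    #define TC_OFFSET_S   0
    #define TC_OFFSET_M   0x07ffu /* GENMASK(10, 0) */
    #define TC_SIZE_MSB_B 11
    #define TC_SIZE_S     12
    #define TC_SIZE_M     0x7000u /* GENMASK(14, 12) */
    #define TC_VALID_B    15

    static uint16_t encode_tc_mode(unsigned int tc_size,
                                   unsigned int tc_offset)
    {
        uint16_t mode = 0;

        mode |= 1u << TC_VALID_B;
        mode |= (tc_size << TC_SIZE_S) & TC_SIZE_M;
        mode |= ((tc_size >> 3) & 0x1) << TC_SIZE_MSB_B; /* 4th bit */
        mode |= (tc_offset << TC_OFFSET_S) & TC_OFFSET_M;
        return mode;
    }

    int main(void)
    {
        /* tc_size = 9 (0b1001): low three bits land in 14:12,
         * the high bit lands in bit 11 */
        printf("mode = 0x%04x\n", encode_tc_mode(9, 128));
        return 0;
    }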
@@ -1241,12 +1243,11 @@ static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
}
}
-static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
- int stream_id, bool enable)
+static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
+ u16 stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
struct hclgevf_desc desc;
- int status;
req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
@@ -1257,12 +1258,22 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
if (enable)
req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "TQP enable fail, status =%d.\n", status);
+ return hclgevf_cmd_send(&hdev->hw, &desc, 1);
+}
- return status;
+static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+ u16 i;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
@@ -1711,20 +1722,39 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
-static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
+#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
+ u8 return_status = 0;
int ret;
+ u16 i;
/* disable vf queue before send queue reset msg to PF */
- ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
- if (ret)
+ ret = hclgevf_tqp_enable(handle, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
+ ret);
return ret;
+ }
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
- memcpy(send_msg.data, &queue_id, sizeof(queue_id));
- return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
+ sizeof(return_status));
+ if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
+ return ret;
+
+ for (i = 1; i < handle->kinfo.num_tqps; i++) {
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+ memcpy(send_msg.data, &i, sizeof(i));
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -2084,10 +2114,11 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
writel(en ? 1 : 0, vector->addr);
}
-static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
{
-#define HCLGEVF_FLR_RETRY_WAIT_MS 500
-#define HCLGEVF_FLR_RETRY_CNT 5
+#define HCLGEVF_RESET_RETRY_WAIT_MS 500
+#define HCLGEVF_RESET_RETRY_CNT 5
struct hclgevf_dev *hdev = ae_dev->priv;
int retry_cnt = 0;
@@ -2096,29 +2127,31 @@ static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
retry:
down(&hdev->reset_sem);
set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = HNAE3_FLR_RESET;
+ hdev->reset_type = rst_type;
ret = hclgevf_reset_prepare(hdev);
if (ret) {
- dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ dev_err(&hdev->pdev->dev, "failed to prepare for reset, ret=%d\n",
ret);
if (hdev->reset_pending ||
- retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
+ retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
dev_err(&hdev->pdev->dev,
"reset_pending:0x%lx, retry_cnt:%d\n",
hdev->reset_pending, retry_cnt);
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
- msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
+ msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
goto retry;
}
}
- /* disable misc vector before FLR done */
+ /* disable misc vector before reset done */
hclgevf_enable_vector(&hdev->misc_vector, false);
- hdev->rst_stats.flr_rst_cnt++;
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ hdev->rst_stats.flr_rst_cnt++;
}
-static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
{
struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
@@ -2307,10 +2340,11 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
hclgevf_tqps_update_stats(handle);
- /* request the link status from the PF. PF would be able to tell VF
- * about such updates in future so we might remove this later
+ /* VF does not need to request link status when this bit is set, because
+ * PF will push its link status to VFs whenever the link status changes.
*/
- hclgevf_request_link_info(hdev);
+ if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
+ hclgevf_request_link_info(hdev);
hclgevf_update_link_mode(hdev);
@@ -2356,7 +2390,6 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
/* fetch the events from their corresponding regs */
cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_STATE_REG);
-
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
@@ -2625,6 +2658,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
hclgevf_reset_tqp_stats(handle);
@@ -2638,14 +2672,11 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
if (hdev->reset_type != HNAE3_VF_RESET)
- for (i = 0; i < handle->kinfo.num_tqps; i++)
- if (hclgevf_reset_tqp(handle, i))
- break;
+ hclgevf_reset_tqp(handle);
hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
@@ -3615,7 +3646,7 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle,
}
#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFFFFFFFF
+#define SEPARATOR_VALUE 0xFDFCFBFA
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
@@ -3722,8 +3753,8 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
- .flr_prepare = hclgevf_flr_prepare,
- .flr_done = hclgevf_flr_done,
+ .reset_prepare = hclgevf_reset_prepare_general,
+ .reset_done = hclgevf_reset_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 8c27ecd819af..265c9b0b4728 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -152,6 +152,7 @@ enum hclgevf_states {
HCLGEVF_STATE_LINK_UPDATING,
HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
+ HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
};
struct hclgevf_mac {
@@ -176,9 +177,9 @@ struct hclgevf_hw {
/* TQP stats */
struct hlcgevf_tqp_stats {
- /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
@@ -192,7 +193,6 @@ struct hclgevf_tqp {
};
struct hclgevf_cfg {
- u8 vmdq_vport_num;
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 5b2dcd97c107..9b17735b9f4c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -276,6 +276,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
u8 duplex;
u32 speed;
u32 tail;
+ u8 flag;
u8 idx;
/* we can safely clear it now as we are at start of the async message
@@ -300,11 +301,16 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = msg_q[1];
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)msg_q[4];
+ flag = (u8)msg_q[5];
/* update upper layer with new link status */
hclgevf_update_link_status(hdev, link_status);
hclgevf_update_speed_duplex(hdev, speed, duplex);
+ if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
+ set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
+ &hdev->state);
+
break;
case HCLGE_MBX_LINK_STAT_MODE:
idx = (u8)msg_q[1];
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 883d0d7c6858..3e54017a2a5b 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -279,7 +279,7 @@ static int hns_mdio_write(struct mii_bus *bus,
static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
int ret;
- u16 reg_val = 0;
+ u16 reg_val;
u8 devad = ((regnum >> 16) & 0x1f);
u8 is_c45 = !!(regnum & MII_ADDR_C45);
u16 reg = (u16)(regnum & 0xffff);
@@ -420,7 +420,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
{
struct hns_mdio_device *mdio_dev;
struct mii_bus *new_bus;
- int ret = -ENODEV;
+ int ret;
if (!pdev) {
dev_err(NULL, "pdev is NULL!\r\n");
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index c340d9acba80..dc024ef521c0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -34,7 +34,7 @@
#include "hinic_rx.h"
#include "hinic_dev.h"
-#define SET_LINK_STR_MAX_LEN 128
+#define SET_LINK_STR_MAX_LEN 16
#define GET_SUPPORTED_MODE 0
#define GET_ADVERTISED_MODE 1
@@ -462,24 +462,19 @@ static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
{
struct hinic_link_ksettings_info settings = {0};
char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
+ const char *autoneg_str;
struct net_device *netdev = nic_dev->netdev;
enum nic_speed_level speed_level = 0;
int err;
- err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s",
- (set_settings & HILINK_LINK_SET_AUTONEG) ?
- (autoneg ? "autong enable " : "autong disable ") : "");
- if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
- netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
- err, SET_LINK_STR_MAX_LEN);
- return -EFAULT;
- }
+ autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autoneg enable " : "autoneg disable ") : "";
if (set_settings & HILINK_LINK_SET_SPEED) {
speed_level = hinic_ethtool_to_hw_speed_level(speed);
err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
- "%sspeed %d ", set_link_str, speed);
- if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) {
+ "speed %d ", speed);
+ if (err >= SET_LINK_STR_MAX_LEN) {
netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
err, SET_LINK_STR_MAX_LEN);
return -EFAULT;
@@ -494,11 +489,11 @@ static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
err = hinic_set_link_settings(nic_dev->hwdev, &settings);
if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
if (err)
- netif_err(nic_dev, drv, netdev, "Set %s failed\n",
- set_link_str);
+ netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
+ autoneg_str, set_link_str);
else
- netif_info(nic_dev, drv, netdev, "Set %s successfully\n",
- set_link_str);
+ netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
+ autoneg_str, set_link_str);
return err;
}
@@ -543,8 +538,8 @@ static void hinic_get_drvinfo(struct net_device *netdev,
struct hinic_hwif *hwif = hwdev->hwif;
int err;
- strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
if (err)
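The hunk above fixes two latent bugs at once: the old code passed set_link_str as both the destination and a "%s" source of snprintf(), which is undefined behavior when source and destination overlap, and its `err <= 0` check wrongly rejected the legal zero-length result. Since the buffer now only ever holds "speed %d ", SET_LINK_STR_MAX_LEN can also drop from 128 to 16. A minimal userspace sketch of the corrected pattern (buffer contents and values are illustrative, not the driver's):

#include <stdio.h>

#define SET_LINK_STR_MAX_LEN 16

int main(void)
{
	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
	const char *autoneg_str = "autoneg enable ";
	int speed = 1000;
	int err;

	/* Each fragment is formatted into its own storage; snprintf()
	 * never reads the buffer it is writing to.
	 */
	err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "speed %d ", speed);
	if (err >= SET_LINK_STR_MAX_LEN)
		return 1; /* truncated: report and bail out, as the driver does */

	/* Fragments are combined only at the point of use. */
	printf("Set %s%ssuccessfully\n", autoneg_str, set_link_str);
	return 0;
}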
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 4e4029d5c8e1..06586173add7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -629,10 +629,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
&cmd_paddr, GFP_KERNEL);
- if (!cmd_vaddr) {
- dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
+ if (!cmd_vaddr)
return -ENOMEM;
- }
cell_ctxt = &chain->cell_ctxt[cell_idx];
@@ -679,10 +677,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
GFP_KERNEL);
- if (!node) {
- dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
+ if (!node)
return -ENOMEM;
- }
node->read.hw_wb_resp_paddr = 0;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index efbaed389440..cab38ff0713c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -334,7 +334,7 @@ static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
}
/**
- * dma_attr_table_init - initialize the the default dma attributes
+ * dma_attr_table_init - initialize the default dma attributes
* @hwif: the HW interface of a pci function device
**/
static void dma_attr_init(struct hinic_hwif *hwif)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 819fa13034c0..817173f1fbb7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -440,18 +440,14 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
struct hinic_recv_msg *recv_msg)
{
struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
- struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
- if (!mgmt_work) {
- dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
+ if (!mgmt_work)
return;
- }
if (recv_msg->msg_len) {
mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
if (!mgmt_work->msg) {
- dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
kfree(mgmt_work);
return;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index fcf7bfe4aa47..dcba4d009bad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -414,7 +414,6 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
&rq->pi_dma_addr, GFP_KERNEL);
if (!rq->pi_virt_addr) {
- dev_err(&pdev->dev, "Failed to allocate PI address\n");
err = -ENOMEM;
goto err_pi_virt;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 070a7cc6392e..cce08647b9b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -137,10 +137,8 @@ static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
int err;
skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
- if (!skb) {
- netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
+ if (!skb)
return NULL;
- }
addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
DMA_FROM_DEVICE);
@@ -212,10 +210,8 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq)
for (i = 0; i < free_wqebbs; i++) {
skb = rx_alloc_skb(rxq, &dma_addr);
- if (!skb) {
- netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
+ if (!skb)
goto skb_out;
- }
hinic_set_sge(&sge, dma_addr, skb->len);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 8da7d46363b2..710c4ff7bc0e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -377,6 +377,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
} else if (ip.v4->version == 6) {
unsigned char *exthdr;
__be16 frag_off;
+
l3_type = IPV6_PKT;
tunnel_type = TUNNEL_UDP_CSUM;
exthdr = ip.hdr + sizeof(*ip.v6);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index c2e740475786..ea55314b209d 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -109,6 +109,7 @@ static const struct of_device_id ehea_device_table[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct platform_driver ehea_driver = {
.driver = {
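The one-line ehea change matters for autoloading: MODULE_DEVICE_TABLE(of, ...) exports the match table as module alias information, so udev/kmod can load the driver automatically when a matching device-tree node is probed, instead of requiring a manual modprobe. A minimal platform-driver skeleton showing the same pattern; the compatible string and all demo_* names are placeholders, not ehea's:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-device" }, /* placeholder */
	{ /* sentinel */ },
};
/* Emits "of:..." module aliases so the module is loaded automatically
 * when a matching device-tree node appears.
 */
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.of_match_table = demo_of_match,
	},
	.probe = demo_probe,
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");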
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ffb2a91750c7..5788bb956d73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -827,6 +827,30 @@ static void release_napi(struct ibmvnic_adapter *adapter)
adapter->napi_enabled = false;
}
+static const char *adapter_state_to_string(enum vnic_state state)
+{
+ switch (state) {
+ case VNIC_PROBING:
+ return "PROBING";
+ case VNIC_PROBED:
+ return "PROBED";
+ case VNIC_OPENING:
+ return "OPENING";
+ case VNIC_OPEN:
+ return "OPEN";
+ case VNIC_CLOSING:
+ return "CLOSING";
+ case VNIC_CLOSED:
+ return "CLOSED";
+ case VNIC_REMOVING:
+ return "REMOVING";
+ case VNIC_REMOVED:
+ return "REMOVED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
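adapter_state_to_string(), and its reset_reason_to_string() sibling later in this file, turn the raw enum values that used to appear in netdev_dbg() output ("[S:%d]") into readable names ("[S:%s]"). A userspace sketch of the idiom with a hypothetical three-state enum; the default arm keeps the helper total even if new states are added by future producers:

#include <stdio.h>

enum demo_state { DEMO_PROBING, DEMO_OPEN, DEMO_CLOSED };

static const char *demo_state_to_string(enum demo_state state)
{
	switch (state) {
	case DEMO_PROBING:
		return "PROBING";
	case DEMO_OPEN:
		return "OPEN";
	case DEMO_CLOSED:
		return "CLOSED";
	default:
		return "UNKNOWN"; /* tolerate values this build doesn't know */
	}
}

int main(void)
{
	printf("[S:%s] Login succeeded\n", demo_state_to_string(DEMO_OPEN));
	return 0;
}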
@@ -905,7 +929,7 @@ static int ibmvnic_login(struct net_device *netdev)
__ibmvnic_set_mac(netdev, adapter->mac_addr);
- netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
+ netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
return 0;
}
@@ -1179,8 +1203,9 @@ static int ibmvnic_open(struct net_device *netdev)
* honor our setting below.
*/
if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
- netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n",
- adapter->state, adapter->failover_pending);
+ netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
+ adapter_state_to_string(adapter->state),
+ adapter->failover_pending);
adapter->state = VNIC_OPEN;
rc = 0;
goto out;
@@ -1344,8 +1369,9 @@ static int ibmvnic_close(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
- netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
- adapter->state, adapter->failover_pending,
+ netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
+ adapter_state_to_string(adapter->state),
+ adapter->failover_pending,
adapter->force_reset_recovery);
/* If device failover is pending, just set device state and return.
@@ -1672,9 +1698,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- memcpy(dst + cur,
- page_address(skb_frag_page(frag)) +
- skb_frag_off(frag), skb_frag_size(frag));
+ memcpy(dst + cur, skb_frag_address(frag),
+ skb_frag_size(frag));
cur += skb_frag_size(frag);
}
} else {
@@ -1906,6 +1931,26 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return rc;
}
+static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
+{
+ switch (reason) {
+ case VNIC_RESET_FAILOVER:
+ return "FAILOVER";
+ case VNIC_RESET_MOBILITY:
+ return "MOBILITY";
+ case VNIC_RESET_FATAL:
+ return "FATAL";
+ case VNIC_RESET_NON_FATAL:
+ return "NON_FATAL";
+ case VNIC_RESET_TIMEOUT:
+ return "TIMEOUT";
+ case VNIC_RESET_CHANGE_PARAM:
+ return "CHANGE_PARAM";
+ default:
+ return "UNKNOWN";
+ }
+}
+
/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
@@ -1919,9 +1964,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
int rc;
netdev_dbg(adapter->netdev,
- "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
- adapter->state, adapter->failover_pending,
- rwi->reset_reason, reset_state);
+ "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
+ adapter_state_to_string(adapter->state),
+ adapter->failover_pending,
+ reset_reason_to_string(rwi->reset_reason),
+ adapter_state_to_string(reset_state));
adapter->reset_reason = rwi->reset_reason;
/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
@@ -1981,8 +2028,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
* from VNIC_CLOSING state.
*/
netdev_dbg(netdev,
- "Open changed state from %d, updating.\n",
- reset_state);
+ "Open changed state from %s, updating.\n",
+ adapter_state_to_string(reset_state));
reset_state = VNIC_OPEN;
adapter->state = VNIC_CLOSING;
}
@@ -2119,8 +2166,9 @@ out:
if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
rtnl_unlock();
- netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
- adapter->state, adapter->failover_pending, rc);
+ netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
+ adapter_state_to_string(adapter->state),
+ adapter->failover_pending, rc);
return rc;
}
@@ -2130,8 +2178,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
int rc;
- netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
- rwi->reset_reason);
+ netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
+ reset_reason_to_string(rwi->reset_reason));
/* read the state and check (again) after getting rtnl */
reset_state = adapter->state;
@@ -2197,8 +2245,9 @@ out:
/* restore adapter state if reset failed */
if (rc)
adapter->state = reset_state;
- netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
- adapter->state, adapter->failover_pending, rc);
+ netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
+ adapter_state_to_string(adapter->state),
+ adapter->failover_pending, rc);
return rc;
}
@@ -2233,8 +2282,9 @@ static void __ibmvnic_reset(struct work_struct *work)
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
if (test_and_set_bit_lock(0, &adapter->resetting)) {
- schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
- IBMVNIC_RESET_DELAY);
+ queue_delayed_work(system_long_wq,
+ &adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
return;
}
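Both reset entry points move from schedule_work()/schedule_delayed_work(), which target the default system_wq, to queue_work()/queue_delayed_work() on system_long_wq: an ibmvnic reset can block for a long time (note the 60-second settle wait in the next hunk), and such items belong on system_long_wq so they do not stall other system_wq users. A minimal kernel-module sketch of the pattern; the work body is a stand-in, not the driver's reset logic:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static void demo_long_fn(struct work_struct *work)
{
	ssleep(30); /* stands in for a long-running device reset */
}

static DECLARE_WORK(demo_long_work, demo_long_fn);

static int __init demo_init(void)
{
	/* system_long_wq is intended for items that may run for a long
	 * time, keeping the default system_wq responsive.
	 */
	queue_work(system_long_wq, &demo_long_work);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_long_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");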
@@ -2277,8 +2327,8 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
/* give backing device time to settle down */
netdev_dbg(adapter->netdev,
- "[S:%d] Hard reset failed, waiting 60 secs\n",
- adapter->state);
+ "[S:%s] Hard reset failed, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state));
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -2306,8 +2356,9 @@ static void __ibmvnic_reset(struct work_struct *work)
clear_bit_unlock(0, &adapter->resetting);
netdev_dbg(adapter->netdev,
- "[S:%d FRR:%d WFR:%d] Done processing resets\n",
- adapter->state, adapter->force_reset_recovery,
+ "[S:%s FRR:%d WFR:%d] Done processing resets\n",
+ adapter_state_to_string(adapter->state),
+ adapter->force_reset_recovery,
adapter->wait_for_reset);
}
@@ -2354,8 +2405,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
- netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
- reason);
+ netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
+ reset_reason_to_string(reason));
ret = EBUSY;
goto err;
}
@@ -2375,8 +2426,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
- schedule_work(&adapter->ibmvnic_reset);
+ netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
+ reset_reason_to_string(reason));
+ queue_work(system_long_wq, &adapter->ibmvnic_reset);
ret = 0;
err:
@@ -5445,7 +5497,7 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
if (rc) {
netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
rc);
- return -EINVAL;
+ goto last_resort;
}
session_token = (__be64)retbuf[0];
@@ -5453,15 +5505,17 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
be64_to_cpu(session_token));
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_SESSION_ERR_DETECTED, session_token, 0, 0);
- if (rc) {
- netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
+ if (rc)
+ netdev_err(netdev,
+ "H_VIOCTL initiated failover failed, rc %ld\n",
rc);
- return -EINVAL;
- }
+
+last_resort:
+ netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
+ ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
return count;
}
-
static DEVICE_ATTR_WO(failover);
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 806aa75a4e86..c1d39a748546 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -412,77 +412,6 @@ struct ibmvnic_control_ip_offload {
struct ibmvnic_rc rc;
} __packed __aligned(8);
-struct ibmvnic_request_dump_size {
- u8 first;
- u8 cmd;
- u8 reserved[6];
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
-struct ibmvnic_request_dump {
- u8 first;
- u8 cmd;
- u8 reserved1[2];
- __be32 ioba;
- __be32 len;
- u8 reserved2[4];
-} __packed __aligned(8);
-
-struct ibmvnic_request_dump_rsp {
- u8 first;
- u8 cmd;
- u8 reserved[6];
- __be32 dumped_len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
-struct ibmvnic_request_ras_comp_num {
- u8 first;
- u8 cmd;
- u8 reserved1[2];
- __be32 num_components;
- u8 reserved2[4];
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
-struct ibmvnic_request_ras_comps {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 ioba;
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
-struct ibmvnic_control_ras {
- u8 first;
- u8 cmd;
- u8 correlator;
- u8 level;
- u8 op;
-#define IBMVNIC_TRACE_LEVEL 1
-#define IBMVNIC_ERROR_LEVEL 2
-#define IBMVNIC_TRACE_PAUSE 3
-#define IBMVNIC_TRACE_RESUME 4
-#define IBMVNIC_TRACE_ON 5
-#define IBMVNIC_TRACE_OFF 6
-#define IBMVNIC_CHG_TRACE_BUFF_SZ 7
- u8 trace_buff_sz[3];
- u8 reserved[4];
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
-struct ibmvnic_collect_fw_trace {
- u8 first;
- u8 cmd;
- u8 correlator;
- u8 reserved;
- __be32 ioba;
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
struct ibmvnic_request_statistics {
u8 first;
u8 cmd;
@@ -494,15 +423,6 @@ struct ibmvnic_request_statistics {
u8 reserved[4];
} __packed __aligned(8);
-struct ibmvnic_request_debug_stats {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 ioba;
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
struct ibmvnic_error_indication {
u8 first;
u8 cmd;
@@ -677,22 +597,8 @@ union ibmvnic_crq {
struct ibmvnic_query_ip_offload query_ip_offload_rsp;
struct ibmvnic_control_ip_offload control_ip_offload;
struct ibmvnic_control_ip_offload control_ip_offload_rsp;
- struct ibmvnic_request_dump_size request_dump_size;
- struct ibmvnic_request_dump_size request_dump_size_rsp;
- struct ibmvnic_request_dump request_dump;
- struct ibmvnic_request_dump_rsp request_dump_rsp;
- struct ibmvnic_request_ras_comp_num request_ras_comp_num;
- struct ibmvnic_request_ras_comp_num request_ras_comp_num_rsp;
- struct ibmvnic_request_ras_comps request_ras_comps;
- struct ibmvnic_request_ras_comps request_ras_comps_rsp;
- struct ibmvnic_control_ras control_ras;
- struct ibmvnic_control_ras control_ras_rsp;
- struct ibmvnic_collect_fw_trace collect_fw_trace;
- struct ibmvnic_collect_fw_trace collect_fw_trace_rsp;
struct ibmvnic_request_statistics request_statistics;
struct ibmvnic_generic_crq request_statistics_rsp;
- struct ibmvnic_request_debug_stats request_debug_stats;
- struct ibmvnic_request_debug_stats request_debug_stats_rsp;
struct ibmvnic_error_indication error_indication;
struct ibmvnic_link_state_indication link_state_indication;
struct ibmvnic_change_mac_addr change_mac_addr;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5aa86318ed3e..c1d155690341 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -294,6 +294,7 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
+ select DIMLIB
select NET_DEVLINK
select PLDMFW
help
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4c0c9433bd60..19cf36360933 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -1183,6 +1183,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
break;
case e1000_ms_auto:
phy_data &= ~CR_1000T_MS_ENABLE;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 0ac8d79a7987..590ad110d383 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2745,7 +2745,7 @@ release:
}
/**
- * e1000_k1_gig_workaround_lv - K1 Si workaround
+ * e1000_k1_workaround_lv - K1 Si workaround
* @hw: pointer to the HW structure
*
* Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
@@ -5220,7 +5220,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
}
/**
- * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
* @hw: pointer to the HW structure
*
* Workaround for 82566 power-down on D3 entry:
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a0948002ddf8..88e9035b75cf 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/suspend.h>
#include "e1000.h"
@@ -5990,7 +5991,7 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats64 - Get System Network Statistics
+ * e1000e_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
*
@@ -6163,7 +6164,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
}
/**
- * e1000e_hwtstamp_ioctl - control hardware time stamping
+ * e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
* @ifr: interface request
*
@@ -6821,7 +6822,7 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
}
/**
- * e1000e_disable_aspm_locked Disable ASPM states.
+ * e1000e_disable_aspm_locked - Disable ASPM states.
* @pdev: pointer to PCI device struct
* @state: bit-mask of ASPM states to disable
*
@@ -6922,6 +6923,12 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
+static __maybe_unused int e1000e_pm_prepare(struct device *dev)
+{
+ return pm_runtime_suspended(dev) &&
+ pm_suspend_via_firmware();
+}
+
static __maybe_unused int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -7630,9 +7637,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e1000_print_device_info(adapter);
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
- if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
+ if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -7855,6 +7862,7 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
static const struct dev_pm_ops e1000_pm_ops = {
#ifdef CONFIG_PM_SLEEP
+ .prepare = e1000e_pm_prepare,
.suspend = e1000e_pm_suspend,
.resume = e1000e_pm_resume,
.freeze = e1000e_pm_freeze,
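The probe change swaps DPM_FLAG_NO_DIRECT_COMPLETE for DPM_FLAG_SMART_PREPARE, and the new .prepare callback decides per-suspend whether the direct-complete optimization is safe: a positive return tells the PM core it may leave an already runtime-suspended device alone across a firmware-assisted system suspend. A sketch of the wiring under those assumptions; the demo_* names are placeholders:

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

static int demo_pm_prepare(struct device *dev)
{
	/* >0: device may stay runtime-suspended across system suspend
	 * (direct-complete), but only when suspend goes via firmware.
	 */
	return pm_runtime_suspended(dev) && pm_suspend_via_firmware();
}

static int demo_pm_suspend(struct device *dev) { return 0; }
static int demo_pm_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops demo_pm_ops = {
	.prepare = demo_pm_prepare,
	SET_SYSTEM_SLEEP_PM_OPS(demo_pm_suspend, demo_pm_resume)
};

/* At probe time the driver must also opt in, as the hunk above does:
 * dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_PREPARE);
 */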
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index bdd9dc163f15..1db35b2c7750 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -371,7 +371,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
}
/**
- * e1000e_write_phy_reg_igp - Write igp PHY register
+ * __e1000e_write_phy_reg_igp - Write igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index f3f671311855..9e79d672f4f1 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -142,7 +142,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
}
/**
- * e1000e_phc_getsynctime - Reads the current system/device cross timestamp
+ * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp
* @ptp: ptp clock structure
* @xtstamp: structure containing timestamp
*
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index c45315472245..86397c564dfc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -105,7 +105,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
}
/**
- * fm10k_dcbnl_ieee_getdcbx - get the DCBX configuration for the device
+ * fm10k_dcbnl_getdcbx - get the DCBX configuration for the device
* @dev: netdev interface for the device
*
* Returns that we support only IEEE DCB for this interface
@@ -116,7 +116,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev)
}
/**
- * fm10k_dcbnl_ieee_setdcbx - get the DCBX configuration for the device
+ * fm10k_dcbnl_setdcbx - get the DCBX configuration for the device
* @dev: netdev interface for the device
* @mode: new mode for this device
*
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 1d27b2fb23af..5c77054d67c6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -185,7 +185,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
}
/**
- * fm10k_dbg_free_q_vector_dir - setup debugfs for the q_vectors
+ * fm10k_dbg_q_vector_exit - setup debugfs for the q_vectors
* @q_vector: q_vector to allocate directories for
**/
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 247f44f4cb30..3362f26d7f99 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1774,7 +1774,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
}
/**
- * f10k_reset_msix_capability - reset MSI-X capability
+ * fm10k_reset_msix_capability - reset MSI-X capability
* @interface: board private structure to initialize
*
* Reset the MSI-X capability back to its starting state
@@ -1787,7 +1787,7 @@ static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
}
/**
- * f10k_init_msix_capability - configure MSI-X capability
+ * fm10k_init_msix_capability - configure MSI-X capability
* @interface: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 8e2e92bf3cd4..30ca9ee1900b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -692,7 +692,7 @@ static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx)
}
/**
- * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO
+ * fm10k_mbx_dequeue_rx - Dequeues the message from the head in the Rx FIFO
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
@@ -1039,6 +1039,7 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
case FM10K_STATE_CLOSED:
/* generate new header based on data */
fm10k_mbx_create_disconnect_hdr(mbx);
+ break;
default:
break;
}
@@ -2017,6 +2018,7 @@ static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
mbx->remote = mbx->local;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index c0780c3624c8..af1b0cde3670 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1417,7 +1417,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
}
/**
- * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF
+ * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
* @hw: pointer to hardware structure
* @stats: pointer to the stats structure to update
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 15f93b355099..9067cd3ce243 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -66,6 +66,8 @@
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
+#define I40E_MIN_ARQ_LEN 1
+#define I40E_MIN_ASQ_LEN 2
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ec19e18305ec..41b813fe07a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2332,7 +2332,7 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
}
/**
- * i40e_get_vsi_params - get VSI configuration info
+ * i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
@@ -2586,7 +2586,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
}
/**
- * i40e_updatelink_status - update status of the HW network link
+ * i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
@@ -5059,7 +5059,7 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_led
+ * i40e_blink_phy_link_led
* @hw: pointer to the HW structure
* @time: time how long led will blinks in secs
* @interval: gap between LED on and off in msecs
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 243b0d2b7b72..673f341f4c0c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -234,7 +234,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
}
/**
- * i40e_parse_ieee_etsrec_tlv
+ * i40e_parse_ieee_tlv
* @tlv: IEEE 802.1Qaz TLV
* @dcbcfg: Local store to update ETS REC data
*
@@ -1588,7 +1588,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
}
/**
- * i40e_dcb_hw_rx_ets_bw_config
+ * i40e_dcb_hw_rx_up2tc_config
* @hw: pointer to the hw struct
* @prio_tc: priority to tc assignment indexed by priority
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 0345132a0ef5..e32c61909b31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -392,7 +392,7 @@ static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
/**
- * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx BW config
+ * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config
* @netdev: the corresponding netdev
* @pgid: the corresponding traffic class
* @bw_pct: the BW percentage for the specified traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 5e08f100c413..e1069ae658ad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -77,7 +77,7 @@ static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
}
/**
- * i40e_ddp_does_profiles_ - checks if DDP overlaps with existing one.
+ * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one.
* @hw: HW data structure
* @pinfo: DDP profile information structure
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d627b59ad446..291e61ac3e44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -654,7 +654,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
}
/**
- * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
* @pf: the i40e_pf created in command write
* @estats: the eth stats structure to be dumped
**/
@@ -1641,7 +1641,7 @@ static const struct file_operations i40e_dbg_command_fops = {
static char i40e_dbg_netdev_ops_buf[256] = "";
/**
- * i40e_dbg_netdev_ops - read for netdev_ops datum
+ * i40e_dbg_netdev_ops_read - read for netdev_ops datum
* @filp: the opened file
* @buffer: where to write the data for the user to read
* @count: the size of the user's buffer
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 0e92668012e3..040a01400b85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -212,7 +212,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
}
/**
- * 40e_add_stat_strings - copy stat strings into ethtool buffer
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
* @p: ethtool supplied buffer
* @stats: stat definitions array
*
@@ -2409,21 +2409,15 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- char *p = (char *)data;
unsigned int i;
+ u8 *p = data;
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id != 0)
return;
- for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
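ethtool_sprintf() formats into the current ETH_GSTRING_LEN-sized slot and advances the caller's cursor, collapsing the manual snprintf()-plus-pointer-arithmetic loops above into one call per string. A userspace stand-in showing the slot/cursor behavior (the helper body is an approximation of the kernel's, and ETH_GSTRING_LEN is 32 per the ethtool UAPI):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

static void demo_ethtool_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);
	*data += ETH_GSTRING_LEN; /* one fixed-size string slot per call */
}

int main(void)
{
	char strings[2 * ETH_GSTRING_LEN];
	char *p = strings;

	memset(strings, 0, sizeof(strings));
	demo_ethtool_sprintf(&p, "flag-%d", 0);
	demo_ethtool_sprintf(&p, "flag-%d", 1);
	printf("%s %s\n", strings, strings + ETH_GSTRING_LEN);
	return 0;
}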
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a3da422ab05b..d6e92ecddfbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -511,7 +511,7 @@ configure_lan_hmc_out:
}
/**
- * i40e_delete_hmc_object - remove hmc objects
+ * i40e_delete_lan_hmc_object - remove hmc objects
* @hw: pointer to the HW structure
* @info: pointer to i40e_hmc_delete_obj_info struct
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 527023ee4c07..c2d145a56b5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/bpf.h>
#include <generated/utsrelease.h>
+#include <linux/crash_dump.h>
/* Local includes */
#include "i40e.h"
@@ -2023,7 +2024,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
}
/**
- * i40e_next_entry - Get the next non-broadcast filter from a list
+ * i40e_next_filter - Get the next non-broadcast filter from a list
* @next: pointer to filter in list
*
* Returns the next non-broadcast filter in the list. Required so that we
@@ -5203,7 +5204,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
* Return a bitmap for enabled traffic classes for this PF.
@@ -7338,7 +7339,7 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
qcount = min_t(int, vsi->alloc_queue_pairs,
i40e_pf_get_max_q_per_tc(vsi->back));
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- /* For the TC that is not enabled set the offset to to default
+ /* For the TC that is not enabled set the offset to default
* queue and allocate one queue for the given TC.
*/
vsi->tc_config.tc_info[i].qoffset = 0;
@@ -9466,7 +9467,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
/**
- * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
* @pf: board private structure
**/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
@@ -10623,7 +10624,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* need to rebuild the switch model in the HW.
*
* If there were VEBs but the reconstitution failed, we'll try
- * try to recover minimal use by getting the basic PF VSI working.
+ * to recover minimal use by getting the basic PF VSI working.
*/
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
@@ -11039,6 +11040,11 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
return -ENODATA;
}
+ if (is_kdump_kernel()) {
+ vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
+ vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
+ }
+
return 0;
}
@@ -15342,8 +15348,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_check_recovery_mode(pf);
- hw->aq.num_arq_entries = I40E_AQ_LEN;
- hw->aq.num_asq_entries = I40E_AQ_LEN;
+ if (is_kdump_kernel()) {
+ hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
+ hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
+ } else {
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ }
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
@@ -15506,6 +15517,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_switch_setup;
+ /* Reduce Tx and Rx pairs for kdump
+ * When MSI-X is enabled, it's not allowed to use more TC queue
+ * pairs than there are MSI-X vectors (pf->num_lan_msix). Thus
+ * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
+ */
+ if (is_kdump_kernel())
+ pf->num_lan_msix = 1;
+
pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
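All three i40e hunks above serve one goal: a kdump (crash capture) kernel boots with a tiny reserved memory budget, so when is_kdump_kernel() is true the driver requests minimum admin-queue depths, minimum descriptor counts, and a single MSI-X vector, which in turn caps the queue pairs at one. A hedged sketch of the sizing pattern; the struct and constants are illustrative, not i40e's:

#include <linux/crash_dump.h>

struct demo_cfg {
	unsigned int num_arq_entries;
	unsigned int num_msix;
};

static void demo_size_resources(struct demo_cfg *cfg)
{
	if (is_kdump_kernel()) {
		/* shrink to the bare minimum inside the crash kernel */
		cfg->num_arq_entries = 1;
		cfg->num_msix = 1;
	} else {
		cfg->num_arq_entries = 256;
		cfg->num_msix = 64;
	}
}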
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7164f4ad8120..fe6dca846028 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -4,7 +4,7 @@
#include "i40e_prototype.h"
/**
- * i40e_init_nvm_ops - Initialize NVM function pointers
+ * i40e_init_nvm - Initialize NVM function pointers
* @hw: pointer to the HW structure
*
* Setup the function pointers and the NVM info structure. Should be called
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 7a879614ca55..f1f6fc3744e9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -216,7 +216,7 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events
+ * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events
* @pf: the PF data structure
*
* This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 06b4271219b1..121cd99fdeff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3331,7 +3331,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
/**
- * i40e_create_tx_ctx Build the Tx context descriptor
+ * i40e_create_tx_ctx - Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31
@@ -3833,8 +3833,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* @frames: array of XDP buffer pointers
* @flags: XDP extra info
*
- * Returns number of frames successfully sent. Frames that fail are
- * free'ed via XDP return API.
+ * Returns number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
*
* For error cases, a negative errno code is returned and no-frames
* are transmitted (caller must handle freeing frames).
@@ -3847,7 +3847,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ring *xdp_ring;
- int drops = 0;
+ int nxmit = 0;
int i;
if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -3867,14 +3867,13 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int err;
err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
- if (err != I40E_XDP_TX) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err != I40E_XDP_TX)
+ break;
+ nxmit++;
}
if (unlikely(flags & XDP_XMIT_FLUSH))
i40e_xdp_ring_update_tail(xdp_ring);
- return n - drops;
+ return nxmit;
}
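This hunk converts i40e to the updated ndo_xdp_xmit contract: rather than freeing failed frames itself and returning n - drops, the driver stops at the first ring failure and reports how many frames it actually queued; ownership of the unsent tail stays with the XDP core, which frees those frames. A runnable userspace model of the convention; the ring and its capacity are hypothetical:

#include <stdio.h>

#define RING_CAP 4

struct demo_ring { int used; };

/* Hypothetical enqueue: fails once the ring is full. */
static int demo_enqueue(struct demo_ring *r)
{
	if (r->used >= RING_CAP)
		return -1;
	r->used++;
	return 0;
}

static int demo_xdp_xmit(struct demo_ring *r, int n)
{
	int nxmit = 0;

	while (nxmit < n && demo_enqueue(r) == 0)
		nxmit++; /* stop at the first failure... */

	return nxmit; /* ...the caller frees frames[nxmit..n-1] */
}

int main(void)
{
	struct demo_ring r = { .used = 0 };

	printf("sent %d of 6\n", demo_xdp_xmit(&r, 6)); /* prints: sent 4 of 6 */
	return 0;
}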
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 5d301a466f5c..eff0a30790dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -40,6 +40,66 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
}
/**
+ * i40e_vc_link_speed2mbps
+ * converts i40e_aq_link_speed to the integer value in Mbps
+ * @link_speed: the speed to convert
+ *
+ * Return the speed as a direct value in Mbps.
+ **/
+static u32
+i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return SPEED_100;
+ case I40E_LINK_SPEED_1GB:
+ return SPEED_1000;
+ case I40E_LINK_SPEED_2_5GB:
+ return SPEED_2500;
+ case I40E_LINK_SPEED_5GB:
+ return SPEED_5000;
+ case I40E_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case I40E_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case I40E_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case I40E_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case I40E_LINK_SPEED_UNKNOWN:
+ return SPEED_UNKNOWN;
+ }
+ return SPEED_UNKNOWN;
+}
+
+/**
+ * i40e_set_vf_link_state
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to PF event structure
+ * @ls: pointer to link status structure
+ *
+ * Set the link state on a single VF.
+ **/
+static void i40e_set_vf_link_state(struct i40e_vf *vf,
+ struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
+{
+ u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
+
+ if (vf->link_forced)
+ link_status = vf->link_up;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_speed = link_status ?
+ i40e_vc_link_speed2mbps(ls->link_speed) : 0;
+ pfe->event_data.link_event_adv.link_status = link_status;
+ } else {
+ pfe->event_data.link_event.link_speed = link_status ?
+ i40e_virtchnl_link_speed(ls->link_speed) : 0;
+ pfe->event_data.link_event.link_status = link_status;
+ }
+}
+
+/**
* i40e_vc_notify_vf_link_state
* @vf: pointer to the VF structure
*
@@ -55,16 +115,9 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- if (vf->link_forced) {
- pfe.event_data.link_event.link_status = vf->link_up;
- pfe.event_data.link_event.link_speed =
- (vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
- } else {
- pfe.event_data.link_event.link_status =
- ls->link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed =
- i40e_virtchnl_link_speed(ls->link_speed);
- }
+
+ i40e_set_vf_link_state(vf, &pfe, ls);
+
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -1949,6 +2002,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
@@ -3696,26 +3750,8 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
}
/* get link speed in MB to validate rate limit */
- switch (ls->link_speed) {
- case VIRTCHNL_LINK_SPEED_100MB:
- speed = SPEED_100;
- break;
- case VIRTCHNL_LINK_SPEED_1GB:
- speed = SPEED_1000;
- break;
- case VIRTCHNL_LINK_SPEED_10GB:
- speed = SPEED_10000;
- break;
- case VIRTCHNL_LINK_SPEED_20GB:
- speed = SPEED_20000;
- break;
- case VIRTCHNL_LINK_SPEED_25GB:
- speed = SPEED_25000;
- break;
- case VIRTCHNL_LINK_SPEED_40GB:
- speed = SPEED_40000;
- break;
- default:
+ speed = i40e_vc_link_speed2mbps(ls->link_speed);
+ if (speed == SPEED_UNKNOWN) {
dev_err(&pf->pdev->dev,
"Cannot detect link speed\n");
aq_ret = I40E_ERR_PARAM;
@@ -4464,23 +4500,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
switch (link) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
- pfe.event_data.link_event.link_status =
- pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)
- pf->hw.phy.link_info.link_speed;
+ i40e_set_vf_link_state(vf, &pfe, ls);
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
vf->link_up = true;
- pfe.event_data.link_event.link_status = true;
- pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed);
+ i40e_set_vf_link_state(vf, &pfe, ls);
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
vf->link_up = false;
- pfe.event_data.link_event.link_status = false;
- pfe.event_data.link_event.link_speed = 0;
+ i40e_set_vf_link_state(vf, &pfe, ls);
break;
default:
ret = -EINVAL;
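i40e_vc_link_speed2mbps() centralizes the enum-to-Mbps mapping and doubles as validation: the caller in i40e_vc_add_qch_msg() now treats SPEED_UNKNOWN as "cannot detect link speed". Covering every enum value in the switch, with the catch-all return placed after it, lets -Wswitch flag any speed a future hardware generation adds. A userspace sketch of that shape with a hypothetical enum:

#include <stdio.h>

#define SPEED_UNKNOWN -1

enum demo_speed { DEMO_SPEED_UNKNOWN, DEMO_SPEED_1GB, DEMO_SPEED_10GB };

static int demo_speed2mbps(enum demo_speed speed)
{
	switch (speed) {
	case DEMO_SPEED_1GB:
		return 1000;
	case DEMO_SPEED_10GB:
		return 10000;
	case DEMO_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	/* no default arm: -Wswitch warns if a new enum value is unhandled */
	return SPEED_UNKNOWN;
}

int main(void)
{
	int speed = demo_speed2mbps(DEMO_SPEED_10GB);

	if (speed == SPEED_UNKNOWN)
		fprintf(stderr, "Cannot detect link speed\n");
	else
		printf("%d Mbps\n", speed);
	return 0;
}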
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 12ca84113587..46d884417c63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -160,6 +160,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ rcu_read_unlock();
+ return result;
+ }
+
switch (act) {
case XDP_PASS:
break;
@@ -167,10 +174,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
- break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -625,7 +628,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
}
/**
- * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
* @tx_ring: XDP Tx ring
**/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
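In the zero-copy receive path, XDP_REDIRECT is by far the most common verdict, so the first i40e_xsk.c hunk hoists that case out of the switch behind likely() and returns early, sparing the hot path a switch dispatch. A userspace sketch of the branch-hint pattern; GCC/Clang's __builtin_expect stands in for the kernel's likely(), and the verdict values are hypothetical:

#include <stdio.h>

#define likely(x) __builtin_expect(!!(x), 1)

enum demo_act { DEMO_PASS, DEMO_TX, DEMO_REDIRECT, DEMO_DROP };

static int demo_run(enum demo_act act)
{
	/* Hot verdict first: checked before the general dispatch. */
	if (likely(act == DEMO_REDIRECT))
		return 1;

	switch (act) {
	case DEMO_PASS:
		return 0;
	case DEMO_TX:
		return 2;
	default:
		return -1; /* drop */
	}
}

int main(void)
{
	printf("%d\n", demo_run(DEMO_REDIRECT));
	return 0;
}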
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index c997063ed728..9c3e45c54d01 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,5 +11,6 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
+iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+ iavf_adv_rss.o \
iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 8a65525a7c0d..e8bd04100ecd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -37,6 +37,8 @@
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
#include "iavf_txrx.h"
+#include "iavf_fdir.h"
+#include "iavf_adv_rss.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
@@ -300,6 +302,10 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
+#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
+#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
/* OS defined structs */
struct net_device *netdev;
@@ -340,6 +346,10 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN)
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
@@ -362,6 +372,14 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+
+#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
+ u16 fdir_active_fltr;
+ struct list_head fdir_list_head;
+ spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
+
+ struct list_head adv_rss_list_head;
+ spinlock_t adv_rss_lock; /* protect the RSS management list */
};
@@ -432,6 +450,10 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_add_fdir_filter(struct iavf_adapter *adapter);
+void iavf_del_fdir_filter(struct iavf_adapter *adapter);
+void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
+void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
new file mode 100644
index 000000000000..6edbf134b73f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Intel Corporation. */
+
+/* advanced RSS configuration ethtool support for iavf */
+
+#include "iavf.h"
+
+/**
+ * iavf_fill_adv_rss_ip4_hdr - fill the IPv4 RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_SA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_DA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+}
+
+/**
+ * iavf_fill_adv_rss_ip6_hdr - fill the IPv6 RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_SA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_DA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+}
+
+/**
+ * iavf_fill_adv_rss_tcp_hdr - fill the TCP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_udp_hdr - fill the UDP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_sctp_hdr - fill the SCTP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message
+ * @rss_cfg: the virtchnl message to be filled with RSS configuration setting
+ * @packet_hdrs: the RSS configuration protocol header types
+ * @hash_flds: the RSS configuration protocol hash fields
+ *
+ * Returns 0 if the RSS configuration virtchnl message is filled successfully
+ */
+int
+iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
+ u32 packet_hdrs, u64 hash_flds)
+{
+ struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
+ struct virtchnl_proto_hdr *hdr;
+
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+
+ proto_hdrs->tunnel_level = 0; /* always outer layer */
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
+ iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
+ iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
+ iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
+ iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
+ iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_find_adv_rss_cfg_by_hdrs - find RSS configuration with header type
+ * @adapter: pointer to the VF adapter structure
+ * @packet_hdrs: protocol header type to find.
+ *
+ * Returns a pointer to the advanced RSS configuration if found, otherwise NULL
+ */
+struct iavf_adv_rss *
+iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs)
+{
+ struct iavf_adv_rss *rss;
+
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
+ if (rss->packet_hdrs == packet_hdrs)
+ return rss;
+
+ return NULL;
+}
+
+/**
+ * iavf_print_adv_rss_cfg
+ * @adapter: pointer to the VF adapter structure
+ * @rss: pointer to the advanced RSS configuration to print
+ * @action: the string description about how to handle the RSS
+ * @result: the string description about the virtchnl result
+ *
+ * Print the advanced RSS configuration
+ **/
+void
+iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
+ const char *action, const char *result)
+{
+ u32 packet_hdrs = rss->packet_hdrs;
+ u64 hash_flds = rss->hash_flds;
+ static char hash_opt[300];
+ const char *proto;
+
+ if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_TCP)
+ proto = "TCP";
+ else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_UDP)
+ proto = "UDP";
+ else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+ proto = "SCTP";
+ else
+ return;
+
+ memset(hash_opt, 0, sizeof(hash_opt));
+
+ strcat(hash_opt, proto);
+ if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4)
+ strcat(hash_opt, "v4 ");
+ else
+ strcat(hash_opt, "v6 ");
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
+ strcat(hash_opt, "IP SA,");
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
+ strcat(hash_opt, "IP DA,");
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
+ strcat(hash_opt, "src port,");
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
+ strcat(hash_opt, "dst port,");
+
+ if (!action)
+ action = "";
+
+ if (!result)
+ result = "";
+
+ dev_info(&adapter->pdev->dev, "%s %s %s\n", action, hash_opt, result);
+}
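iavf_fill_adv_rss_cfg_msg() above consumes exactly one L3 bit and one L4 bit from packet_hdrs and returns -EINVAL otherwise, so callers compose the header mask from one member of each group plus the matching hash-field bits. A hypothetical caller sketch against the flags declared in iavf_adv_rss.h; the real entry point is the driver's ethtool rx-flow-hash plumbing, omitted here:

#include "iavf.h"

/* Hypothetical helper: request hashing of IPv4/TCP flows on source
 * address and source port. Error handling is trimmed.
 */
static int demo_hash_ipv4_tcp(struct virtchnl_rss_cfg *rss_cfg)
{
	u32 packet_hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 |
			  IAVF_ADV_RSS_FLOW_SEG_HDR_TCP;
	u64 hash_flds = IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
			IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;

	/* Fails with -EINVAL unless exactly one L3 and one L4 bit are set. */
	return iavf_fill_adv_rss_cfg_msg(rss_cfg, packet_hdrs, hash_flds);
}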
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
new file mode 100644
index 000000000000..4d3be11af7aa
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _IAVF_ADV_RSS_H_
+#define _IAVF_ADV_RSS_H_
+
+struct iavf_adapter;
+
+/* State of advanced RSS configuration */
+enum iavf_adv_rss_state_t {
+ IAVF_ADV_RSS_ADD_REQUEST, /* User requests to add RSS */
+ IAVF_ADV_RSS_ADD_PENDING, /* RSS pending add by the PF */
+ IAVF_ADV_RSS_DEL_REQUEST, /* Driver requests to delete RSS */
+ IAVF_ADV_RSS_DEL_PENDING, /* RSS pending delete by the PF */
+ IAVF_ADV_RSS_ACTIVE, /* RSS configuration is active */
+};
+
+enum iavf_adv_rss_flow_seg_hdr {
+ IAVF_ADV_RSS_FLOW_SEG_HDR_NONE = 0x00000000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 = 0x00000001,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6 = 0x00000002,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010,
+};
+
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \
+ (IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6)
+
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_L4 \
+ (IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+
+enum iavf_adv_rss_flow_field {
+ /* L3 */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA,
+ /* L4 */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT,
+
+ /* The total number of enums must not exceed 64 */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX
+};
+
+#define IAVF_ADV_RSS_HASH_INVALID 0
+#define IAVF_ADV_RSS_HASH_FLD_IPV4_SA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV6_SA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV4_DA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV6_DA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA)
+#define IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+/* bookkeeping of advanced RSS configuration */
+struct iavf_adv_rss {
+ enum iavf_adv_rss_state_t state;
+ struct list_head list;
+
+ u32 packet_hdrs;
+ u64 hash_flds;
+
+ struct virtchnl_rss_cfg cfg_msg;
+};
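+
+/* Example mapping (illustrative, not part of the uAPI): the ethtool request
+ *   ethtool -N <iface> rx-flow-hash tcp4 sdfn
+ * is represented here as packet_hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ * IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 and hash_flds covering the IPv4 SA/DA and
+ * TCP source/destination port bits; <iface> is a placeholder.
+ */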
+
+int
+iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
+ u32 packet_hdrs, u64 hash_flds);
+struct iavf_adv_rss *
+iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
+void
+iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
+ const char *action, const char *result);
+#endif /* _IAVF_ADV_RSS_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index c93567f4d0f7..af43fbd8cb75 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -828,6 +828,872 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
}
/**
+ * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
+{
+ switch (flow) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ return AH_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ return ESP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return AH_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return ESP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return ETHER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_SCTP;
+ case AH_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_AH;
+ case ESP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_ESP;
+ case IPV4_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_SCTP;
+ case AH_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_AH;
+ case ESP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_ESP;
+ case IPV6_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_OTHER;
+ case ETHER_FLOW:
+ return IAVF_FDIR_FLOW_NON_IP_L2;
+ default:
+ return IAVF_FDIR_FLOW_NONE;
+ }
+}
+
+/**
+ * iavf_is_mask_valid - check that the mask fully covers the field
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * Returns true if the mask fully covers the field, false otherwise.
+ */
+static bool iavf_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * iavf_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter for userdef data storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_flex_word *flex;
+ int i, cnt = 0;
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
+#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
+#define IAVF_USERDEF_FLEX_OFFS_S 16
+#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
+#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
+ u32 value = be32_to_cpu(fsp->h_ext.data[i]);
+ u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
+
+ if (!value || !mask)
+ continue;
+
+ if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
+ return -EINVAL;
+
+		/* 504 is the maximum supported offset value; the offset is
+		 * measured from the start of the MAC header.
+		 */
+#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
+ flex = &fltr->flex_words[cnt++];
+ flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
+ flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
+ IAVF_USERDEF_FLEX_OFFS_S;
+ if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+ }
+
+ fltr->flex_cnt = cnt;
+
+ return 0;
+}
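+
+/* Sketch of the user-def encoding consumed above, with made-up values: each
+ * 32-bit word packs a flex word in bits 15:0 and its byte offset (measured
+ * from the start of the MAC header) in bits 31:16.  Matching the 16-bit
+ * value 0x1234 at offset 46 could therefore be requested as
+ *   ethtool -N <iface> flow-type udp4 ... user-def 0x002e1234
+ * where <iface> and the remaining match options are placeholders.
+ */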
+
+/**
+ * iavf_fill_rx_flow_ext_data - fill the additional data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter to get additional data
+ */
+static void
+iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
+ return;
+
+ fsp->flow_type |= FLOW_EXT;
+
+ memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
+}
+
+/**
+ * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
+ * @adapter: the VF adapter structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 on success and negative values on failure, as expected by ethtool
+ */
+static int
+iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct iavf_fdir_fltr *rule = NULL;
+ int ret = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ if (!rule) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
+ break;
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
+ fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iavf_fill_rx_flow_ext_data(fsp, rule);
+
+ if (rule->action == VIRTCHNL_ACTION_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @adapter: the VF adapter structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 on success and negative values on failure, as expected by ethtool
+ */
+static int
+iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct iavf_fdir_fltr *fltr;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = fltr->loc;
+ cnt++;
+ }
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+
+ return val;
+}
+
+/**
+ * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: filter structure
+ *
+ * Returns 0 on success and negative values on failure
+ */
+static int
+iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ u32 flow_type, q_index = 0;
+ enum virtchnl_action act;
+ int err;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ act = VIRTCHNL_ACTION_DROP;
+ } else {
+ q_index = fsp->ring_cookie;
+ if (q_index >= adapter->num_active_queues)
+ return -EINVAL;
+
+ act = VIRTCHNL_ACTION_QUEUE;
+ }
+
+ fltr->action = act;
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(fltr->ext_data.usr_def));
+ memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(fltr->ext_mask.usr_def));
+ }
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
+ fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
+ fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
+ fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
+ fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
+ fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+ fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
+ break;
+ default:
+		/* unsupported flow types are rejected */
+ return -EINVAL;
+ }
+
+ if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ return -EEXIST;
+
+ err = iavf_parse_rx_flow_user_data(fsp, fltr);
+ if (err)
+ return err;
+
+ return iavf_fill_fdir_add_msg(adapter, fltr);
+}
+
+/**
+ * iavf_add_fdir_ethtool - add Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to add Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct iavf_fdir_fltr *fltr;
+ int count = 50;
+ int err;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ if (--count == 0) {
+ kfree(fltr);
+ return -EINVAL;
+ }
+ udelay(1);
+ }
+
+ err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
+ if (err)
+ goto ret;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ iavf_fdir_list_add_fltr(adapter, fltr);
+ adapter->fdir_active_fltr++;
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ret:
+	if (err)
+		kfree(fltr);
+
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return err;
+}
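+
+/* Typical request reaching this path (illustrative; the interface name and
+ * all values are placeholders):
+ *   ethtool -N <iface> flow-type tcp4 src-ip 192.168.0.20 dst-port 5201 \
+ *           action 2 loc 10
+ * which arrives as ETHTOOL_SRXCLSRLINS and steers matching packets to Rx
+ * queue 2 as rule 10.
+ */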
+
+/**
+ * iavf_del_fdir_ethtool - delete Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ return err;
+}
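+
+/* The matching delete request (illustrative placeholders) is
+ *   ethtool -N <iface> delete 10
+ * which marks rule 10 for deletion and lets the watchdog task send the
+ * virtchnl message to the PF.
+ */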
+
+/**
+ * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
+ * @cmd: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended
+ * header types for RSS configuration
+ */
+static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
+{
+ u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ return hdrs;
+}
+
+/**
+ * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
+ * @cmd: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended hash fields for
+ * RSS configuration
+ */
+static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
+{
+ u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
+
+ if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (cmd->data & RXH_IP_SRC)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
+ if (cmd->data & RXH_IP_DST)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (cmd->data & RXH_IP_SRC)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
+ if (cmd->data & RXH_IP_DST)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return hfld;
+}
+
+/**
+ * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 if the flow input set is supported, negative error otherwise.
+ */
+static int
+iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adv_rss *rss_old, *rss_new;
+ bool rss_new_add = false;
+ int count = 50, err = 0;
+ u64 hash_flds;
+ u32 hdrs;
+
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ hdrs = iavf_adv_rss_parse_hdrs(cmd);
+ if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
+ return -EINVAL;
+
+ hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
+ if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
+ return -EINVAL;
+
+ rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
+ if (!rss_new)
+ return -ENOMEM;
+
+ if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
+ kfree(rss_new);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ if (--count == 0) {
+ kfree(rss_new);
+ return -EINVAL;
+ }
+
+ udelay(1);
+ }
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
+ if (rss_old) {
+ if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
+ err = -EBUSY;
+ } else if (rss_old->hash_flds != hash_flds) {
+ rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
+ rss_old->hash_flds = hash_flds;
+ memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
+ sizeof(rss_new->cfg_msg));
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
+ } else {
+ err = -EEXIST;
+ }
+ } else {
+ rss_new_add = true;
+ rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
+ rss_new->packet_hdrs = hdrs;
+ rss_new->hash_flds = hash_flds;
+ list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (!err)
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ if (!rss_new_add)
+ kfree(rss_new);
+
+ return err;
+}
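+
+/* Illustrative usage (placeholders throughout): a first request such as
+ *   ethtool -N <iface> rx-flow-hash tcp4 sdfn
+ * adds a new configuration, while repeating it with a different field set,
+ * e.g. "rx-flow-hash tcp4 sd", takes the rss_old branch above and
+ * re-requests the existing entry.
+ */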
+
+/**
+ * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 if the flow input set is supported, negative error otherwise.
+ */
+static int
+iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adv_rss *rss;
+ u64 hash_flds;
+ u32 hdrs;
+
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ cmd->data = 0;
+
+ hdrs = iavf_adv_rss_parse_hdrs(cmd);
+ if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
+ return -EINVAL;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
+ if (rss)
+ hash_flds = rss->hash_flds;
+ else
+ hash_flds = IAVF_ADV_RSS_HASH_INVALID;
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
+ return -EINVAL;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
+ cmd->data |= (u64)RXH_IP_SRC;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
+ cmd->data |= (u64)RXH_IP_DST;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
+ cmd->data |= (u64)RXH_L4_B_0_1;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
+ cmd->data |= (u64)RXH_L4_B_2_3;
+
+ return 0;
+}
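+
+/* Readback counterpart (illustrative):
+ *   ethtool -n <iface> rx-flow-hash tcp4
+ * reports the RXH_* bits assembled above for the stored configuration.
+ */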
+
+/**
+ * iavf_set_rxnfc - command to set Rx flow rules.
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = iavf_add_fdir_ethtool(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = iavf_del_fdir_ethtool(adapter, cmd);
+ break;
+ case ETHTOOL_SRXFH:
+ ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -846,9 +1712,21 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_active_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ break;
+ cmd->rule_cnt = adapter->fdir_active_fltr;
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
+ break;
case ETHTOOL_GRXFH:
- netdev_info(netdev,
- "RSS hash info is not available to vf, use pf.\n");
+ ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
break;
default:
break;
@@ -1025,6 +1903,7 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_coalesce = iavf_set_coalesce,
.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
+ .set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
new file mode 100644
index 000000000000..6146203efd84
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation. */
+
+/* flow director ethtool support for iavf */
+
+#include "iavf.h"
+
+#define GTPU_PORT 2152
+#define NAT_T_ESP_PORT 4500
+#define PFCP_PORT 8805
+
+static const struct in6_addr ipv6_addr_full_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+ }
+};
+
+/**
+ * iavf_pkt_udp_no_pay_len - length of a UDP packet without payload
+ * @fltr: Flow Director filter data structure
+ */
+static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
+{
+ return sizeof(struct ethhdr) +
+ (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct udphdr);
+}
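+
+/* For reference, this evaluates to 14 + 20 + 8 = 42 bytes for IPv4 and
+ * 14 + 40 + 8 = 62 bytes for IPv6, i.e. the offset of the first UDP payload
+ * byte from the start of the MAC header.
+ */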
+
+/**
+ * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the GTP-U protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
+ u16 adj_offs, hdr_offs;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_GTPU_HDR_TEID_OFFS0 4
+#define IAVF_GTPU_HDR_TEID_OFFS1 6
+#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10
+#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK 0x00FF /* skip N_PDU */
+/* PDU Session Container Extension Header (PSC) */
+#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85
+#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13
+#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK 0x3F /* skip Type */
+#define IAVF_GTPU_EH_QFI_IDX 1
+
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_GTPU_HDR_TEID_OFFS0:
+ case IAVF_GTPU_HDR_TEID_OFFS1: {
+ __be16 *pay_word = (__be16 *)ghdr->buffer;
+
+ pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
+ }
+ break;
+ case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
+ if ((fltr->flex_words[i].word &
+ IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) !=
+ IAVF_GTPU_PSC_EXTHDR_TYPE)
+ return -EOPNOTSUPP;
+ if (!ehdr)
+ ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
+ break;
+ case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
+ if (!ehdr)
+ return -EINVAL;
+ ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] =
+ fltr->flex_words[i].word &
+ IAVF_GTPU_HDR_PSC_PDU_QFI_MASK;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
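+
+/* Illustrative GTP-U filter exercising the TEID branch above (interface name
+ * and values are placeholders): with IPv4 the adjusted base is 42 bytes, so
+ * the two TEID half-words sit at absolute offsets 46 and 48, and TEID
+ * 0x12345678 could be requested as
+ *   ethtool -N <iface> flow-type udp4 dst-port 2152 \
+ *           user-def 0x002e123400305678 action 4
+ */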
+
+/**
+ * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the PFCP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ u16 adj_offs, hdr_offs;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
+ hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the NAT-T-ESP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ u16 adj_offs, hdr_offs;
+ u32 spi = 0;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_NAT_T_ESP_SPI_OFFS0 0
+#define IAVF_NAT_T_ESP_SPI_OFFS1 2
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_NAT_T_ESP_SPI_OFFS0:
+ spi |= fltr->flex_words[i].word << 16;
+ break;
+ case IAVF_NAT_T_ESP_SPI_OFFS1:
+ spi |= fltr->flex_words[i].word;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!spi)
+		return -EOPNOTSUPP; /* IKE header format with SPI 0 is not supported */
+
+ *(__be32 *)hdr->buffer = htonl(spi);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
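+
+/* Similarly for NAT-T ESP (illustrative placeholders): the SPI immediately
+ * follows the UDP header, so for IPv4 its two half-words sit at offsets 42
+ * and 44, and SPI 0x00010002 could be requested as
+ *   ethtool -N <iface> flow-type udp4 dst-port 4500 \
+ *           user-def 0x002a0001002c0002 action 2
+ */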
+
+/**
+ * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the UDP payload defined protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ int err;
+
+ switch (ntohs(fltr->ip_data.dst_port)) {
+ case GTPU_PORT:
+ err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
+ break;
+ case NAT_T_ESP_PORT:
+ err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
+ break;
+ case PFCP_PORT:
+ err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the IPv4 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct iphdr *iph = (struct iphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (fltr->ip_mask.tos == U8_MAX) {
+ iph->tos = fltr->ip_data.tos;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+ }
+
+ if (fltr->ip_mask.proto == U8_MAX) {
+ iph->protocol = fltr->ip_data.proto;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+ }
+
+ if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
+ iph->saddr = fltr->ip_data.v4_addrs.src_ip;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ }
+
+ if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
+ iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ }
+
+ fltr->ip_ver = 4;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the IPv6 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (fltr->ip_mask.tclass == U8_MAX) {
+ iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
+ iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+ }
+
+ if (fltr->ip_mask.proto == U8_MAX) {
+ iph->nexthdr = fltr->ip_data.proto;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+ }
+
+ if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr))) {
+ memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+ }
+
+ if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr))) {
+ memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ }
+
+ fltr->ip_ver = 6;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the TCP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ tcph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ tcph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_udp_hdr - fill the UDP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the UDP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct udphdr *udph = (struct udphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ udph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ udph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+ }
+
+ if (!fltr->flex_cnt)
+ return 0;
+
+ return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
+}
+
+/**
+ * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the SCTP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ sctph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ sctph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_ah_hdr - fill the AH protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the AH protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+ if (fltr->ip_mask.spi == htonl(U32_MAX)) {
+ ah->spi = fltr->ip_data.spi;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_esp_hdr - fill the ESP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the ESP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ if (fltr->ip_mask.spi == htonl(U32_MAX)) {
+ esph->spi = fltr->ip_data.spi;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_l4_hdr - fill the L4 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the L4 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr;
+ __be32 *l4_4_data;
+
+ if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
+ return 0;
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ l4_4_data = (__be32 *)hdr->buffer;
+
+ /* L2TPv3 over IP with 'Session ID' */
+ if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+
+ *l4_4_data = fltr->ip_data.l4_header;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
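+
+/* Illustrative L2TPv3 rule taking the branch above (placeholders):
+ *   ethtool -N <iface> flow-type ip4 l4proto 115 l4data 0x12345678 action 1
+ * matches IP protocol 115 with session ID 0x12345678 in the first four
+ * bytes of the L4 header.
+ */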
+
+/**
+ * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the Ethernet protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+ if (fltr->eth_mask.etype == htons(U16_MAX)) {
+ if (fltr->eth_data.etype == htons(ETH_P_IP) ||
+ fltr->eth_data.etype == htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ ehdr->h_proto = fltr->eth_data.etype;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+ }
+
+ return 0;
+}
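+
+/* Illustrative ethertype rule (placeholders):
+ *   ethtool -N <iface> flow-type ether proto 0x88f7 action 1
+ * is accepted, whereas ether rules on 0x0800/0x86dd are rejected above
+ * because IPv4/IPv6 matches are expressed through the dedicated IP flow
+ * types instead.
+ */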
+
+/**
+ * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns 0 if the add Flow Director virtchnl message is filled successfully
+ */
+int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
+ struct virtchnl_proto_hdrs *proto_hdrs;
+ int err;
+
+ proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;
+
+ err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
+ if (err)
+ return err;
+
+ switch (fltr->flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ vc_msg->vsi_id = adapter->vsi.id;
+ vc_msg->rule_cfg.action_set.count = 1;
+ vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
+ vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;
+
+ return 0;
+}
+
+/**
+ * iavf_fdir_flow_proto_name - get the flow protocol name
+ * @flow_type: Flow Director filter flow type
+ **/
+static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
+{
+ switch (flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return "TCP";
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return "UDP";
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return "SCTP";
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return "AH";
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return "ESP";
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return "Other";
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return "Ethernet";
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * iavf_print_fdir_fltr - print the Flow Director filter
+ * @adapter: adapter structure
+ * @fltr: Flow Director filter to print
+ *
+ * Print the Flow Director filter
+ **/
+void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);
+
+ if (!proto)
+ return;
+
+ switch (fltr->flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ proto,
+ ntohs(fltr->ip_data.dst_port),
+ ntohs(fltr->ip_data.src_port));
+ break;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ proto,
+ ntohl(fltr->ip_data.spi));
+ break;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ fltr->ip_data.proto,
+ ntohl(fltr->ip_data.l4_header));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ proto,
+ ntohs(fltr->ip_data.dst_port),
+ ntohs(fltr->ip_data.src_port));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ proto,
+ ntohl(fltr->ip_data.spi));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ fltr->ip_data.proto,
+ ntohl(fltr->ip_data.l4_header));
+ break;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
+ fltr->loc,
+ ntohs(fltr->eth_data.etype));
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * iavf_fdir_is_dup_fltr - test if filter is already in list
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns true if the filter is found in the list
+ */
+bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_fdir_fltr *tmp;
+
+ list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (tmp->flow_type != fltr->flow_type)
+ continue;
+
+ if (!memcmp(&tmp->eth_data, &fltr->eth_data,
+ sizeof(fltr->eth_data)) &&
+ !memcmp(&tmp->ip_data, &fltr->ip_data,
+ sizeof(fltr->ip_data)) &&
+ !memcmp(&tmp->ext_data, &fltr->ext_data,
+ sizeof(fltr->ext_data)))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * iavf_find_fdir_fltr_by_loc - find filter with location
+ * @adapter: pointer to the VF adapter structure
+ * @loc: location to find.
+ *
+ * Returns pointer to the Flow Director filter if found, NULL otherwise
+ */
+struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+{
+ struct iavf_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &adapter->fdir_list_head, list)
+ if (rule->loc == loc)
+ return rule;
+
+ return NULL;
+}
+
+/**
+ * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: filter node to add to the list
+ *
+ * The list is kept sorted by ascending rule location (fltr->loc).
+ */
+void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (rule->loc >= fltr->loc)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->list, &parent->list);
+ else
+ list_add(&fltr->list, &adapter->fdir_list_head);
+}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
new file mode 100644
index 000000000000..33c55c366315
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _IAVF_FDIR_H_
+#define _IAVF_FDIR_H_
+
+struct iavf_adapter;
+
+/* State of Flow Director filter */
+enum iavf_fdir_fltr_state_t {
+ IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
+ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
+ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
+ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
+};
+
+enum iavf_fdir_flow_type {
+ /* NONE - used for undef/error */
+ IAVF_FDIR_FLOW_NONE = 0,
+ IAVF_FDIR_FLOW_IPV4_TCP,
+ IAVF_FDIR_FLOW_IPV4_UDP,
+ IAVF_FDIR_FLOW_IPV4_SCTP,
+ IAVF_FDIR_FLOW_IPV4_AH,
+ IAVF_FDIR_FLOW_IPV4_ESP,
+ IAVF_FDIR_FLOW_IPV4_OTHER,
+ IAVF_FDIR_FLOW_IPV6_TCP,
+ IAVF_FDIR_FLOW_IPV6_UDP,
+ IAVF_FDIR_FLOW_IPV6_SCTP,
+ IAVF_FDIR_FLOW_IPV6_AH,
+ IAVF_FDIR_FLOW_IPV6_ESP,
+ IAVF_FDIR_FLOW_IPV6_OTHER,
+ IAVF_FDIR_FLOW_NON_IP_L2,
+ /* MAX - this must be last and add anything new just above it */
+ IAVF_FDIR_FLOW_PTYPE_MAX,
+};
+
+/* IAVF_FLEX_WORD_NUM must not exceed the number of elements in the
+ * '__be32 data[2]' array of ethtool's 'struct ethtool_rx_flow_spec.m_ext',
+ * which carries the flex bytes (words).
+ */
+#define IAVF_FLEX_WORD_NUM 2
+
+struct iavf_flex_word {
+ u16 offset;
+ u16 word;
+};
+
+struct iavf_ipv4_addrs {
+ __be32 src_ip;
+ __be32 dst_ip;
+};
+
+struct iavf_ipv6_addrs {
+ struct in6_addr src_ip;
+ struct in6_addr dst_ip;
+};
+
+struct iavf_fdir_eth {
+ __be16 etype;
+};
+
+struct iavf_fdir_ip {
+ union {
+ struct iavf_ipv4_addrs v4_addrs;
+ struct iavf_ipv6_addrs v6_addrs;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 l4_header; /* first 4 bytes of the layer 4 header */
+ __be32 spi; /* security parameter index for AH/ESP */
+ union {
+ u8 tos;
+ u8 tclass;
+ };
+ u8 proto;
+};
+
+struct iavf_fdir_extra {
+ u32 usr_def[IAVF_FLEX_WORD_NUM];
+};
+
+/* bookkeeping of Flow Director filters */
+struct iavf_fdir_fltr {
+ enum iavf_fdir_fltr_state_t state;
+ struct list_head list;
+
+ enum iavf_fdir_flow_type flow_type;
+
+ struct iavf_fdir_eth eth_data;
+ struct iavf_fdir_eth eth_mask;
+
+ struct iavf_fdir_ip ip_data;
+ struct iavf_fdir_ip ip_mask;
+
+ struct iavf_fdir_extra ext_data;
+ struct iavf_fdir_extra ext_mask;
+
+ enum virtchnl_action action;
+
+ /* flex byte filter data */
+ u8 ip_ver; /* used to adjust the flex offset, 4 : IPv4, 6 : IPv6 */
+ u8 flex_cnt;
+ struct iavf_flex_word flex_words[IAVF_FLEX_WORD_NUM];
+
+ u32 flow_id;
+
+ u32 loc; /* Rule location inside the flow table */
+ u32 q_index;
+
+ struct virtchnl_fdir_add vc_add_msg;
+};
+
+int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+#endif /* _IAVF_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index dc5b3c06d1e0..e612c24fa384 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -959,8 +959,10 @@ void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_vlan_filter *vlf;
- struct iavf_mac_filter *f;
struct iavf_cloud_filter *cf;
+ struct iavf_fdir_fltr *fdir;
+ struct iavf_mac_filter *f;
+ struct iavf_adv_rss *rss;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -996,6 +998,19 @@ void iavf_down(struct iavf_adapter *adapter)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+	list_for_each_entry(fdir, &adapter->fdir_list_head, list)
+		fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+	/* remove all advanced RSS configurations */
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __IAVF_RESETTING) {
/* cancel any current operation */
@@ -1007,6 +1022,8 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
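
Note the shape of the teardown above: entries are only marked for deletion under the lock, and the actual virtchnl traffic is deferred to the watchdog via aq_required. A stripped-down model of that mark-then-flag pattern (locking elided, names illustrative):

#include <stdio.h>

enum fltr_state { FLTR_ACTIVE, FLTR_DEL_REQUEST };

#define AQ_DEL_FDIR_FILTER	0x1

struct fltr { enum fltr_state state; };

static unsigned int aq_required;

static void mark_all_for_delete(struct fltr *f, int n)
{
	for (int i = 0; i < n; i++)
		f[i].state = FLTR_DEL_REQUEST;	/* request, don't free yet */
	aq_required |= AQ_DEL_FDIR_FILTER;	/* let the task do the I/O */
}

int main(void)
{
	struct fltr filters[2] = { { FLTR_ACTIVE }, { FLTR_ACTIVE } };

	mark_all_for_delete(filters, 2);
	printf("aq_required=0x%x state=%d\n", aq_required, filters[0].state);
	return 0;
}
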
@@ -1629,6 +1646,22 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
+ iavf_add_fdir_filter(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
+ iavf_del_fdir_filter(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
+ iavf_add_adv_rss_cfg(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
+ iavf_del_adv_rss_cfg(adapter);
+ return 0;
+ }
return -EAGAIN;
}
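
Each pass of this dispatcher services at most one pending request and returns success, falling through to -EAGAIN when no bit is set. A self-contained model of that loop shape (hypothetical flag names; the real helpers clear their flags only once the corresponding list is drained):

#include <errno.h>
#include <stdio.h>

#define AQ_ADD_FDIR	0x1
#define AQ_DEL_FDIR	0x2

static int process_aq_command(unsigned int *aq_required)
{
	if (*aq_required & AQ_ADD_FDIR) {
		*aq_required &= ~AQ_ADD_FDIR;	/* stand-in for the real op */
		return 0;
	}
	if (*aq_required & AQ_DEL_FDIR) {
		*aq_required &= ~AQ_DEL_FDIR;
		return 0;
	}
	return -EAGAIN;				/* nothing left to service */
}

int main(void)
{
	unsigned int aq = AQ_ADD_FDIR | AQ_DEL_FDIR;

	while (process_aq_command(&aq) == 0)
		;
	printf("drained, aq=0x%x\n", aq);
	return 0;
}
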
@@ -2529,7 +2562,7 @@ validate_bw:
}
/**
- * iavf_validate_channel_config - validate queue mapping info
+ * iavf_validate_ch_config - validate queue mapping info
* @adapter: board private structure
* @mqprio_qopt: queue parameters
*
@@ -3525,6 +3558,8 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
hw_features |= NETIF_F_HW_TC;
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
+ hw_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= hw_features;
@@ -3738,10 +3773,14 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->mac_vlan_list_lock);
spin_lock_init(&adapter->cloud_filter_list_lock);
+ spin_lock_init(&adapter->fdir_fltr_lock);
+ spin_lock_init(&adapter->adv_rss_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_LIST_HEAD(&adapter->cloud_filter_list);
+ INIT_LIST_HEAD(&adapter->fdir_list_head);
+ INIT_LIST_HEAD(&adapter->adv_rss_list_head);
INIT_WORK(&adapter->reset_task, iavf_reset_task);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
@@ -3845,7 +3884,9 @@ static void iavf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
@@ -3899,8 +3940,6 @@ static void iavf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
- iavf_free_all_tx_resources(adapter);
- iavf_free_all_rx_resources(adapter);
iavf_free_queues(adapter);
kfree(adapter->vf_res);
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -3926,6 +3965,21 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ list_del(&rss->list);
+ kfree(rss);
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ffaf2742a2e0..3525eab8e9f9 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1905,13 +1905,20 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of UDP segmentation header */
+ *hdr_len = (u8)(sizeof(struct udphdr) + l4_offset);
+ } else {
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of TCP segmentation header */
+ *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
+ }
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
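
The header-length arithmetic above is easy to sanity-check in isolation: UDP's segmentation header is a fixed 8 bytes plus the L4 offset, while TCP's is doff 32-bit words times four plus the same offset. A standalone check with typical values:

#include <stdint.h>
#include <stdio.h>

#define UDP_HDR_LEN	8	/* sizeof(struct udphdr) */

static uint8_t gso_hdr_len(int is_udp, uint8_t l4_offset, uint8_t tcp_doff)
{
	if (is_udp)
		return (uint8_t)(UDP_HDR_LEN + l4_offset);
	return (uint8_t)(tcp_doff * 4 + l4_offset);
}

int main(void)
{
	/* 14B Ethernet + 20B IPv4 puts L4 at offset 34 */
	printf("udp hdr_len=%u\n", gso_hdr_len(1, 34, 0));	/* 42 */
	printf("tcp hdr_len=%u\n", gso_hdr_len(0, 34, 5));	/* 54 */
	return 0;
}
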
@@ -2098,7 +2105,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
/**
- * iavf_create_tx_ctx Build the Tx context descriptor
+ * iavf_create_tx_ctx - Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 647e7fde11b4..0eab3c43bdc5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -140,6 +140,9 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
+ VIRTCHNL_VF_OFFLOAD_USO |
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF |
+ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
@@ -1005,7 +1008,7 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
}
/**
- * iavf_enable_channel
+ * iavf_enable_channels
* @adapter: adapter structure
*
* Request that the PF enable channels as specified by
@@ -1046,7 +1049,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
}
/**
- * iavf_disable_channel
+ * iavf_disable_channels
* @adapter: adapter structure
*
* Request that the PF disable channels that are configured
@@ -1198,6 +1201,200 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter)
}
/**
+ * iavf_add_fdir_filter
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF add Flow Director filters as specified
+ * by the user via ethtool.
+ **/
+void iavf_add_fdir_filter(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir;
+ struct virtchnl_fdir_add *f;
+ bool process_fltr = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_fdir_add);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ process_fltr = true;
+ fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
+ memcpy(f, &fdir->vc_add_msg, len);
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (!process_fltr) {
+ /* prevent iavf_add_fdir_filter() from being called when there
+ * are no filters to add
+ */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ kfree(f);
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
+ kfree(f);
+}
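
Worth noting in the function above: the virtchnl message is snapshotted while the spinlock is held, and the mailbox send happens only after unlocking, so the slow I/O never runs under the list lock. A minimal pthread sketch of that copy-then-send shape (names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static char pending_msg[64];		/* stand-in for fdir->vc_add_msg */

static void send_pf_msg(const char *msg)
{
	printf("sending: %s\n", msg);	/* slow I/O happens lock-free */
}

static void add_filter_msg(void)
{
	char snapshot[64];

	pthread_mutex_lock(&list_lock);
	memcpy(snapshot, pending_msg, sizeof(snapshot)); /* copy under lock */
	pthread_mutex_unlock(&list_lock);

	send_pf_msg(snapshot);		/* send after unlock */
}

int main(void)
{
	snprintf(pending_msg, sizeof(pending_msg), "ADD_FDIR loc=3");
	add_filter_msg();
	return 0;
}
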
+
+/**
+ * iavf_del_fdir_filter
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF delete Flow Director filters as specified
+ * by the user via ethtool.
+ **/
+void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir;
+ struct virtchnl_fdir_del f;
+ bool process_fltr = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_fdir_del);
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
+ process_fltr = true;
+ memset(&f, 0, len);
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (!process_fltr) {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
+}
+
+/**
+ * iavf_add_adv_rss_cfg
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF add RSS configuration as specified
+ * by the user via ethtool.
+ **/
+void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_cfg *rss_cfg;
+ struct iavf_adv_rss *rss;
+ bool process_rss = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_rss_cfg);
+ rss_cfg = kzalloc(len, GFP_KERNEL);
+ if (!rss_cfg)
+ return;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ process_rss = true;
+ rss->state = IAVF_ADV_RSS_ADD_PENDING;
+ memcpy(rss_cfg, &rss->cfg_msg, len);
+ iavf_print_adv_rss_cfg(adapter, rss,
+ "Input set change for",
+ "is pending");
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (process_rss) {
+ adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
+ (u8 *)rss_cfg, len);
+ } else {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
+ }
+
+ kfree(rss_cfg);
+}
+
+/**
+ * iavf_del_adv_rss_cfg
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF delete RSS configuration as specified
+ * by the user via ethtool.
+ **/
+void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_cfg *rss_cfg;
+ struct iavf_adv_rss *rss;
+ bool process_rss = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_rss_cfg);
+ rss_cfg = kzalloc(len, GFP_KERNEL);
+ if (!rss_cfg)
+ return;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
+ process_rss = true;
+ rss->state = IAVF_ADV_RSS_DEL_PENDING;
+ memcpy(rss_cfg, &rss->cfg_msg, len);
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (process_rss) {
+ adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
+ (u8 *)rss_cfg, len);
+ } else {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ }
+
+ kfree(rss_cfg);
+}
+
+/**
* iavf_request_reset
* @adapter: adapter structure
*
@@ -1357,6 +1554,84 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER: {
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp,
+ &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
+ dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_fdir_fltr(adapter, fdir);
+ if (msglen)
+ dev_err(&adapter->pdev->dev,
+ "%s\n", msg);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER: {
+ struct iavf_fdir_fltr *fdir;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG: {
+ struct iavf_adv_rss *rss, *rss_tmp;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rss_tmp,
+ &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
+ iavf_print_adv_rss_cfg(adapter, rss,
+ "Failed to change the input set for",
+ NULL);
+ list_del(&rss->list);
+ kfree(rss);
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG: {
+ struct iavf_adv_rss *rss;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
+ rss->state = IAVF_ADV_RSS_ACTIVE;
+ dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -1490,6 +1765,87 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER: {
+ struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp,
+ &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
+ if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ fdir->flow_id = add_fltr->flow_id;
+ } else {
+ dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
+ add_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER: {
+ struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG: {
+ struct iavf_adv_rss *rss;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
+ iavf_print_adv_rss_cfg(adapter, rss,
+ "Input set change for",
+ "successful");
+ rss->state = IAVF_ADV_RSS_ACTIVE;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG: {
+ struct iavf_adv_rss *rss, *rss_tmp;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rss_tmp,
+ &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
+ list_del(&rss->list);
+ kfree(rss);
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 73da4f71f530..07fe857e9e3a 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -26,7 +26,8 @@ ice-y := ice_main.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 17101c45cbcd..e35db3ff583b 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -36,6 +36,7 @@
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
+#include <linux/dim.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
@@ -44,6 +45,9 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
+#if IS_ENABLED(CONFIG_DCB)
+#include <scsi/iscsi_proto.h>
+#endif /* CONFIG_DCB */
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -73,7 +77,7 @@
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
-#define ICE_FDIR_MSIX 1
+#define ICE_FDIR_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -84,9 +88,12 @@
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
+/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
+#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
+#define ICE_MAX_RXQS_PER_TC 256 /* used when setting per-TC Rx queue counts in the VSI context */
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -190,53 +197,58 @@ struct ice_sw {
u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
-enum ice_state {
- __ICE_TESTING,
- __ICE_DOWN,
- __ICE_NEEDS_RESTART,
- __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
- __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
- __ICE_PFR_REQ, /* set by driver and peers */
- __ICE_CORER_REQ, /* set by driver and peers */
- __ICE_GLOBR_REQ, /* set by driver and peers */
- __ICE_CORER_RECV, /* set by OICR handler */
- __ICE_GLOBR_RECV, /* set by OICR handler */
- __ICE_EMPR_RECV, /* set by OICR handler */
- __ICE_SUSPENDED, /* set on module remove path */
- __ICE_RESET_FAILED, /* set by reset/rebuild */
+enum ice_pf_state {
+ ICE_TESTING,
+ ICE_DOWN,
+ ICE_NEEDS_RESTART,
+ ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
+ ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
+ ICE_PFR_REQ, /* set by driver and peers */
+ ICE_CORER_REQ, /* set by driver and peers */
+ ICE_GLOBR_REQ, /* set by driver and peers */
+ ICE_CORER_RECV, /* set by OICR handler */
+ ICE_GLOBR_RECV, /* set by OICR handler */
+ ICE_EMPR_RECV, /* set by OICR handler */
+ ICE_SUSPENDED, /* set on module remove path */
+ ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
- * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will
* be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
- * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
- __ICE_STATE_NOMINAL_CHECK_BITS,
- __ICE_ADMINQ_EVENT_PENDING,
- __ICE_MAILBOXQ_EVENT_PENDING,
- __ICE_MDD_EVENT_PENDING,
- __ICE_VFLR_EVENT_PENDING,
- __ICE_FLTR_OVERFLOW_PROMISC,
- __ICE_VF_DIS,
- __ICE_CFG_BUSY,
- __ICE_SERVICE_SCHED,
- __ICE_SERVICE_DIS,
- __ICE_FD_FLUSH_REQ,
- __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
- __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
- __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
- __ICE_LINK_DEFAULT_OVERRIDE_PENDING,
- __ICE_PHY_INIT_COMPLETE,
- __ICE_STATE_NBITS /* must be last */
+ ICE_STATE_NOMINAL_CHECK_BITS,
+ ICE_ADMINQ_EVENT_PENDING,
+ ICE_MAILBOXQ_EVENT_PENDING,
+ ICE_MDD_EVENT_PENDING,
+ ICE_VFLR_EVENT_PENDING,
+ ICE_FLTR_OVERFLOW_PROMISC,
+ ICE_VF_DIS,
+ ICE_CFG_BUSY,
+ ICE_SERVICE_SCHED,
+ ICE_SERVICE_DIS,
+ ICE_FD_FLUSH_REQ,
+ ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
+ ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
+ ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
+ ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ ICE_PHY_INIT_COMPLETE,
+ ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
+ ICE_STATE_NBITS /* must be last */
};
-enum ice_vsi_flags {
- ICE_VSI_FLAG_UMAC_FLTR_CHANGED,
- ICE_VSI_FLAG_MMAC_FLTR_CHANGED,
- ICE_VSI_FLAG_VLAN_FLTR_CHANGED,
- ICE_VSI_FLAG_PROMISC_CHANGED,
- ICE_VSI_FLAG_NBITS /* must be last */
+enum ice_vsi_state {
+ ICE_VSI_DOWN,
+ ICE_VSI_NEEDS_RESTART,
+ ICE_VSI_NETDEV_ALLOCD,
+ ICE_VSI_NETDEV_REGISTERED,
+ ICE_VSI_UMAC_FLTR_CHANGED,
+ ICE_VSI_MMAC_FLTR_CHANGED,
+ ICE_VSI_VLAN_FLTR_CHANGED,
+ ICE_VSI_PROMISC_CHANGED,
+ ICE_VSI_STATE_NBITS /* must be last */
};
/* struct that defines a VSI, associated with a dev */
@@ -252,14 +264,12 @@ struct ice_vsi {
irqreturn_t (*irq_handler)(int irq, void *data);
u64 tx_linearize;
- DECLARE_BITMAP(state, __ICE_STATE_NBITS);
- DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
+ DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
unsigned int current_netdev_flags;
u32 tx_restart;
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
- u32 rx_gro_dropped;
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
@@ -342,7 +352,7 @@ struct ice_q_vector {
u16 reg_idx;
u8 num_ring_rx; /* total number of Rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
- u8 itr_countdown; /* when 0 should adjust adaptive ITR */
+ u8 wb_on_itr:1; /* if true, WB on ITR is enabled */
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
*/
@@ -357,6 +367,8 @@ struct ice_q_vector {
struct irq_affinity_notify affinity_notify;
char name[ICE_INT_NAME_STR_LEN];
+
+ u16 total_events; /* net_dim(): number of interrupts processed */
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -414,7 +426,8 @@ struct ice_pf {
u16 num_msix_per_vf;
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
- DECLARE_BITMAP(state, __ICE_STATE_NBITS);
+ DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
@@ -499,7 +512,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr << GLINT_DYN_CTL_ITR_INDX_S);
if (vsi)
- if (test_bit(__ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return;
wr32(hw, GLINT_DYN_CTL(vector), val);
}
@@ -616,8 +629,10 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
-int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
+int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
+int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
+int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 80186589153b..5cdfe406af84 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -877,16 +877,18 @@ struct ice_aqc_get_phy_caps {
__le16 param0;
/* 18.0 - Report qualified modules */
#define ICE_AQC_GET_PHY_RQM BIT(0)
- /* 18.1 - 18.2 : Report mode
- * 00b - Report NVM capabilities
- * 01b - Report topology capabilities
- * 10b - Report SW configured
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report NVM capabilities
+ * 001b - Report topology capabilities
+ * 010b - Report SW configured
+ * 100b - Report default capabilities
*/
-#define ICE_AQC_REPORT_MODE_S 1
-#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S)
-#define ICE_AQC_REPORT_NVM_CAP 0
-#define ICE_AQC_REPORT_TOPO_CAP BIT(1)
-#define ICE_AQC_REPORT_SW_CFG BIT(2)
+#define ICE_AQC_REPORT_MODE_S 1
+#define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S)
+#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0
+#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2)
+#define ICE_AQC_REPORT_DFLT_CFG BIT(3)
__le32 reserved1;
__le32 addr_high;
__le32 addr_low;
@@ -1407,8 +1409,7 @@ struct ice_aqc_nvm_comp_tbl {
u8 cvs[]; /* Component Version String */
} __packed;
-/*
- * Send to PF command (indirect 0x0801) ID is only used by PF
+/* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) ID is only used by PF
*
@@ -1790,6 +1791,7 @@ struct ice_pkg_ver {
};
#define ICE_PKG_NAME_SIZE 32
+#define ICE_SEG_ID_SIZE 28
#define ICE_SEG_NAME_SIZE 28
struct ice_aqc_get_pkg_info {
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 6560acd76c94..88d98c9e5f91 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -581,8 +581,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
return;
netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap ||
- netdev->reg_state != NETREG_REGISTERED)
+ if (!netdev || !netdev->rx_cpu_rmap)
return;
free_irq_cpu_rmap(netdev->rx_cpu_rmap);
@@ -604,8 +603,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
pf = vsi->back;
netdev = vsi->netdev;
- if (!pf || !netdev || !vsi->num_q_vectors ||
- vsi->netdev->reg_state != NETREG_REGISTERED)
+ if (!pf || !netdev || !vsi->num_q_vectors)
return -EINVAL;
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1148d768f8ed..5985a7e5ca8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -113,6 +113,9 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->v_idx = v_idx;
q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
+ q_vector->tx.itr_mode = ITR_DYNAMIC;
+ q_vector->rx.itr_mode = ITR_DYNAMIC;
+
if (vsi->type == ICE_VSI_VF)
goto out;
/* only set affinity_mask if the CPU is online */
@@ -215,6 +218,26 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
}
/**
+ * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
+ * @ring: The Tx ring to configure
+ *
+ * This enables/disables XPS for a given Tx descriptor ring
+ * based on the TCs enabled for the VSI that the ring belongs to.
+ */
+static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+{
+ if (!ring->q_vector || !ring->netdev)
+ return;
+
+ /* We only initialize XPS once, so as not to overwrite user settings */
+ if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
+ return;
+
+ netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ ring->q_index);
+}
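
The one-shot behavior here comes entirely from test_and_set_bit(): the first caller claims the bit and configures XPS, later callers bail out without touching user settings. A userspace equivalent using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag xps_init_done = ATOMIC_FLAG_INIT;

static void cfg_xps_once(void)
{
	/* returns previous value: true means someone already initialized */
	if (atomic_flag_test_and_set(&xps_init_done))
		return;

	printf("XPS configured\n");	/* runs exactly once */
}

int main(void)
{
	cfg_xps_once();
	cfg_xps_once();		/* no-op on the second call */
	return 0;
}
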
+
+/**
* ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
* @ring: The Tx ring to configure
* @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
@@ -664,6 +687,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
u16 pf_q;
u8 tc;
+ /* Configure XPS */
+ ice_cfg_xps_tx_ring(ring);
+
pf_q = ring->reg_idx;
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
@@ -717,25 +743,13 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
ice_cfg_itr_gran(hw);
- if (q_vector->num_ring_rx) {
- struct ice_ring_container *rc = &q_vector->rx;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
+ if (q_vector->num_ring_rx)
+ ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
- if (q_vector->num_ring_tx) {
- struct ice_ring_container *rc = &q_vector->tx;
+ if (q_vector->num_ring_tx)
+ ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
+ ice_write_intrl(q_vector, q_vector->intrl);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index a20edf1538a0..e93b1e40f627 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -158,6 +158,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
return ICE_ERR_PARAM;
hw = pi->hw;
+ if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
+ !ice_fw_supports_report_dflt_cfg(hw))
+ return ICE_ERR_PARAM;
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
if (qual_mods)
@@ -191,7 +195,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
pcaps->module_type[2]);
- if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
@@ -922,7 +926,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* Initialize port_info struct with PHY capabilities */
status = ice_aq_get_phy_caps(hw->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
+ NULL);
devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
@@ -1293,6 +1298,85 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
DEFINE_MUTEX(ice_global_cfg_lock_sw);
/**
+ * ice_should_retry_sq_send_cmd
+ * @opcode: AQ opcode
+ *
+ * Decide if we should retry the send command routine for the ATQ, depending
+ * on the opcode.
+ */
+static bool ice_should_retry_sq_send_cmd(u16 opcode)
+{
+ switch (opcode) {
+ case ice_aqc_opc_get_link_topo:
+ case ice_aqc_opc_lldp_stop:
+ case ice_aqc_opc_lldp_start:
+ case ice_aqc_opc_lldp_filter_ctrl:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @desc: prefilled descriptor describing the command
+ * @buf: buffer to use for indirect commands (or NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * Retry sending the FW Admin Queue command, up to ICE_SQ_SEND_MAX_EXECUTE
+ * times, if the EBUSY AQ error is returned.
+ */
+static enum ice_status
+ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc_cpy;
+ enum ice_status status;
+ bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
+ u8 idx = 0;
+ u16 opcode;
+
+ opcode = le16_to_cpu(desc->opcode);
+ is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
+ memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+ }
+
+ do {
+ status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
+
+ if (!is_cmd_for_retry || !status ||
+ hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
+ break;
+
+ if (buf_cpy)
+ memcpy(buf, buf_cpy, buf_size);
+
+ memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+ mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
+
+ } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
+
+ kfree(buf_cpy);
+
+ return status;
+}
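
The subtlety in this retry helper is that a failed submission may leave the descriptor and buffer partially written back, so pristine copies are restored before every retry. A condensed userspace model of that restore-and-retry loop (fake transport, illustrative constants):

#include <stdio.h>
#include <string.h>

#define MAX_EXECUTE	3

struct desc { char payload[16]; };

static int busy_count = 2;	/* pretend the queue is busy twice */

static int raw_send(struct desc *d)
{
	if (busy_count-- > 0) {
		memset(d->payload, 0, sizeof(d->payload)); /* clobbered! */
		return -1;	/* EBUSY-like failure */
	}
	return 0;
}

static int send_with_retry(struct desc *d)
{
	struct desc copy = *d;	/* snapshot before the first attempt */
	int rc, i = 0;

	do {
		rc = raw_send(d);
		if (rc == 0)
			break;
		*d = copy;	/* restore the clobbered descriptor */
	} while (++i < MAX_EXECUTE);

	return rc;
}

int main(void)
{
	struct desc d;

	snprintf(d.payload, sizeof(d.payload), "lldp_start");
	printf("rc=%d payload=%s\n", send_with_retry(&d), d.payload);
	return 0;
}
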
+
+/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct
* @desc: descriptor describing the command
@@ -1333,7 +1417,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
break;
}
- status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+ status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
if (lock_acquired)
mutex_unlock(&ice_global_cfg_lock_sw);
@@ -2655,7 +2739,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
devm_kfree(ice_hw_to_dev(hw), pcaps);
@@ -2815,8 +2899,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
return ICE_ERR_NO_MEMORY;
/* Get the current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
- NULL);
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
+ pcaps, NULL);
if (status) {
*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
goto out;
@@ -2929,17 +3013,6 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
cfg->link_fec_opt = caps->link_fec_options;
cfg->module_compliance_enforcement =
caps->module_compliance_enforcement;
-
- if (ice_fw_supports_link_override(pi->hw)) {
- struct ice_link_default_override_tlv tlv;
-
- if (ice_get_link_default_override(&tlv, pi))
- return;
-
- if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
- cfg->module_compliance_enforcement |=
- ICE_LINK_OVERRIDE_STRICT_MODE;
- }
}
/**
@@ -2954,16 +3027,21 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
{
struct ice_aqc_get_phy_caps_data *pcaps;
enum ice_status status;
+ struct ice_hw *hw;
if (!pi || !cfg)
return ICE_ERR_BAD_PTR;
+ hw = pi->hw;
+
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
- NULL);
+ status = ice_aq_get_phy_caps(pi, false,
+ (ice_fw_supports_report_dflt_cfg(hw) ?
+ ICE_AQC_REPORT_DFLT_CFG :
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
if (status)
goto out;
@@ -3002,7 +3080,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
break;
}
- if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
+ if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
+ !ice_fw_supports_report_dflt_cfg(hw)) {
struct ice_link_default_override_tlv tlv;
if (ice_get_link_default_override(&tlv, pi))
@@ -3186,7 +3265,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param;
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
@@ -3206,23 +3285,33 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
- * @lut_type: LUT table type
- * @lut: pointer to the LUT buffer provided by the caller
- * @lut_size: size of the LUT buffer
- * @glob_lut_idx: global LUT index
+ * @params: RSS LUT parameters
* @set: set true to set the table, false to get the table
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
static enum ice_status
-__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size, u8 glob_lut_idx, bool set)
+__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
+ u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
enum ice_status status;
- u16 flags = 0;
+ u8 *lut;
+
+ if (!params)
+ return ICE_ERR_PARAM;
+
+ vsi_handle = params->vsi_handle;
+ lut = params->lut;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ lut_size = params->lut_size;
+ lut_type = params->lut_type;
+ glob_lut_idx = params->global_lut_id;
+ vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
cmd_resp = &desc.params.get_set_rss_lut;
@@ -3296,43 +3385,27 @@ ice_aq_get_set_rss_lut_exit:
/**
* ice_aq_get_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @lut_type: LUT table type
- * @lut: pointer to the LUT buffer provided by the caller
- * @lut_size: size of the LUT buffer
+ * @get_params: RSS LUT parameters used to specify which RSS LUT to get
*
* get the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
- u8 *lut, u16 lut_size)
+ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
- if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
- return ICE_ERR_PARAM;
-
- return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
- lut_type, lut, lut_size, 0, false);
+ return __ice_aq_get_set_rss_lut(hw, get_params, false);
}
/**
* ice_aq_set_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @lut_type: LUT table type
- * @lut: pointer to the LUT buffer provided by the caller
- * @lut_size: size of the LUT buffer
+ * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
*
* set the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
- u8 *lut, u16 lut_size)
+ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
- if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
- return ICE_ERR_PARAM;
-
- return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
- lut_type, lut, lut_size, 0, true);
+ return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
/**
@@ -4373,7 +4446,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
}
/**
- * ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl
+ * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
* @hw: pointer to HW struct
*/
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
@@ -4418,3 +4491,23 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
+
+/**
+ * ice_fw_supports_report_dflt_cfg
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports reporting the default configuration
+ */
+bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
+{
+ if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
+ hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
+ return true;
+ }
+ return false;
+}
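
The capability check is a plain three-component version compare: a newer major always qualifies, and an equal major needs (minor, patch) at or above the threshold. The same logic standalone, with made-up threshold values since the ICE_FW_API_REPORT_DFLT_CFG_* constants are defined elsewhere:

#include <stdbool.h>
#include <stdio.h>

#define REQ_MAJ		1
#define REQ_MIN		7
#define REQ_PATCH	9

static bool fw_supports(unsigned int maj, unsigned int min, unsigned int patch)
{
	if (maj > REQ_MAJ)
		return true;
	if (maj == REQ_MAJ &&
	    (min > REQ_MIN || (min == REQ_MIN && patch >= REQ_PATCH)))
		return true;
	return false;
}

int main(void)
{
	printf("1.7.8 -> %d\n", fw_supports(1, 7, 8));	/* 0 */
	printf("1.7.9 -> %d\n", fw_supports(1, 7, 9));	/* 1 */
	printf("2.0.0 -> %d\n", fw_supports(2, 0, 0));	/* 1 */
	return 0;
}
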
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index baf4064fcbfe..7a9d2dfb21a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -11,6 +11,9 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+#define ICE_SQ_SEND_DELAY_TIME_MS 10
+#define ICE_SQ_SEND_MAX_EXECUTE 3
+
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -51,11 +54,9 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
- u16 lut_size);
+ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
- u16 lut_size);
+ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
@@ -178,4 +179,5 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index b2d8a5932b1d..87b33bdd4960 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -892,7 +892,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* ice_sq_send_cmd - send command to Control Queue (ATQ)
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
- * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @desc: prefilled descriptor describing the command
* @buf: buffer to use for indirect commands (or NULL for direct commands)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
@@ -1097,6 +1097,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
+ enum ice_aq_err rq_last_status;
enum ice_status ret_code = 0;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
@@ -1130,13 +1131,12 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
- cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
- le16_to_cpu(desc->opcode),
- cq->rq_last_status);
+ le16_to_cpu(desc->opcode), rq_last_status);
}
memcpy(&e->desc, desc, sizeof(e->desc));
datalen = le16_to_cpu(desc->datalen);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 68866f4f0eb0..fe75871e48ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -14,8 +14,8 @@
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
- (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+ ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1))
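
The extra parentheses aside, the macro computes the free slots of a circular descriptor ring: (next_to_clean - next_to_use - 1), biased by the ring size when next_to_clean has not wrapped past next_to_use, with one slot always left empty to distinguish full from empty. The same math standalone:

#include <stdint.h>
#include <stdio.h>

static uint16_t ctl_q_desc_unused(uint16_t count, uint16_t ntc, uint16_t ntu)
{
	return (uint16_t)(((ntc > ntu) ? 0 : count) + ntc - ntu - 1);
}

int main(void)
{
	/* 32-entry ring, clean=0, use=0: 31 free (one slot kept empty) */
	printf("%u\n", ctl_q_desc_unused(32, 0, 0));
	/* clean=5, use=3: the used region wraps, so 5 - 3 - 1 = 1 free */
	printf("%u\n", ctl_q_desc_unused(32, 5, 3));
	return 0;
}
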
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
@@ -83,7 +83,6 @@ struct ice_rq_event_info {
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
- enum ice_aq_err rq_last_status; /* last status on receive queue */
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 28e834a128c0..849fcf605479 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -804,7 +804,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M;
ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S;
ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
- ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE;
+ ice_app_prot_id_type = ETH_P_FCOE;
} else if (i == 1) {
/* iSCSI APP */
ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M;
@@ -812,14 +812,14 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M;
ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
ice_app_sel_type = ICE_APP_SEL_TCPIP;
- ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+ ice_app_prot_id_type = ISCSI_LISTEN_PORT;
for (j = 0; j < cmp_dcbcfg->numapps; j++) {
u16 prot_id = cmp_dcbcfg->app[j].prot_id;
u8 sel = cmp_dcbcfg->app[j].selector;
if (sel == ICE_APP_SEL_TCPIP &&
- (prot_id == ICE_APP_PROT_ID_ISCSI ||
+ (prot_id == ISCSI_LISTEN_PORT ||
prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
ice_app_prot_id_type = prot_id;
break;
@@ -832,7 +832,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M;
ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S;
ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
- ice_app_prot_id_type = ICE_APP_PROT_ID_FIP;
+ ice_app_prot_id_type = ETH_P_FIP;
}
status = (tlv_status & ice_aqc_cee_status_mask) >>
@@ -857,7 +857,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
}
/**
- * ice_get_ieee_dcb_cfg
+ * ice_get_ieee_or_cee_dcb_cfg
* @pi: port information structure
* @dcbx_mode: mode of DCBX (IEEE or CEE)
*
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 1e8f71ffc8ce..df02cffdf209 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -563,7 +563,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
dcbcfg->numapps = 1;
dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
dcbcfg->app[0].priority = 3;
- dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
+ dcbcfg->app[0].prot_id = ETH_P_FCOE;
ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
kfree(dcbcfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 32ba71a16165..d9ddd0bcf65f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -60,7 +60,6 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
- ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped),
ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
ICE_VSI_STAT("tx_linearize", tx_linearize),
ICE_VSI_STAT("tx_busy", tx_busy),
@@ -807,7 +806,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
netdev_info(netdev, "offline testing starting\n");
- set_bit(__ICE_TESTING, pf->state);
+ set_bit(ICE_TESTING, pf->state);
if (ice_active_vfs(pf)) {
dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
@@ -817,7 +816,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
data[ICE_ETH_TEST_LOOP] = 1;
data[ICE_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__ICE_TESTING, pf->state);
+ clear_bit(ICE_TESTING, pf->state);
goto skip_ol_tests;
}
/* If the device is online then take it offline */
@@ -838,7 +837,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
data[ICE_ETH_TEST_REG])
eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__ICE_TESTING, pf->state);
+ clear_bit(ICE_TESTING, pf->state);
if (if_running) {
int status = ice_open(netdev);
@@ -871,68 +870,47 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- char *p = (char *)data;
unsigned int i;
+ u8 *p = data;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- ice_gstrings_vsi_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ICE_VSI_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ ice_gstrings_vsi_stats[i].stat_string);
ice_for_each_alloc_txq(vsi, i) {
- snprintf(p, ETH_GSTRING_LEN,
- "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
}
ice_for_each_alloc_rxq(vsi, i) {
- snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
}
if (vsi->type != ICE_VSI_PF)
return;
- for (i = 0; i < ICE_PF_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- ice_gstrings_pf_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ICE_PF_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "tx_priority_%u_xon.nic", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "tx_priority_%u_xoff.nic", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
+ ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
}
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "rx_priority_%u_xon.nic", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "rx_priority_%u_xoff.nic", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
+ ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
}
break;
case ETH_SS_TEST:
memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- ice_gstrings_priv_flags[i].name);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
+ ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name);
break;
default:
break;
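
The rewrite above relies on ethtool_sprintf(), which formats into the current string slot and then advances the caller's cursor by one fixed-width ETH_GSTRING_LEN entry, exactly what the removed snprintf()/p += pairs did by hand. A userspace imitation of that contract:

#include <stdarg.h>
#include <stdio.h>

#define GSTRING_LEN	32	/* ETH_GSTRING_LEN is 32 in the kernel */

static void gstring_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, GSTRING_LEN, fmt, args);
	va_end(args);

	*data += GSTRING_LEN;	/* move to the next fixed-width slot */
}

int main(void)
{
	char strings[2 * GSTRING_LEN] = { 0 };
	char *p = strings;

	gstring_sprintf(&p, "tx_queue_%u_packets", 0);
	gstring_sprintf(&p, "tx_queue_%u_bytes", 0);
	printf("%s / %s\n", strings, strings + GSTRING_LEN);
	return 0;
}
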
@@ -1081,7 +1059,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
caps, NULL);
if (status) {
err = -EAGAIN;
@@ -1116,24 +1094,15 @@ static int ice_nway_reset(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- struct ice_port_info *pi;
- enum ice_status status;
+ int err;
- pi = vsi->port_info;
/* If VSI state is up, then restart autoneg with link up */
- if (!test_bit(__ICE_DOWN, vsi->back->state))
- status = ice_aq_set_link_restart_an(pi, true, NULL);
+ if (!test_bit(ICE_DOWN, vsi->back->state))
+ err = ice_set_link(vsi, true);
else
- status = ice_aq_set_link_restart_an(pi, false, NULL);
+ err = ice_set_link(vsi, false);
- if (status) {
- netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(pi->hw->adminq.sq_last_status));
- return -EIO;
- }
-
- return 0;
+ return err;
}
/**
@@ -1475,8 +1444,8 @@ void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
do { \
if (req_speeds & (aq_link_speed) || \
(!req_speeds && \
- (adv_phy_type_lo & phy_type_mask_lo || \
- adv_phy_type_hi & phy_type_mask_hi))) \
+ (advert_phy_type_lo & phy_type_mask_lo || \
+ advert_phy_type_hi & phy_type_mask_hi))) \
ethtool_link_ksettings_add_link_mode(ks, advertising,\
ethtool_link_mode); \
} while (0)
@@ -1493,10 +1462,10 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ u64 advert_phy_type_lo = 0;
+ u64 advert_phy_type_hi = 0;
u64 phy_type_mask_lo = 0;
u64 phy_type_mask_hi = 0;
- u64 adv_phy_type_lo = 0;
- u64 adv_phy_type_hi = 0;
u64 phy_types_high = 0;
u64 phy_types_low = 0;
u16 req_speeds;
@@ -1514,28 +1483,35 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
* requested by user.
*/
if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
- struct ice_link_default_override_tlv *ldo;
-
- ldo = &pf->link_dflt_override;
phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
-
- /* If override enabled and PHY mask set, then
- * Advertising link mode is the intersection of the PHY
- * types without media and the override PHY mask.
+ /* determine advertised modes based on link override only
+ * if it's supported and if the FW doesn't abstract the
+ * driver from having to account for link overrides
*/
- if (ldo->options & ICE_LINK_OVERRIDE_EN &&
- (ldo->phy_type_low || ldo->phy_type_high)) {
- adv_phy_type_lo =
- le64_to_cpu(pf->nvm_phy_type_lo) &
- ldo->phy_type_low;
- adv_phy_type_hi =
- le64_to_cpu(pf->nvm_phy_type_hi) &
- ldo->phy_type_high;
+ if (ice_fw_supports_link_override(&pf->hw) &&
+ !ice_fw_supports_report_dflt_cfg(&pf->hw)) {
+ struct ice_link_default_override_tlv *ldo;
+
+ ldo = &pf->link_dflt_override;
+ /* If override enabled and PHY mask set, then
+ * Advertising link mode is the intersection of the PHY
+ * types without media and the override PHY mask.
+ */
+ if (ldo->options & ICE_LINK_OVERRIDE_EN &&
+ (ldo->phy_type_low || ldo->phy_type_high)) {
+ advert_phy_type_lo =
+ le64_to_cpu(pf->nvm_phy_type_lo) &
+ ldo->phy_type_low;
+ advert_phy_type_hi =
+ le64_to_cpu(pf->nvm_phy_type_hi) &
+ ldo->phy_type_high;
+ }
}
} else {
+ /* strict mode */
phy_types_low = vsi->port_info->phy.phy_type_low;
phy_types_high = vsi->port_info->phy.phy_type_high;
}
@@ -1543,9 +1519,9 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
/* If Advertising link mode PHY type is not using override PHY type,
* then use PHY type with media.
*/
- if (!adv_phy_type_lo && !adv_phy_type_hi) {
- adv_phy_type_lo = vsi->port_info->phy.phy_type_low;
- adv_phy_type_hi = vsi->port_info->phy.phy_type_high;
+ if (!advert_phy_type_lo && !advert_phy_type_hi) {
+ advert_phy_type_lo = vsi->port_info->phy.phy_type_low;
+ advert_phy_type_hi = vsi->port_info->phy.phy_type_high;
}
ethtool_link_ksettings_zero_link_mode(ks, supported);
@@ -2021,7 +1997,7 @@ ice_get_link_ksettings(struct net_device *netdev,
return -ENOMEM;
status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
if (status) {
err = -EIO;
goto done;
@@ -2058,7 +2034,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
if (status) {
err = -EIO;
goto done;
@@ -2225,13 +2201,14 @@ ice_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ethtool_link_ksettings safe_ks, copy_ks;
- struct ice_aqc_get_phy_caps_data *abilities;
u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
- u16 adv_link_speed, curr_link_speed, idx;
+ struct ethtool_link_ksettings copy_ks = *ks;
+ struct ethtool_link_ksettings safe_ks = {};
+ struct ice_aqc_get_phy_caps_data *phy_caps;
struct ice_aqc_set_phy_cfg_data config;
+ u16 adv_link_speed, curr_link_speed;
struct ice_pf *pf = np->vsi->back;
- struct ice_port_info *p;
+ struct ice_port_info *pi;
u8 autoneg_changed = 0;
enum ice_status status;
u64 phy_type_high = 0;
@@ -2239,46 +2216,37 @@ ice_set_link_ksettings(struct net_device *netdev,
int err = 0;
bool linkup;
- p = np->vsi->port_info;
-
- if (!p)
- return -EOPNOTSUPP;
+ pi = np->vsi->port_info;
- /* Check if this is LAN VSI */
- ice_for_each_vsi(pf, idx)
- if (pf->vsi[idx]->type == ICE_VSI_PF) {
- if (np->vsi != pf->vsi[idx])
- return -EOPNOTSUPP;
- break;
- }
+ if (!pi)
+ return -EIO;
- if (p->phy.media_type != ICE_MEDIA_BASET &&
- p->phy.media_type != ICE_MEDIA_FIBER &&
- p->phy.media_type != ICE_MEDIA_BACKPLANE &&
- p->phy.media_type != ICE_MEDIA_DA &&
- p->phy.link_info.link_info & ICE_AQ_LINK_UP)
+ if (pi->phy.media_type != ICE_MEDIA_BASET &&
+ pi->phy.media_type != ICE_MEDIA_FIBER &&
+ pi->phy.media_type != ICE_MEDIA_BACKPLANE &&
+ pi->phy.media_type != ICE_MEDIA_DA &&
+ pi->phy.link_info.link_info & ICE_AQ_LINK_UP)
return -EOPNOTSUPP;
- abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
- if (!abilities)
+ phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL);
+ if (!phy_caps)
return -ENOMEM;
/* Get the PHY capabilities based on media */
- status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP,
- abilities, NULL);
+ if (ice_fw_supports_report_dflt_cfg(pi->hw))
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ phy_caps, NULL);
+ else
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ phy_caps, NULL);
if (status) {
- err = -EAGAIN;
+ err = -EIO;
goto done;
}
- /* copy the ksettings to copy_ks to avoid modifying the original */
- memcpy(&copy_ks, ks, sizeof(copy_ks));
-
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
- memset(&safe_ks, 0, sizeof(safe_ks));
-
/* Get link modes supported by hardware.*/
ice_phy_type_to_ethtool(netdev, &safe_ks);
@@ -2290,7 +2258,7 @@ ice_set_link_ksettings(struct net_device *netdev,
__ETHTOOL_LINK_MODE_MASK_NBITS)) {
if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
- err = -EINVAL;
+ err = -EOPNOTSUPP;
goto done;
}
@@ -2314,7 +2282,7 @@ ice_set_link_ksettings(struct net_device *netdev,
goto done;
}
- while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout) {
err = -EBUSY;
@@ -2327,26 +2295,26 @@ ice_set_link_ksettings(struct net_device *netdev,
* configuration is initialized during probe from PHY capabilities
* software mode, and updated on set PHY configuration.
*/
- memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config));
+ config = pi->phy.curr_user_phy_cfg;
config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Check autoneg */
- err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
+ err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed,
netdev);
if (err)
goto done;
/* Call to get the current link speed */
- p->phy.get_link_info = true;
- status = ice_get_link_status(p, &linkup);
+ pi->phy.get_link_info = true;
+ status = ice_get_link_status(pi, &linkup);
if (status) {
- err = -EAGAIN;
+ err = -EIO;
goto done;
}
- curr_link_speed = p->phy.link_info.link_speed;
+ curr_link_speed = pi->phy.link_info.link_speed;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
/* If speed didn't get set, set it to what it currently is.
@@ -2365,7 +2333,7 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* save the requested speeds */
- p->phy.link_info.req_speeds = adv_link_speed;
+ pi->phy.link_info.req_speeds = adv_link_speed;
/* set link and auto negotiation so changes take effect */
config.caps |= ICE_AQ_PHY_ENA_LINK;
@@ -2373,7 +2341,7 @@ ice_set_link_ksettings(struct net_device *netdev,
/* check if there is a PHY type for the requested advertised speed */
if (!(phy_type_low || phy_type_high)) {
netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
- err = -EAGAIN;
+ err = -EOPNOTSUPP;
goto done;
}
@@ -2381,9 +2349,9 @@ ice_set_link_ksettings(struct net_device *netdev,
* for set PHY configuration
*/
config.phy_type_high = cpu_to_le64(phy_type_high) &
- abilities->phy_type_high;
+ phy_caps->phy_type_high;
config.phy_type_low = cpu_to_le64(phy_type_low) &
- abilities->phy_type_low;
+ phy_caps->phy_type_low;
if (!(config.phy_type_high || config.phy_type_low)) {
/* If there is no intersection and lenient mode is enabled, then
@@ -2397,13 +2365,13 @@ ice_set_link_ksettings(struct net_device *netdev,
pf->nvm_phy_type_lo;
} else {
netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
- err = -EAGAIN;
+ err = -EOPNOTSUPP;
goto done;
}
}
/* If link is up put link down */
- if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
+ if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) {
/* Tell the OS link is going down, the link will go
* back up when fw says it is ready asynchronously
*/
@@ -2413,18 +2381,18 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL);
+ status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
if (status) {
netdev_info(netdev, "Set phy config failed,\n");
- err = -EAGAIN;
+ err = -EIO;
goto done;
}
/* Save speed request */
- p->phy.curr_user_speed_req = adv_link_speed;
+ pi->phy.curr_user_speed_req = adv_link_speed;
done:
- kfree(abilities);
- clear_bit(__ICE_CFG_BUSY, pf->state);
+ kfree(phy_caps);
+ clear_bit(ICE_CFG_BUSY, pf->state);
return err;
}
@@ -2780,7 +2748,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
if (ice_xsk_any_rx_ring_ena(vsi))
return -EBUSY;
- while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
return -EBUSY;
@@ -2907,7 +2875,7 @@ process_link:
/* Bring interface down, copy in the new ring info, then restore the
* interface. if VSI is up, bring it down and then back up
*/
- if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
ice_down(vsi);
if (tx_rings) {
@@ -2959,7 +2927,7 @@ free_tx:
}
done:
- clear_bit(__ICE_CFG_BUSY, pf->state);
+ clear_bit(ICE_CFG_BUSY, pf->state);
return err;
}
@@ -2993,7 +2961,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
NULL);
if (status)
goto out;
@@ -3060,7 +3028,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
NULL);
if (status) {
kfree(pcaps);
@@ -3078,7 +3046,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
}
/* If we have link and don't have autoneg */
- if (!test_bit(__ICE_DOWN, pf->state) &&
+ if (!test_bit(ICE_DOWN, pf->state) &&
!(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
/* Send message that it might not necessarily work */
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
@@ -3161,7 +3129,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int ret = 0, i;
+ int err, i;
u8 *lut;
if (hfunc)
@@ -3180,17 +3148,20 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
if (!lut)
return -ENOMEM;
- if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
- ret = -EIO;
+ err = ice_get_rss_key(vsi, key);
+ if (err)
+ goto out;
+
+ err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size);
+ if (err)
goto out;
- }
for (i = 0; i < vsi->rss_table_size; i++)
indir[i] = (u32)(lut[i]);
out:
kfree(lut);
- return ret;
+ return err;
}
/**
@@ -3211,7 +3182,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
- u8 *seed = NULL;
+ int err;
dev = ice_pf_to_dev(pf);
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
@@ -3232,7 +3203,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -ENOMEM;
}
memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
- seed = vsi->rss_hkey_user;
+
+ err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
+ if (err)
+ return err;
}
if (!vsi->rss_lut_user) {
@@ -3253,8 +3227,9 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
vsi->rss_size);
}
- if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
- return -EIO;
+ err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
+ if (err)
+ return err;
return 0;
}
@@ -3350,10 +3325,9 @@ static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
struct ice_hw *hw;
- int err = 0;
+ int err;
u8 *lut;
dev = ice_pf_to_dev(pf);
@@ -3374,14 +3348,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
/* create/set RSS LUT */
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
- status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
- vsi->rss_table_size);
- if (status) {
- dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
+ err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
+ if (err)
+ dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
ice_aq_str(hw->adminq.sq_last_status));
- err = -EIO;
- }
kfree(lut);
return err;
@@ -3540,13 +3510,13 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
switch (c_type) {
case ICE_RX_CONTAINER:
- ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
- ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
+ ec->rx_coalesce_usecs = rc->itr_setting;
ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
- ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
- ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
+ ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
@@ -3664,11 +3634,16 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
ICE_MAX_INTRL);
return -EINVAL;
}
+ if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl &&
+ (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
+ c_type_str);
+ return -EINVAL;
+ }
if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx),
- ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
- pf->hw.intrl_gran));
+ ice_write_intrl(rc->ring->q_vector,
+ ec->rx_coalesce_usecs_high);
}
use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
@@ -3686,7 +3661,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
return -EINVAL;
}
- itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ itr_setting = rc->itr_setting;
if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
c_type_str, c_type_str);
@@ -3700,12 +3675,18 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
}
if (use_adaptive_coalesce) {
- rc->itr_setting |= ICE_ITR_DYNAMIC;
+ rc->itr_mode = ITR_DYNAMIC;
} else {
- /* save the user set usecs */
+ rc->itr_mode = ITR_STATIC;
+ /* store the user-facing value as it was set */
rc->itr_setting = coalesce_usecs;
- /* device ITR granularity is in 2 usec increments */
- rc->target_itr = ITR_REG_ALIGN(rc->itr_setting);
+ /* write the change to the register */
+ ice_write_itr(rc, coalesce_usecs);
+ /* force writes to take effect immediately; the flush isn't
+ * done in the functions above because the intent is for
+ * them to do lazy writes.
+ */
+ ice_flush(&pf->hw);
}
return 0;
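The split between posting the ITR write lazily and flushing once at the end is the pattern worth noting here. A minimal sketch of it, assuming the driver helpers behave as the comment above describes — ice_write_itr(), ice_flush(), ITR_STATIC and struct ice_ring_container are driver identifiers, while the wrapper function itself is illustrative, not part of the patch:

	static void example_set_static_itr(struct ice_ring_container *rc,
					   struct ice_hw *hw, u16 usecs)
	{
		rc->itr_mode = ITR_STATIC;
		rc->itr_setting = usecs;	/* user-facing value, stored as entered */
		ice_write_itr(rc, usecs);	/* posted (lazy) register write */
		ice_flush(hw);			/* read-back forces the write out */
	}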
@@ -3767,8 +3748,6 @@ ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
if (use_adaptive_coalesce)
return;
- itr_setting = ITR_TO_REG(itr_setting);
-
if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
c_type_str, coalesce_usecs, c_type_str,
@@ -3823,7 +3802,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
return -EINVAL;
set_complete:
-
return 0;
}
@@ -3936,30 +3914,33 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+#define SFF_READ_BLOCK_SIZE 8
+ u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
bool is_sfp = false;
- unsigned int i;
+ unsigned int i, j;
u16 offset = 0;
- u8 value = 0;
u8 page = 0;
- status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
- &value, 1, 0, NULL);
- if (status)
- return -EIO;
-
if (!ee || !ee->len || !data)
return -EINVAL;
- if (value == ICE_MODULE_TYPE_SFP)
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
+ NULL);
+ if (status)
+ return -EIO;
+
+ if (value[0] == ICE_MODULE_TYPE_SFP)
is_sfp = true;
- for (i = 0; i < ee->len; i++) {
+ memset(data, 0, ee->len);
+ for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) {
offset = i + ee->offset;
+ page = 0;
/* Check if we need to access the other memory page */
if (is_sfp) {
@@ -3975,11 +3956,37 @@ ice_get_module_eeprom(struct net_device *netdev,
}
}
- status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp,
- &value, 1, 0, NULL);
- if (status)
- value = 0;
- data[i] = value;
+ /* Bit 2 of EEPROM address 0x02 declares upper
+ * pages are disabled on QSFP modules.
+ * SFP modules only ever use page 0.
+ */
+ if (page == 0 || !(data[0x2] & 0x4)) {
+ /* If the i2c bus is busy due to a slow page change or
+ * link management access, the call can fail; this is
+ * normal, so we retry a few times.
+ */
+ for (j = 0; j < 4; j++) {
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
+ !is_sfp, value,
+ SFF_READ_BLOCK_SIZE,
+ 0, NULL);
+ netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n",
+ addr, offset, page, is_sfp,
+ value[0], value[1], value[2], value[3],
+ value[4], value[5], value[6], value[7],
+ status);
+ if (status) {
+ usleep_range(1500, 2500);
+ memset(value, 0, SFF_READ_BLOCK_SIZE);
+ continue;
+ }
+ break;
+ }
+
+ /* Make sure we have enough room for the new block */
+ if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
+ memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
+ }
}
return 0;
}
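The bounded retry with a short sleep is the load-bearing part of the hunk above. A condensed sketch of the same idea — read_block_with_retry() is a hypothetical wrapper; only ice_aq_sff_eeprom() and ICE_I2C_EEPROM_DEV_ADDR are real driver identifiers, used with the same argument order as in the patch:

	static int read_block_with_retry(struct ice_hw *hw, u16 offset, u8 page,
					 u8 set_page, u8 *buf, u8 len)
	{
		int i;

		/* A busy I2C bus (slow page change, link management traffic)
		 * is expected, not fatal: back off briefly and try again.
		 */
		for (i = 0; i < 4; i++) {
			if (!ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					       offset, page, set_page, buf,
					       len, 0, NULL))
				return 0;
			usleep_range(1500, 2500);
			memset(buf, 0, len);
		}
		return -EIO;
	}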
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 192729546bbf..16de603b280c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1452,7 +1452,7 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
return -EBUSY;
}
- if (test_bit(__ICE_FD_FLUSH_REQ, pf->state))
+ if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
return -EBUSY;
mutex_lock(&hw->fdir_fltr_lock);
@@ -1679,6 +1679,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
input->flex_offset = userdata.flex_offset;
}
+ input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
+ input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+
/* input struct is added to the HW filter list */
ice_fdir_update_list_entry(pf, input, fsp->location);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 59c0c6a0f8c5..59ef68f072c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -40,6 +40,204 @@ static const u8 ice_fdir_ipv4_pkt[] = {
0x00, 0x00
};
+static const u8 ice_fdir_udp4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x58, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_icmp4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_gtpu4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x44, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_l2tpv3_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_l2tpv3_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_ipv6_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x32, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_ah_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x33,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_ipv6_ah_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x33, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_nat_t_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_nat_t_esp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x11, 0x94, 0x00, 0x00, 0x00, 0x08,
+};
+
+static const u8 ice_fdir_ipv4_pfcp_node_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_pfcp_session_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00,
+ 0x00, 0x00, 0x21, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pfcp_node_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65,
+ 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pfcp_session_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65,
+ 0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_non_ip_l2_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
static const u8 ice_fdir_tcpv6_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
@@ -239,6 +437,111 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,
},
{
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP,
+ sizeof(ice_fdir_udp4_gtpu4_pkt),
+ ice_fdir_udp4_gtpu4_pkt,
+ sizeof(ice_fdir_udp4_gtpu4_pkt),
+ ice_fdir_udp4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP,
+ sizeof(ice_fdir_tcp4_gtpu4_pkt),
+ ice_fdir_tcp4_gtpu4_pkt,
+ sizeof(ice_fdir_tcp4_gtpu4_pkt),
+ ice_fdir_tcp4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP,
+ sizeof(ice_fdir_icmp4_gtpu4_pkt),
+ ice_fdir_icmp4_gtpu4_pkt,
+ sizeof(ice_fdir_icmp4_gtpu4_pkt),
+ ice_fdir_icmp4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER,
+ sizeof(ice_fdir_ipv4_gtpu4_pkt),
+ ice_fdir_ipv4_gtpu4_pkt,
+ sizeof(ice_fdir_ipv4_gtpu4_pkt),
+ ice_fdir_ipv4_gtpu4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3,
+ sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3,
+ sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_ESP,
+ sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt,
+ sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_ESP,
+ sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt,
+ sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_AH,
+ sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt,
+ sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_AH,
+ sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt,
+ sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP,
+ sizeof(ice_fdir_ipv4_nat_t_esp_pkt),
+ ice_fdir_ipv4_nat_t_esp_pkt,
+ sizeof(ice_fdir_ipv4_nat_t_esp_pkt),
+ ice_fdir_ipv4_nat_t_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP,
+ sizeof(ice_fdir_ipv6_nat_t_esp_pkt),
+ ice_fdir_ipv6_nat_t_esp_pkt,
+ sizeof(ice_fdir_ipv6_nat_t_esp_pkt),
+ ice_fdir_ipv6_nat_t_esp_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE,
+ sizeof(ice_fdir_ipv4_pfcp_node_pkt),
+ ice_fdir_ipv4_pfcp_node_pkt,
+ sizeof(ice_fdir_ipv4_pfcp_node_pkt),
+ ice_fdir_ipv4_pfcp_node_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION,
+ sizeof(ice_fdir_ipv4_pfcp_session_pkt),
+ ice_fdir_ipv4_pfcp_session_pkt,
+ sizeof(ice_fdir_ipv4_pfcp_session_pkt),
+ ice_fdir_ipv4_pfcp_session_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE,
+ sizeof(ice_fdir_ipv6_pfcp_node_pkt),
+ ice_fdir_ipv6_pfcp_node_pkt,
+ sizeof(ice_fdir_ipv6_pfcp_node_pkt),
+ ice_fdir_ipv6_pfcp_node_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION,
+ sizeof(ice_fdir_ipv6_pfcp_session_pkt),
+ ice_fdir_ipv6_pfcp_session_pkt,
+ sizeof(ice_fdir_ipv6_pfcp_session_pkt),
+ ice_fdir_ipv6_pfcp_session_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NON_IP_L2,
+ sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt,
+ sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt,
+ },
+ {
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt,
@@ -374,21 +677,31 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;
fdir_fltr_ctx.qindex = 0;
+ } else if (input->dest_ctl ==
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fdir_fltr_ctx.qindex = 0;
} else {
+ if (input->dest_ctl ==
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP)
+ fdir_fltr_ctx.toq = input->q_region;
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
fdir_fltr_ctx.qindex = input->q_index;
}
- fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fdir_fltr_ctx.cnt_ena = input->cnt_ena;
fdir_fltr_ctx.cnt_index = input->cnt_index;
fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);
fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;
- fdir_fltr_ctx.toq_prio = 3;
+ if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER)
+ fdir_fltr_ctx.toq_prio = 0;
+ else
+ fdir_fltr_ctx.toq_prio = 3;
fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD :
ICE_FXD_FLTR_QW1_PCMD_REMOVE;
fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;
fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
- fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
- fdir_fltr_ctx.fdid_prio = 3;
+ fdir_fltr_ctx.comp_report = input->comp_report;
+ fdir_fltr_ctx.fdid_prio = input->fdid_prio;
fdir_fltr_ctx.desc_prof = 1;
fdir_fltr_ctx.desc_prof_prio = 3;
ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);
@@ -471,6 +784,55 @@ static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
}
/**
+ * ice_pkt_insert_u6_qfi - insert a 6-bit QFI value into a memory buffer for GTPU
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 8 bit value to convert and insert into pkt at offset
+ *
+ * This function is designed for inserting QFI (6 bits) for GTPU.
+ */
+static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data)
+{
+ u8 ret;
+
+ ret = (data & 0x3F) + (*(pkt + offset) & 0xC0);
+ memcpy(pkt + offset, &ret, sizeof(ret));
+}
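A quick worked example of that masking, with made-up values — the byte already in the packet holds the PPP/RQI flag bits in its top two bits, which must survive the insert:

	u8 cur = 0xC5;				/* flags 0xC0 set, stale QFI bits */
	u8 qfi = 0x2A;				/* 6-bit QFI to insert */
	u8 out = (qfi & 0x3F) + (cur & 0xC0);	/* out == 0xEA */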
+
+/**
+ * ice_pkt_insert_u8 - insert a u8 value into a memory buffer.
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 8 bit value to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for TC ipv6.
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 8 bit value to convert and insert into pkt at offset
+ *
+ * This function is designed for inserting the IPv6 Traffic Class (TC),
+ * which is not byte-aligned. The TC is split into two nibbles, each
+ * merged with the existing bits read from pkt, and the two resulting
+ * bytes are written back one at a time.
+ */
+static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data)
+{
+ u8 high, low;
+
+ high = (data >> 4) + (*(pkt + offset) & 0xF0);
+ memcpy(pkt + offset, &high, sizeof(high));
+
+ low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4);
+ memcpy(pkt + offset + 1, &low, sizeof(low));
+}
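Concretely, the TC byte straddles the version nibble and the flow-label nibble in the first two IPv6 header bytes. A worked example with assumed byte contents:

	u8 b0 = 0x60, b1 = 0x0F, tc = 0xB4;	/* version 6, flow-label nibble 0xF */

	b0 = (tc >> 4) + (b0 & 0xF0);		/* 0x6B: version kept, TC high nibble in */
	b1 = (b1 & 0x0F) + ((tc & 0x0F) << 4);	/* 0x4F: TC low nibble in, flow label kept */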
+
+/**
* ice_pkt_insert_u16 - insert a be16 value into a memory buffer
* @pkt: packet buffer
* @offset: offset into buffer
@@ -493,6 +855,16 @@ static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)
}
/**
+ * ice_pkt_insert_mac_addr - insert a MAC addr into a memory buffer.
+ * @pkt: packet buffer
+ * @addr: MAC address to insert at the start of pkt
+ */
+static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
+{
+ ether_addr_copy(pkt, addr);
+}
+
+/**
* ice_fdir_get_gen_prgm_pkt - generate a training packet
* @hw: pointer to the hardware structure
* @input: flow director filter data structure
@@ -520,11 +892,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
case IPPROTO_SCTP:
flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
break;
- case IPPROTO_IP:
+ default:
flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
break;
- default:
- return ICE_ERR_PARAM;
}
} else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
switch (input->ip.v6.proto) {
@@ -537,11 +907,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
case IPPROTO_SCTP:
flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
break;
- case IPPROTO_IP:
+ default:
flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
break;
- default:
- return ICE_ERR_PARAM;
}
} else {
flow = input->flow_type;
@@ -580,6 +948,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
break;
@@ -592,6 +963,11 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN,
+ input->ext_data.src_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
@@ -602,13 +978,87 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
input->ip.v4.dst_ip);
- ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+ ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET,
+ input->ip.v4.proto);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_GTPU_TEID_OFFSET,
+ input->gtpu_data.teid);
+ ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET,
+ input->gtpu_data.qfi);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
+ ice_pkt_insert_u32(loc, ICE_IPV4_L2TPV3_SESS_ID_OFFSET,
+ input->l2tpv3_data.session_id);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
+ ice_pkt_insert_u32(loc, ICE_IPV6_L2TPV3_SESS_ID_OFFSET,
+ input->l2tpv3_data.session_id);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_ESP_SPI_OFFSET,
+ input->ip.v4.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
+ ice_pkt_insert_u32(loc, ICE_IPV6_ESP_SPI_OFFSET,
+ input->ip.v6.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_AH:
+ ice_pkt_insert_u32(loc, ICE_IPV4_AH_SPI_OFFSET,
+ input->ip.v4.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_AH:
+ ice_pkt_insert_u32(loc, ICE_IPV6_AH_SPI_OFFSET,
+ input->ip.v6.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_NAT_T_ESP_SPI_OFFSET,
+ input->ip.v4.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV6_NAT_T_ESP_SPI_OFFSET,
+ input->ip.v6.sec_parm_idx);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NON_IP_L2:
+ ice_pkt_insert_u16(loc, ICE_MAC_ETHTYPE_OFFSET,
+ input->ext_data.ether_type);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
@@ -619,6 +1069,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
@@ -629,6 +1082,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
@@ -639,12 +1095,20 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
input->ip.v6.src_ip);
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
input->ip.v6.dst_ip);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
+ ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET,
+ input->ip.v6.proto);
+ ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
return ICE_ERR_PARAM;
@@ -671,7 +1135,7 @@ bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
}
/**
- * ice_fdir_find_by_idx - find filter with idx
+ * ice_fdir_find_fltr_by_idx - find filter with idx
* @hw: pointer to hardware structure
* @fltr_idx: index to find.
*
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 1c587766daab..d2d40e18ae8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -25,6 +25,25 @@
#define ICE_IPV6_UDP_DST_PORT_OFFSET 56
#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54
#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56
+#define ICE_MAC_ETHTYPE_OFFSET 12
+#define ICE_IPV4_TOS_OFFSET 15
+#define ICE_IPV4_TTL_OFFSET 22
+#define ICE_IPV6_TC_OFFSET 14
+#define ICE_IPV6_HLIM_OFFSET 21
+#define ICE_IPV6_PROTO_OFFSET 20
+#define ICE_IPV4_GTPU_TEID_OFFSET 46
+#define ICE_IPV4_GTPU_QFI_OFFSET 56
+#define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34
+#define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54
+#define ICE_IPV4_ESP_SPI_OFFSET 34
+#define ICE_IPV6_ESP_SPI_OFFSET 54
+#define ICE_IPV4_AH_SPI_OFFSET 38
+#define ICE_IPV6_AH_SPI_OFFSET 58
+#define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42
+#define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62
+
+#define ICE_FDIR_MAX_FLTRS 16384
+
/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
@@ -34,6 +53,8 @@
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER,
};
enum ice_fltr_prgm_desc_fd_status {
@@ -86,6 +107,7 @@ struct ice_fdir_v4 {
u8 tos;
u8 ip_ver;
u8 proto;
+ u8 ttl;
};
#define ICE_IPV6_ADDR_LEN_AS_U32 4
@@ -99,10 +121,35 @@ struct ice_fdir_v6 {
__be32 sec_parm_idx; /* security parameter index */
u8 tc;
u8 proto;
+ u8 hlim;
+};
+
+struct ice_fdir_udp_gtp {
+ u8 flags;
+ u8 msg_type;
+ __be16 rsrvd_len;
+ __be32 teid;
+ __be16 rsrvd_seq_nbr;
+ u8 rsrvd_n_pdu_nbr;
+ u8 rsrvd_next_ext_type;
+ u8 rsvrd_ext_len;
+ u8 pdu_type:4,
+ spare:4;
+ u8 ppp:1,
+ rqi:1,
+ qfi:6;
+ u32 rsvrd;
+ u8 next_ext;
+};
+
+struct ice_fdir_l2tpv3 {
+ __be32 session_id;
};
struct ice_fdir_extra {
u8 dst_mac[ETH_ALEN]; /* dest MAC address */
+ u8 src_mac[ETH_ALEN]; /* src MAC address */
+ __be16 ether_type; /* for NON_IP_L2 */
u32 usr_def[2]; /* user data */
__be16 vlan_type; /* VLAN ethertype */
__be16 vlan_tag; /* VLAN tag info */
@@ -117,11 +164,19 @@ struct ice_fdir_fltr {
struct ice_fdir_v6 v6;
} ip, mask;
+ struct ice_fdir_udp_gtp gtpu_data;
+ struct ice_fdir_udp_gtp gtpu_mask;
+
+ struct ice_fdir_l2tpv3 l2tpv3_data;
+ struct ice_fdir_l2tpv3 l2tpv3_mask;
+
struct ice_fdir_extra ext_data;
struct ice_fdir_extra ext_mask;
/* flex byte filter data */
__be16 flex_word;
+ /* queue region size (=2^q_region) */
+ u8 q_region;
u16 flex_offset;
u16 flex_fltr;
@@ -129,9 +184,12 @@ struct ice_fdir_fltr {
u16 q_index;
u16 dest_vsi;
u8 dest_ctl;
+ u8 cnt_ena;
u8 fltr_status;
u16 cnt_index;
u32 fltr_id;
+ u8 fdid_prio;
+ u8 comp_report;
};
/* Dummy packet filter definition structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 5e1fd30c0a0f..06ac9badee77 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -334,6 +334,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
return NULL;
+ /* cppcheck-suppress nullPointer */
if (index > ICE_MAX_BST_TCAMS_IN_BUF)
return NULL;
@@ -404,6 +405,7 @@ ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
if (!section)
return NULL;
+ /* cppcheck-suppress nullPointer */
if (index > ICE_MAX_LABELS_IN_BUF)
return NULL;
@@ -1063,32 +1065,36 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- struct ice_global_metadata_seg *meta_seg;
struct ice_generic_seg_hdr *seg_hdr;
if (!pkg_hdr)
return ICE_ERR_PARAM;
- meta_seg = (struct ice_global_metadata_seg *)
- ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
- if (meta_seg) {
- hw->pkg_ver = meta_seg->pkg_ver;
- memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ /* Get package information from the Metadata Section */
+ meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
+ return ICE_ERR_CFG;
+ }
+
+ hw->pkg_ver = meta->ver;
+ memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
- meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
- meta_seg->pkg_name);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n");
- return ICE_ERR_CFG;
- }
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
- if (seg_hdr) {
- hw->ice_pkg_ver = seg_hdr->seg_format_ver;
- memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
- sizeof(hw->ice_pkg_name));
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_seg_id, seg_hdr->seg_id,
+ sizeof(hw->ice_seg_id));
ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
seg_hdr->seg_format_ver.major,
@@ -2063,6 +2069,7 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
count++;
list_for_each_entry(tmp2, list2, list)
chk_count++;
+ /* cppcheck-suppress knownConditionTrueFalse */
if (!count || count != chk_count)
return false;
@@ -2361,18 +2368,82 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
}
/**
- * ice_find_prof_id - find profile ID for a given field vector
+ * ice_prof_has_mask_idx - determine if profile index masking is identical
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @prof: profile to check
+ * @idx: profile index to check
+ * @mask: mask to match
+ */
+static bool
+ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
+ u16 mask)
+{
+ bool expect_no_mask = false;
+ bool found = false;
+ bool match = false;
+ u16 i;
+
+ /* If mask is 0x0000 or 0xffff, then there is no masking */
+ if (mask == 0 || mask == 0xffff)
+ expect_no_mask = true;
+
+ /* Scan the enabled masks on this profile, for the specified idx */
+ for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
+ hw->blk[blk].masks.count; i++)
+ if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
+ if (hw->blk[blk].masks.masks[i].in_use &&
+ hw->blk[blk].masks.masks[i].idx == idx) {
+ found = true;
+ if (hw->blk[blk].masks.masks[i].mask == mask)
+ match = true;
+ break;
+ }
+
+ if (expect_no_mask) {
+ if (found)
+ return false;
+ } else {
+ if (!match)
+ return false;
+ }
+
+ return true;
+}
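In short, the loop above implements this per-FV-word decision:

	requested mask		profile state required
	--------------		----------------------
	0x0000 or 0xFFFF	no mask enabled on that FV index
	anything else		exactly that mask enabled on that index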
+
+/**
+ * ice_prof_has_mask - determine if profile masking is identical
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @prof: profile to check
+ * @masks: masks to match
+ */
+static bool
+ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
+{
+ u16 i;
+
+ /* es->mask_ena[prof] will have the mask */
+ for (i = 0; i < hw->blk[blk].es.fvw; i++)
+ if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_find_prof_id_with_mask - find profile ID for a given field vector
* @hw: pointer to the hardware structure
* @blk: HW block
* @fv: field vector to search for
+ * @masks: masks for FV
* @prof_id: receives the profile ID
*/
static enum ice_status
-ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u8 *prof_id)
+ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
- u16 off;
u8 i;
/* For FD, we don't want to re-use an existing profile with the same
@@ -2382,11 +2453,15 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
return ICE_ERR_DOES_NOT_EXIST;
for (i = 0; i < (u8)es->count; i++) {
- off = i * es->fvw;
+ u16 off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
+ /* check if masks settings are the same for this profile */
+ if (masks && !ice_prof_has_mask(hw, blk, i, masks))
+ continue;
+
*prof_id = i;
return 0;
}
@@ -2438,20 +2513,22 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* ice_alloc_tcam_ent - allocate hardware TCAM entry
* @hw: pointer to the HW struct
* @blk: the block to allocate the TCAM for
+ * @btm: true to allocate from bottom of table, false to allocate from top
* @tcam_idx: pointer to variable to receive the TCAM entry
*
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
static enum ice_status
-ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
+ u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
- return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+ return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
/**
@@ -2537,6 +2614,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
}
/**
+ * ice_write_prof_mask_reg - write profile mask register
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @mask_idx: mask index
+ * @idx: index of the FV which will use the mask
+ * @mask: the 16-bit mask
+ */
+static void
+ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
+ u16 idx, u16 mask)
+{
+ u32 offset;
+ u32 val;
+
+ switch (blk) {
+ case ICE_BLK_RSS:
+ offset = GLQF_HMASK(mask_idx);
+ val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
+ val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+ break;
+ case ICE_BLK_FD:
+ offset = GLQF_FDMASK(mask_idx);
+ val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
+ val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+ blk);
+ return;
+ }
+
+ wr32(hw, offset, val);
+ ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
+ blk, idx, offset, val);
+}
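The shift-and-mask packing above is easier to see with numbers. A hedged illustration: the GLQF_FDMASK_* shift/mask macros are real register definitions, but the concrete layout used for the result below (FV index in the low bits, 16-bit mask at bit 16) is assumed for the example:

	u16 idx = 3, mask = 0x00FF;
	u32 val;

	val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
	val |= ((u32)mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
	/* under the assumed layout: val == 0x00FF0003 */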
+
+/**
+ * ice_write_prof_mask_enable_res - write profile mask enable register
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ * @enable_mask: enable mask
+ */
+static void
+ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
+ u16 prof_id, u32 enable_mask)
+{
+ u32 offset;
+
+ switch (blk) {
+ case ICE_BLK_RSS:
+ offset = GLQF_HMASK_SEL(prof_id);
+ break;
+ case ICE_BLK_FD:
+ offset = GLQF_FDMASK_SEL(prof_id);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
+ blk);
+ return;
+ }
+
+ wr32(hw, offset, enable_mask);
+ ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
+ blk, prof_id, offset, enable_mask);
+}
+
+/**
+ * ice_init_prof_masks - initialize profile masks for a block
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ */
+static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 per_pf;
+ u16 i;
+
+ mutex_init(&hw->blk[blk].masks.lock);
+
+ per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
+
+ hw->blk[blk].masks.count = per_pf;
+ hw->blk[blk].masks.first = hw->pf_id * per_pf;
+
+ memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
+
+ for (i = hw->blk[blk].masks.first;
+ i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
+ ice_write_prof_mask_reg(hw, blk, i, 0, 0);
+}
+
+/**
+ * ice_init_all_prof_masks - initialize all prof masks
+ * @hw: pointer to the HW struct
+ */
+static void ice_init_all_prof_masks(struct ice_hw *hw)
+{
+ ice_init_prof_masks(hw, ICE_BLK_RSS);
+ ice_init_prof_masks(hw, ICE_BLK_FD);
+}
+
+/**
+ * ice_alloc_prof_mask - allocate profile mask
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: index of FV which will use the mask
+ * @mask: the 16-bit mask
+ * @mask_idx: variable to receive the mask index
+ */
+static enum ice_status
+ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
+ u16 *mask_idx)
+{
+ bool found_unused = false, found_copy = false;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ u16 unused_idx = 0, copy_idx = 0;
+ u16 i;
+
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->blk[blk].masks.lock);
+
+ for (i = hw->blk[blk].masks.first;
+ i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
+ if (hw->blk[blk].masks.masks[i].in_use) {
+ /* if mask is in use and it exactly duplicates the
+ * desired mask and index, then it can be reused
+ */
+ if (hw->blk[blk].masks.masks[i].mask == mask &&
+ hw->blk[blk].masks.masks[i].idx == idx) {
+ found_copy = true;
+ copy_idx = i;
+ break;
+ }
+ } else {
+ /* save off unused index, but keep searching in case
+ * there is an exact match later on
+ */
+ if (!found_unused) {
+ found_unused = true;
+ unused_idx = i;
+ }
+ }
+
+ if (found_copy)
+ i = copy_idx;
+ else if (found_unused)
+ i = unused_idx;
+ else
+ goto err_ice_alloc_prof_mask;
+
+ /* update mask for a new entry */
+ if (found_unused) {
+ hw->blk[blk].masks.masks[i].in_use = true;
+ hw->blk[blk].masks.masks[i].mask = mask;
+ hw->blk[blk].masks.masks[i].idx = idx;
+ hw->blk[blk].masks.masks[i].ref = 0;
+ ice_write_prof_mask_reg(hw, blk, i, idx, mask);
+ }
+
+ hw->blk[blk].masks.masks[i].ref++;
+ *mask_idx = i;
+ status = 0;
+
+err_ice_alloc_prof_mask:
+ mutex_unlock(&hw->blk[blk].masks.lock);
+
+ return status;
+}
+
+/**
+ * ice_free_prof_mask - free profile mask
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @mask_idx: index of mask
+ */
+static enum ice_status
+ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
+{
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return ICE_ERR_PARAM;
+
+ if (!(mask_idx >= hw->blk[blk].masks.first &&
+ mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ mutex_lock(&hw->blk[blk].masks.lock);
+
+ if (!hw->blk[blk].masks.masks[mask_idx].in_use)
+ goto exit_ice_free_prof_mask;
+
+ if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
+ hw->blk[blk].masks.masks[mask_idx].ref--;
+ goto exit_ice_free_prof_mask;
+ }
+
+ /* remove mask */
+ hw->blk[blk].masks.masks[mask_idx].in_use = false;
+ hw->blk[blk].masks.masks[mask_idx].mask = 0;
+ hw->blk[blk].masks.masks[mask_idx].idx = 0;
+
+ /* update mask as unused entry */
+ ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
+ mask_idx);
+ ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
+
+exit_ice_free_prof_mask:
+ mutex_unlock(&hw->blk[blk].masks.lock);
+
+ return 0;
+}
+
+/**
+ * ice_free_prof_masks - free all profile masks for a profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ */
+static enum ice_status
+ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
+{
+ u32 mask_bm;
+ u16 i;
+
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return ICE_ERR_PARAM;
+
+ mask_bm = hw->blk[blk].es.mask_ena[prof_id];
+ for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
+ if (mask_bm & BIT(i))
+ ice_free_prof_mask(hw, blk, i);
+
+ return 0;
+}
+
+/**
+ * ice_shutdown_prof_masks - clear masks and release the masking lock
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ *
+ * This should be called before unloading the driver
+ */
+static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ mutex_lock(&hw->blk[blk].masks.lock);
+
+ for (i = hw->blk[blk].masks.first;
+ i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
+ ice_write_prof_mask_reg(hw, blk, i, 0, 0);
+
+ hw->blk[blk].masks.masks[i].in_use = false;
+ hw->blk[blk].masks.masks[i].idx = 0;
+ hw->blk[blk].masks.masks[i].mask = 0;
+ }
+
+ mutex_unlock(&hw->blk[blk].masks.lock);
+ mutex_destroy(&hw->blk[blk].masks.lock);
+}
+
+/**
+ * ice_shutdown_all_prof_masks - clear masks and release locks for all blocks
+ * @hw: pointer to the HW struct
+ *
+ * This should be called before unloading the driver
+ */
+static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
+{
+ ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
+ ice_shutdown_prof_masks(hw, ICE_BLK_FD);
+}
+
+/**
+ * ice_update_prof_masking - set registers according to masking
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof_id: profile ID
+ * @masks: masks
+ */
+static enum ice_status
+ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
+ u16 *masks)
+{
+ bool err = false;
+ u32 ena_mask = 0;
+ u16 idx;
+ u16 i;
+
+ /* Only support FD and RSS masking, otherwise nothing to be done */
+ if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
+ return 0;
+
+ for (i = 0; i < hw->blk[blk].es.fvw; i++)
+ if (masks[i] && masks[i] != 0xFFFF) {
+ if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
+ ena_mask |= BIT(idx);
+ } else {
+ /* not enough bitmaps */
+ err = true;
+ break;
+ }
+ }
+
+ if (err) {
+ /* free any bitmaps we have allocated */
+ for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
+ if (ena_mask & BIT(i))
+ ice_free_prof_mask(hw, blk, i);
+
+ return ICE_ERR_OUT_OF_RANGE;
+ }
+
+ /* enable the masks for this profile */
+ ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
+
+ /* store enabled masks with profile so that they can be freed later */
+ hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
+
+ return 0;
+}
+
+/**
* ice_write_es - write an extraction sequence to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
@@ -2575,6 +2976,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
ice_write_es(hw, blk, prof_id, NULL);
+ ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
}
@@ -2937,6 +3339,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2944,6 +3347,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), r);
}
mutex_destroy(&hw->rss_locks);
+ ice_shutdown_all_prof_masks(hw);
memset(hw->blk, 0, sizeof(hw->blk));
}
@@ -2997,6 +3401,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
+ memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
}
}
@@ -3010,6 +3415,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
mutex_init(&hw->rss_locks);
INIT_LIST_HEAD(&hw->rss_list_head);
+ ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
@@ -3112,6 +3518,11 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
goto err;
+
+ es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->mask_ena), GFP_KERNEL);
+ if (!es->mask_ena)
+ goto err;
}
return 0;
@@ -3711,22 +4122,79 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
return 0;
}
+/* The entries here need to match the order of enum ice_ptype_attrib_type */
+static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
+ { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
+ { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
+ { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
+ { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
+};
+
+/**
+ * ice_get_ptype_attrib_info - get PTYPE attribute information
+ * @type: attribute type
+ * @info: pointer to the variable that receives the attribute information
+ */
+static void
+ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
+ struct ice_ptype_attrib_info *info)
+{
+ *info = ice_ptype_attributes[type];
+}
+
+/**
+ * ice_add_prof_attrib - add any PTG with attributes to profile
+ * @prof: pointer to the profile to which PTG entries will be added
+ * @ptg: PTG to be added
+ * @ptype: PTYPE that needs to be looked up
+ * @attr: array of attributes that will be considered
+ * @attr_cnt: number of elements in the attribute array
+ */
+static enum ice_status
+ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
+ const struct ice_ptype_attributes *attr, u16 attr_cnt)
+{
+ bool found = false;
+ u16 i;
+
+ for (i = 0; i < attr_cnt; i++)
+ if (attr[i].ptype == ptype) {
+ found = true;
+
+ prof->ptg[prof->ptg_cnt] = ptg;
+ ice_get_ptype_attrib_info(attr[i].attrib,
+ &prof->attr[prof->ptg_cnt]);
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ return ICE_ERR_MAX_LIMIT;
+ }
+
+ if (!found)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ return 0;
+}
+
/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
* @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @attr: array of attributes
+ * @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
+ * @masks: mask for extraction sequence
*
- * This function registers a profile, which matches a set of PTGs with a
+ * This function registers a profile, which matches a set of PTYPEs with a
* particular extraction sequence. While the hardware profile is allocated
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es)
+ const struct ice_ptype_attributes *attr, u16 attr_cnt,
+ struct ice_fv_word *es, u16 *masks)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3740,7 +4208,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id(hw, blk, es, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3758,6 +4226,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
if (status)
goto err_ice_add_prof;
}
+ status = ice_update_prof_masking(hw, blk, prof_id, masks);
+ if (status)
+ goto err_ice_add_prof;
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
@@ -3792,7 +4263,6 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
BITS_PER_BYTE) {
u16 ptype;
u8 ptg;
- u8 m;
ptype = byte * BITS_PER_BYTE + bit;
@@ -3807,15 +4277,25 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
continue;
set_bit(ptg, ptgs_used);
- prof->ptg[prof->ptg_cnt] = ptg;
-
- if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ /* Check to see if there are any attributes for
+ * this PTYPE, and add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype,
+ attr, attr_cnt);
+ if (status == ICE_ERR_MAX_LIMIT)
break;
+ if (status) {
+ /* This is simply a PTYPE/PTG with no
+ * attribute
+ */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- /* nothing left in byte, then exit */
- m = ~(u8)((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
- break;
+ if (++prof->ptg_cnt >=
+ ICE_MAX_PTG_PER_PROFILE)
+ break;
+ }
}
bytes--;
@@ -4326,7 +4806,12 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
}
/* for re-enabling, reallocate a TCAM */
- status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ /* for entries with empty attribute masks, allocate entry from
+ * the bottom of the TCAM table; otherwise, allocate from the
+ * top of the table in order to give it higher priority
+ */
+ status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
+ &tcam->tcam_idx);
if (status)
return status;
@@ -4336,8 +4821,8 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
return ICE_ERR_NO_MEMORY;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
- tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
- nm_msk);
+ tcam->ptg, vsig, 0, tcam->attr.flags,
+ vl_msk, dc_msk, nm_msk);
if (status)
goto err_ice_prof_tcam_ena_dis;
@@ -4485,7 +4970,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* allocate the TCAM entry index */
- status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ /* for entries with empty attribute masks, allocate entry from
+ * the bottom of the TCAM table; otherwise, allocate from the
+ * top of the table in order to give it higher priority
+ */
+ status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
+ &tcam_idx);
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
@@ -4494,6 +4984,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].ptg = map->ptg[i];
t->tcam[i].prof_id = map->prof_id;
t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].attr = map->attr[i];
t->tcam[i].in_use = true;
p->type = ICE_TCAM_ADD;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 20deddb807c5..8a58e79729b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -27,7 +27,8 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es);
+ const struct ice_ptype_attributes *attr, u16 attr_cnt,
+ struct ice_fv_word *es, u16 *masks);
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 24063c1351b2..7d8b517a63c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -109,6 +109,7 @@ struct ice_buf_hdr {
(ent_sz))
/* ice package section IDs */
+#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
@@ -117,6 +118,14 @@ struct ice_buf_hdr {
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
+
+struct ice_meta_sect {
+ struct ice_pkg_ver ver;
+#define ICE_META_SECT_NAME_SIZE 28
+ char name[ICE_META_SECT_NAME_SIZE];
+ __le32 track_id;
+};
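
A compile-time layout check one could place next to this struct; the 36-byte figure assumes ice_pkg_ver is the driver's four-u8 {major, minor, update, draft} struct, giving 4 + 28 + 4 bytes with no padding:

	static_assert(sizeof(struct ice_meta_sect) == 36,
		      "ice_meta_sect must match the package metadata section layout");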
+
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
@@ -190,6 +199,64 @@ enum ice_sect {
ICE_SECT_COUNT
};
+#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
+#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
+#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
+#define ICE_MAC_IPV4_GTPU_IPV4_TCP 334
+#define ICE_MAC_IPV4_GTPU_IPV4_ICMP 335
+#define ICE_MAC_IPV6_GTPU_IPV4_FRAG 336
+#define ICE_MAC_IPV6_GTPU_IPV4_PAY 337
+#define ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY 338
+#define ICE_MAC_IPV6_GTPU_IPV4_TCP 339
+#define ICE_MAC_IPV6_GTPU_IPV4_ICMP 340
+#define ICE_MAC_IPV4_GTPU_IPV6_FRAG 341
+#define ICE_MAC_IPV4_GTPU_IPV6_PAY 342
+#define ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY 343
+#define ICE_MAC_IPV4_GTPU_IPV6_TCP 344
+#define ICE_MAC_IPV4_GTPU_IPV6_ICMPV6 345
+#define ICE_MAC_IPV6_GTPU_IPV6_FRAG 346
+#define ICE_MAC_IPV6_GTPU_IPV6_PAY 347
+#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
+#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
+#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
+
+/* Attributes that can modify PTYPE definitions.
+ *
+ * These values represent special attributes for PTYPEs, which resolve
+ * into metadata packet flag definitions that can be used in the TCAM to
+ * identify a PTYPE with specific characteristics.
+ */
+enum ice_ptype_attrib_type {
+ /* GTP PTYPEs */
+ ICE_PTYPE_ATTR_GTP_PDU_EH,
+ ICE_PTYPE_ATTR_GTP_SESSION,
+ ICE_PTYPE_ATTR_GTP_DOWNLINK,
+ ICE_PTYPE_ATTR_GTP_UPLINK,
+};
+
+struct ice_ptype_attrib_info {
+ u16 flags;
+ u16 mask;
+};
+
+/* TCAM flag definitions */
+#define ICE_GTP_PDU BIT(14)
+#define ICE_GTP_PDU_LINK BIT(13)
+
+/* GTP attributes */
+#define ICE_GTP_PDU_FLAG_MASK (ICE_GTP_PDU)
+#define ICE_GTP_PDU_EH ICE_GTP_PDU
+
+#define ICE_GTP_FLAGS_MASK (ICE_GTP_PDU | ICE_GTP_PDU_LINK)
+#define ICE_GTP_SESSION 0
+#define ICE_GTP_DOWNLINK ICE_GTP_PDU
+#define ICE_GTP_UPLINK (ICE_GTP_PDU | ICE_GTP_PDU_LINK)
+
+struct ice_ptype_attributes {
+ u16 ptype;
+ enum ice_ptype_attrib_type attrib;
+};
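
The (flags, mask) pairs defined above act as a ternary match on the packet's metadata flags: an attribute matches when the masked flag bits equal the flags value. A hypothetical helper (not a driver function) showing the intended semantics:

	static bool ice_attr_matches(u16 pkt_flags,
				     const struct ice_ptype_attrib_info *info)
	{
		/* e.g. downlink under ICE_GTP_FLAGS_MASK: the PDU bit must
		 * be set and the PDU_LINK bit must be clear
		 */
		return (pkt_flags & info->mask) == info->flags;
	}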
+
/* package labels */
struct ice_label {
__le16 value;
@@ -335,6 +402,7 @@ struct ice_es {
u16 count;
u16 fvw;
u16 *ref_count;
+ u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
struct mutex prof_map_lock; /* protect access to profiles list */
@@ -372,12 +440,14 @@ struct ice_prof_map {
u8 prof_id;
u8 ptg_cnt;
u8 ptg[ICE_MAX_PTG_PER_PROFILE];
+ struct ice_ptype_attrib_info attr[ICE_MAX_PTG_PER_PROFILE];
};
#define ICE_INVALID_TCAM 0xFFFF
struct ice_tcam_inf {
u16 tcam_idx;
+ struct ice_ptype_attrib_info attr;
u8 ptg;
u8 prof_id;
u8 in_use;
@@ -427,8 +497,8 @@ struct ice_xlt1 {
#define ICE_PF_NUM_S 13
#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
#define ICE_VSIG_VALUE(vsig, pf_id) \
- (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
- (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
+ ((u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
+ (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)))
#define ICE_DEFAULT_VSIG 0
/* XLT2 Table */
@@ -478,6 +548,21 @@ struct ice_prof_redir {
u16 count;
};
+struct ice_mask {
+ u16 mask; /* 16-bit mask */
+ u16 idx; /* index */
+ u16 ref; /* reference count */
+ u8 in_use; /* non-zero if used */
+};
+
+struct ice_masks {
+ struct mutex lock; /* lock to protect this structure */
+ u16 first; /* first mask owned by the PF */
+ u16 count; /* number of masks owned by the PF */
+#define ICE_PROF_MASK_COUNT 32
+ struct ice_mask masks[ICE_PROF_MASK_COUNT];
+};
+
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
@@ -485,6 +570,7 @@ struct ice_blk_info {
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
+ struct ice_masks masks;
u8 overwrite; /* set to true to allow overwrite of table entries */
u8 is_list_init;
};
@@ -513,6 +599,7 @@ struct ice_chs_chg {
u16 vsig;
u16 orig_vsig;
u16 tcam_idx;
+ struct ice_ptype_attrib_info attr;
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 89a0cef20506..f160672448a0 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -9,18 +9,50 @@ struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr;
s16 off; /* Offset from start of a protocol header, in bits */
u16 size; /* Size of fields in bits */
+ u16 mask; /* 16-bit mask for field */
};
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
.size = (_size_bytes) * BITS_PER_BYTE, \
+ .mask = 0, \
+}
+
+#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
+ .hdr = _hdr, \
+ .off = (_offset_bytes) * BITS_PER_BYTE, \
+ .size = (_size_bytes) * BITS_PER_BYTE, \
+ .mask = _mask, \
}
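
As a worked expansion of the masked variant, the IPv4 DSCP entry in the table below resolves as follows (offsets and sizes scale from bytes to bits):

	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc)
	/* expands to:
	 * { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 0, .size = 8, .mask = 0x00fc }
	 * one byte at bit offset 0 of the IPv4 header, with only the six
	 * DSCP bits of the extracted 16-bit word left significant
	 */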
/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+ /* Ether */
+ /* ICE_FLOW_FIELD_IDX_ETH_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ETH_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_S_VLAN */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_C_VLAN */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
/* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
+ /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
+ /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
+ /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
/* ICE_FLOW_FIELD_IDX_IPV4_DA */
@@ -42,22 +74,112 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
+ /* ARP */
+ /* ICE_FLOW_FIELD_IDX_ARP_SIP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_ARP_DIP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_ARP_SHA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ARP_DHA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ARP_OP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
+ /* ICMP */
+ /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
+ /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
/* GRE */
/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
sizeof_field(struct gre_full_hdr, key)),
+ /* GTP */
+ /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
+ 0x3f00),
+ /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
+ /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
+ /* PPPoE */
+ /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
+ /* PFCP */
+ /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
+ /* L2TPv3 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
+ /* ESP */
+ /* ICE_FLOW_FIELD_IDX_ESP_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
+ /* AH */
+ /* ICE_FLOW_FIELD_IDX_AH_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
+ /* NAT_T_ESP */
+ /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
*
- * Packet types for packets with an Outer/First/Single IPv4 header
+ * Packet types for packets with an Outer/First/Single MAC header
+ */
+static const u32 ice_ptypes_mac_ofos[] = {
+ 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
+ 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC VLAN header */
+static const u32 ice_ptypes_macvlan_il[] = {
+ 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
+ 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
+ * include IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000155, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header, includes
+ * IPv4 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv4_ofos_all[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000155, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -67,7 +189,7 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x001FF800, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -75,11 +197,27 @@ static const u32 ice_ptypes_ipv4_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv6 header */
+/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
+ * include IPv6 other PTYPEs
+ */
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header, includes
+ * IPv6 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv6_ofos_all[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -91,7 +229,7 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -100,7 +238,7 @@ static const u32 ice_ptypes_ipv6_il[] = {
};
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
-static const u32 ice_ipv4_ofos_no_l4[] = {
+static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
0x10C00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -111,8 +249,20 @@ static const u32 ice_ipv4_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outermost/First ARP header */
+static const u32 ice_ptypes_arp_of[] = {
+ 0x00000800, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
-static const u32 ice_ipv4_il_no_l4[] = {
+static const u32 ice_ptypes_ipv4_il_no_l4[] = {
0x60000000, 0x18043008, 0x80000002, 0x6010c021,
0x00000008, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -124,7 +274,7 @@ static const u32 ice_ipv4_il_no_l4[] = {
};
/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
-static const u32 ice_ipv6_ofos_no_l4[] = {
+static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x43000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -136,7 +286,7 @@ static const u32 ice_ipv6_ofos_no_l4[] = {
};
/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
-static const u32 ice_ipv6_il_no_l4[] = {
+static const u32 ice_ptypes_ipv6_il_no_l4[] = {
0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
0x00000430, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -153,7 +303,7 @@ static const u32 ice_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00410000, 0x90842000, 0x00000007,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -165,7 +315,7 @@ static const u32 ice_ptypes_udp_il[] = {
static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00820000, 0x21084000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -177,6 +327,18 @@ static const u32 ice_ptypes_tcp_il[] = {
static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x01040000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ICMP header */
+static const u32 ice_ptypes_icmp_of[] = {
+ 0x10000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -185,6 +347,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Innermost/Last ICMP header */
+static const u32 ice_ptypes_icmp_il[] = {
+ 0x00000000, 0x02040408, 0x40000102, 0x08101020,
+ 0x00000408, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x42108000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
@@ -197,6 +371,218 @@ static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Innermost/Last MAC header */
+static const u32 ice_ptypes_mac_il[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPC */
+static const u32 ice_ptypes_gtpc[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPC with TEID */
+static const u32 ice_ptypes_gtpc_tid[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000060, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
+};
+
+static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
+};
+
+static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
+};
+
+static const u32 ice_ptypes_gtpu[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
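
In all of these tables, bit n of u32 word w marks PTYPE w * 32 + n; the driver consumes them through the bitmap helpers. A hypothetical lookup sketch, with a decode of the GTPU word above:

	static bool ice_ptype_in_map(const u32 *map, u16 ptype)
	{
		return map[ptype / 32] & BIT(ptype % 32);
	}

	/* word 10 of ice_ptypes_gtpu is 0x7FFFFE00: bits 9..30 set, i.e.
	 * PTYPEs 329..350, bracketing the GTPU PTYPEs 331..350 defined in
	 * ice_flex_type.h earlier in this patch
	 */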
+
+/* Packet types for PPPoE */
+static const u32 ice_ptypes_pppoe[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with PFCP NODE header */
+static const u32 ice_ptypes_pfcp_node[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x80000000, 0x00000002,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with PFCP SESSION header */
+static const u32 ice_ptypes_pfcp_session[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000005,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for L2TPv3 */
+static const u32 ice_ptypes_l2tpv3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000300,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for ESP */
+static const u32 ice_ptypes_esp[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000003, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for AH */
+static const u32 ice_ptypes_ah[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x0000000C, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with NAT_T ESP header */
+static const u32 ice_ptypes_nat_t_esp[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 ice_ptypes_mac_non_ip_ofos[] = {
+ 0x00000846, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
@@ -208,12 +594,30 @@ struct ice_flow_prof_params {
* This will give us the direction flags.
*/
struct ice_fv_word es[ICE_MAX_FV_WORDS];
+ /* optional attributes to associate with particular PTYPEs */
+ const struct ice_ptype_attributes *attr;
+ u16 attr_cnt;
+
+ u16 mask[ICE_MAX_FV_WORDS];
DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
+#define ICE_FLOW_RSS_HDRS_INNER_MASK \
+ (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
+ ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
+ ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
+ ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
+ ICE_FLOW_SEG_HDR_NAT_T_ESP)
+
+#define ICE_FLOW_SEG_HDRS_L2_MASK \
+ (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
- (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
+ (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+ ICE_FLOW_SEG_HDR_SCTP)
+/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
+#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
@@ -243,8 +647,11 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
+#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
+#define ICE_FLOW_PROT_HDR_SZ_ARP 28
+#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
@@ -256,16 +663,27 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
*/
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
- u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
+ u16 sz;
+
+ /* L2 headers */
+ sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
+ ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3 headers */
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
+ sz += ICE_FLOW_PROT_HDR_SZ_ARP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
+ /* An L3 header is required if L4 is specified */
+ return 0;
/* L4 headers */
- if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
+ sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
sz += ICE_FLOW_PROT_HDR_SZ_TCP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
sz += ICE_FLOW_PROT_HDR_SZ_UDP;
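
A worked example of the sizing logic above for a hypothetical VLAN-tagged IPv4/TCP segment:

	/* hdrs = ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV4 |
	 *	  ICE_FLOW_SEG_HDR_TCP
	 */
	sz  = ICE_FLOW_PROT_HDR_SZ_MAC_VLAN;	/* 16		*/
	sz += ICE_FLOW_PROT_HDR_SZ_IPV4;	/* + 20 -> 36	*/
	sz += ICE_FLOW_PROT_HDR_SZ_TCP;		/* + 20 -> 56	*/
	/* an L4-only segment (e.g. TCP with no L3 header) now returns 0 */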
@@ -298,10 +716,41 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
+ src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
+ (const unsigned long *)ice_ptypes_mac_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
+ src = (const unsigned long *)ice_ptypes_macvlan_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
+ bitmap_and(params->ptypes, params->ptypes,
+ (const unsigned long *)ice_ptypes_arp_of,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
- !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
- src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
- (const unsigned long *)ice_ipv4_il_no_l4;
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
+ (const unsigned long *)ice_ptypes_ipv4_ofos_all;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
+ (const unsigned long *)ice_ptypes_ipv6_ofos_all;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos_no_l4 :
+ (const unsigned long *)ice_ptypes_ipv4_il_no_l4;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
@@ -310,9 +759,9 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
- !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
- src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
- (const unsigned long *)ice_ipv6_il_no_l4;
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos_no_l4 :
+ (const unsigned long *)ice_ptypes_ipv6_il_no_l4;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
@@ -322,6 +771,20 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
ICE_FLOW_PTYPE_MAX);
}
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
+ src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
+ src = (const unsigned long *)ice_ptypes_pppoe;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else {
+ src = (const unsigned long *)ice_ptypes_pppoe;
+ bitmap_andnot(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
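
The else branch above is a default-exclusion: when a header that owns dedicated PTYPEs (here PPPoE) is not requested, its PTYPEs are stripped so they cannot alias a generic profile. bitmap_andnot(dst, src1, src2, nbits) computes dst = src1 & ~src2, so the call is effectively:

	/* params->ptypes &= ~ice_ptypes_pppoe */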
if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const unsigned long *)ice_ptypes_udp_il;
bitmap_and(params->ptypes, params->ptypes, src,
@@ -334,12 +797,89 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+ src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
+ (const unsigned long *)ice_ptypes_icmp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const unsigned long *)ice_ptypes_gre_of;
bitmap_and(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
}
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
+ src = (const unsigned long *)ice_ptypes_gtpc;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
+ src = (const unsigned long *)ice_ptypes_gtpc_tid;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with downlink */
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with uplink */
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ /* Attributes for GTP packet with Extension Header */
+ params->attr = ice_attr_gtpu_eh;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
+ src = (const unsigned long *)ice_ptypes_gtpu;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
+ src = (const unsigned long *)ice_ptypes_l2tpv3;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
+ src = (const unsigned long *)ice_ptypes_esp;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
+ src = (const unsigned long *)ice_ptypes_ah;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
+ src = (const unsigned long *)ice_ptypes_nat_t_esp;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
+ if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
+ src = (const unsigned long *)ice_ptypes_pfcp_node;
+ else
+ src = (const unsigned long *)ice_ptypes_pfcp_session;
+
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else {
+ src = (const unsigned long *)ice_ptypes_pfcp_node;
+ bitmap_andnot(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+
+ src = (const unsigned long *)ice_ptypes_pfcp_session;
+ bitmap_andnot(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
}
}
@@ -352,6 +892,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* @params: information about the flow to be processed
* @seg: packet segment index of the field to be extracted
* @fld: ID of field to be extracted
+ * @match: bitmap of the fields being matched in this segment
*
* This function determines the protocol ID, offset, and size of the given
* field. It then allocates one or more extraction sequence entries for the
@@ -359,17 +900,73 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
*/
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
- u8 seg, enum ice_flow_field fld)
+ u8 seg, enum ice_flow_field fld, u64 match)
{
+ enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
+ u16 sib_mask = 0;
+ u16 mask;
u16 off;
flds = params->prof->segs[seg].fields;
switch (fld) {
+ case ICE_FLOW_FIELD_IDX_ETH_DA:
+ case ICE_FLOW_FIELD_IDX_ETH_SA:
+ case ICE_FLOW_FIELD_IDX_S_VLAN:
+ case ICE_FLOW_FIELD_IDX_C_VLAN:
+ prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_ETH_TYPE:
+ prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV4_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+
+ /* TTL and PROT share the same extraction sequence entry, so
+ * each is treated as the other's sibling.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
+
+ /* If the sibling field is also included, that field's
+ * mask needs to be included.
+ */
+ if (match & BIT(sib))
+ sib_mask = ice_flds_info[sib].mask;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV6_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+
+ /* TTL and PROT share the same extraction sequence entry, so
+ * each is treated as the other's sibling.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
+
+ /* If the sibling field is also included, that field's
+ * mask needs to be included.
+ */
+ if (match & BIT(sib))
+ sib_mask = ice_flds_info[sib].mask;
+ break;
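
To make the sibling masking concrete: the IPv6 next header (PROT, mask 0xff00) and hop limit (TTL, mask 0x00ff) sit in the same 16-bit extraction word at byte offset 6, so when both fields are requested the one shared ES entry ends up carrying the OR of the two masks:

	/* params->mask[idx] = mask | sib_mask
	 *		     = 0x00ff (TTL) | 0xff00 (PROT) = 0xffff
	 */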
case ICE_FLOW_FIELD_IDX_IPV4_SA:
case ICE_FLOW_FIELD_IDX_IPV4_DA:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
@@ -380,6 +977,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
break;
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
prot_id = ICE_PROT_TCP_IL;
break;
case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
@@ -390,6 +988,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
+ case ICE_FLOW_FIELD_IDX_GTPC_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
+ case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
+ /* GTP is accessed through UDP OF protocol */
+ prot_id = ICE_PROT_UDP_OF;
+ break;
+ case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
+ prot_id = ICE_PROT_PPPOE;
+ break;
+ case ICE_FLOW_FIELD_IDX_PFCP_SEID:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
+ prot_id = ICE_PROT_L2TPV3;
+ break;
+ case ICE_FLOW_FIELD_IDX_ESP_SPI:
+ prot_id = ICE_PROT_ESP_F;
+ break;
+ case ICE_FLOW_FIELD_IDX_AH_SPI:
+ prot_id = ICE_PROT_ESP_2;
+ break;
+ case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_ARP_SIP:
+ case ICE_FLOW_FIELD_IDX_ARP_DIP:
+ case ICE_FLOW_FIELD_IDX_ARP_SHA:
+ case ICE_FLOW_FIELD_IDX_ARP_DHA:
+ case ICE_FLOW_FIELD_IDX_ARP_OP:
+ prot_id = ICE_PROT_ARP_OF;
+ break;
+ case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
+ case ICE_FLOW_FIELD_IDX_ICMP_CODE:
+ /* ICMP type and code share the same extraction seq. entry */
+ prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
+ ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
+ sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
+ ICE_FLOW_FIELD_IDX_ICMP_CODE :
+ ICE_FLOW_FIELD_IDX_ICMP_TYPE;
+ break;
case ICE_FLOW_FIELD_IDX_GRE_KEYID:
prot_id = ICE_PROT_GRE_OF;
break;
@@ -407,6 +1048,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
ICE_FLOW_FV_EXTRACT_SZ;
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
+ flds[fld].xtrct.mask = ice_flds_info[fld].mask;
/* Adjust the next field-entry index after accommodating the number of
* entries this field consumes
@@ -416,24 +1058,34 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Fill in the extraction sequence entries needed for this field */
off = flds[fld].xtrct.off;
+ mask = flds[fld].xtrct.mask;
for (i = 0; i < cnt; i++) {
- u8 idx;
-
- /* Make sure the number of extraction sequence required
- * does not exceed the block's capability
+ /* Only consume an extraction sequence entry if there is no
+ * sibling field associated with this field or the sibling entry
+ * already extracts the word shared with this field.
*/
- if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
+ if (sib == ICE_FLOW_FIELD_IDX_MAX ||
+ flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
+ flds[sib].xtrct.off != off) {
+ u8 idx;
- /* some blocks require a reversed field vector layout */
- if (hw->blk[params->blk].es.reverse)
- idx = fv_words - params->es_cnt - 1;
- else
- idx = params->es_cnt;
+ /* Make sure the number of extraction sequence required
+ * does not exceed the block's capability
+ */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
- params->es[idx].prot_id = prot_id;
- params->es[idx].off = off;
- params->es_cnt++;
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = prot_id;
+ params->es[idx].off = off;
+ params->mask[idx] = mask | sib_mask;
+ params->es_cnt++;
+ }
off += ICE_FLOW_FV_EXTRACT_SZ;
}
@@ -533,14 +1185,15 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
- u8 j;
+ u64 match = params->prof->segs[i].match;
+ enum ice_flow_field j;
- for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
+ for_each_set_bit(j, (unsigned long *)&match,
ICE_FLOW_FIELD_IDX_MAX) {
- status = ice_flow_xtract_fld(hw, params, i,
- (enum ice_flow_field)j);
+ status = ice_flow_xtract_fld(hw, params, i, j, match);
if (status)
return status;
+ clear_bit(j, (unsigned long *)&match);
}
/* Process raw matching bytes */
@@ -751,7 +1404,8 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
- params->es);
+ params->attr, params->attr_cnt, params->es,
+ params->mask);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1158,6 +1812,9 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
seg->raws_cnt++;
}
+#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
+ (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
+
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
@@ -1165,7 +1822,8 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
- (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
@@ -1193,7 +1851,8 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
ICE_FLOW_SET_HDRS(segs, flow_hdr);
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
@@ -1349,9 +2008,9 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
* [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
*/
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
- (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
- (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
- ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
+ ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+ ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
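
A worked expansion of the macro above for a hypothetical two-segment (tunneled) profile; segs_cnt == 2 makes (u8)(segs_cnt - 1) nonzero, so the encapsulation bit is set:

	/* ICE_FLOW_GEN_PROFID(hash, hdr, 2) ==
	 *	 ((u64)hash & ICE_FLOW_PROF_HASH_M) |
	 *	(((u64)hdr << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) |
	 *	ICE_FLOW_PROF_ENCAP_M
	 * segs_cnt == 1 leaves bit 63 clear instead
	 */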
/**
* ice_add_rss_cfg_sync - add an RSS configuration
@@ -1490,6 +2149,94 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
return status;
}
+/**
+ * ice_rem_rss_cfg_sync - remove an existing RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ * @segs_cnt: packet segment count
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs, u8 segs_cnt)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_seg_info *segs;
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
+ if (!segs)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Construct the packet segment info from the hashed fields */
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
+ if (status)
+ goto out;
+
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+ if (status)
+ goto out;
+
+ /* Remove RSS configuration from VSI context before deleting
+ * the flow profile.
+ */
+ ice_rem_rss_list(hw, vsi_handle, prof);
+
+ if (bitmap_empty(prof->vsis, ICE_MAX_VSI))
+ status = ice_flow_rem_prof(hw, blk, prof->id);
+
+out:
+ kfree(segs);
+ return status;
+}
+
+/**
+ * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ *
+ * This function will look up the flow profile based on the input
+ * hash field bitmap, iterate through the profile entry list of
+ * that profile and find the entry associated with the input VSI to
+ * be removed. Calls are made to the underlying flow APIs which will
+ * in turn build or update buffers for the RSS XLT1 section.
+ */
+enum ice_status __maybe_unused
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs)
+{
+ enum ice_status status;
+
+ if (hashed_flds == ICE_HASH_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
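/* Usage sketch, not part of the patch: dropping the TCP-over-IPv4 RSS
 * configuration added earlier via ice_add_rss_cfg(). Treating
 * ICE_ERR_DOES_NOT_EXIST as success on teardown is an assumption of this
 * example, not driver policy.
 */
static enum ice_status example_rem_tcp4_rss(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	status = ice_rem_rss_cfg(hw, vsi_handle,
				 ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
	if (status == ICE_ERR_DOES_NOT_EXIST)
		status = 0;	/* nothing was configured; nothing to remove */

	return status;
}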
/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
* As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
* convert its values to their appropriate flow L3, L4 values.
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 829f90b1e998..2a2d8c1536cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -8,6 +8,9 @@
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
/* Generate flow hash field from flow field type(s) */
+#define ICE_FLOW_HASH_ETH \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
@@ -30,6 +33,80 @@
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_FLOW_HASH_GTP_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
+
+#define ICE_FLOW_HASH_GTP_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+
+#define ICE_FLOW_HASH_GTP_U_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
+#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)
+
+#define ICE_FLOW_HASH_GTP_U_EH_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_EH_QFI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
+ ICE_FLOW_HASH_GTP_U_EH_QFI)
+#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
+ ICE_FLOW_HASH_GTP_U_EH_QFI)
+
+#define ICE_FLOW_HASH_PPPOE_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
+
+#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
+#define ICE_FLOW_HASH_PPPOE_TCP_ID \
+ (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
+#define ICE_FLOW_HASH_PPPOE_UDP_ID \
+ (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
+
+#define ICE_FLOW_HASH_PFCP_SEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
+#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
+#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)
+
+#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
+#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
+
+#define ICE_FLOW_HASH_ESP_SPI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
+#define ICE_FLOW_HASH_ESP_IPV4_SPI \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
+#define ICE_FLOW_HASH_ESP_IPV6_SPI \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)
+
+#define ICE_FLOW_HASH_AH_SPI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
+#define ICE_FLOW_HASH_AH_IPV4_SPI \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
+#define ICE_FLOW_HASH_AH_IPV6_SPI \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)
+
+#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
+#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+
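/* Compile-time sanity sketch, not part of the patch: the composite hash
 * macros above are plain ORs of BIT_ULL() field indices, so an identity
 * like the following holds by construction (BUILD_BUG_ON is from
 * <linux/build_bug.h>).
 */
static inline void example_hash_layout_check(void)
{
	BUILD_BUG_ON(ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI !=
		     (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		      BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		      BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI)));
}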
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
@@ -38,16 +115,66 @@
*/
enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_NONE = 0x00000000,
+ ICE_FLOW_SEG_HDR_ETH = 0x00000001,
+ ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
+ ICE_FLOW_SEG_HDR_ARP = 0x00000010,
+ ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
+ ICE_FLOW_SEG_HDR_GTPC = 0x00000400,
+ ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
+ ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
+ ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
+ ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
+ ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
+ ICE_FLOW_SEG_HDR_PPPOE = 0x00010000,
+ ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000,
+ ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000,
+ ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000,
+ ICE_FLOW_SEG_HDR_ESP = 0x00100000,
+ ICE_FLOW_SEG_HDR_AH = 0x00200000,
+ ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
+ ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
+ /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
+	 * ICE_FLOW_SEG_HDR_IPV6 which include the IPv4/IPv6 "other" PTYPEs
+ */
+ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
};
+/* These segments all have the same PTYPES, but are otherwise distinguished by
+ * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
+ *
+ * gtp_eh_pdu gtp_eh_pdu_link
+ * ICE_FLOW_SEG_HDR_GTPU_IP 0 0
+ * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care
+ * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0
+ * ICE_FLOW_SEG_HDR_GTPU_UP 1 1
+ */
+#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
+ ICE_FLOW_SEG_HDR_GTPU_EH | \
+ ICE_FLOW_SEG_HDR_GTPU_DWN | \
+ ICE_FLOW_SEG_HDR_GTPU_UP)
+#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
+ ICE_FLOW_SEG_HDR_PFCP_SESSION)
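/* Sketch, not part of the patch: one way a caller could map the two
 * parser flags from the GTPU table above onto a segment header. The
 * helper and its bool parameters are hypothetical; ICE_FLOW_SEG_HDR_GTPU_EH
 * would match either result, since its link flag is "don't care".
 */
static u32 example_gtpu_seg_hdr(bool gtp_eh_pdu, bool gtp_eh_pdu_link)
{
	if (!gtp_eh_pdu)
		return ICE_FLOW_SEG_HDR_GTPU_IP;

	return gtp_eh_pdu_link ? ICE_FLOW_SEG_HDR_GTPU_UP
			       : ICE_FLOW_SEG_HDR_GTPU_DWN;
}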
+
enum ice_flow_field {
+ /* L2 */
+ ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FIELD_IDX_ETH_TYPE,
/* L3 */
+ ICE_FLOW_FIELD_IDX_IPV4_DSCP,
+ ICE_FLOW_FIELD_IDX_IPV6_DSCP,
+ ICE_FLOW_FIELD_IDX_IPV4_TTL,
+ ICE_FLOW_FIELD_IDX_IPV4_PROT,
+ ICE_FLOW_FIELD_IDX_IPV6_TTL,
+ ICE_FLOW_FIELD_IDX_IPV6_PROT,
ICE_FLOW_FIELD_IDX_IPV4_SA,
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
@@ -59,9 +186,42 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+ /* ARP */
+ ICE_FLOW_FIELD_IDX_ARP_SIP,
+ ICE_FLOW_FIELD_IDX_ARP_DIP,
+ ICE_FLOW_FIELD_IDX_ARP_SHA,
+ ICE_FLOW_FIELD_IDX_ARP_DHA,
+ ICE_FLOW_FIELD_IDX_ARP_OP,
+ /* ICMP */
+ ICE_FLOW_FIELD_IDX_ICMP_TYPE,
+ ICE_FLOW_FIELD_IDX_ICMP_CODE,
/* GRE */
ICE_FLOW_FIELD_IDX_GRE_KEYID,
- /* The total number of enums must not exceed 64 */
+ /* GTPC_TEID */
+ ICE_FLOW_FIELD_IDX_GTPC_TEID,
+ /* GTPU_IP */
+ ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
+ /* GTPU_EH */
+ ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
+ ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
+ /* GTPU_UP */
+ ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ /* GTPU_DWN */
+ ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
+ /* PPPoE */
+ ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
+ /* PFCP */
+ ICE_FLOW_FIELD_IDX_PFCP_SEID,
+ /* L2TPv3 */
+ ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
+ /* ESP */
+ ICE_FLOW_FIELD_IDX_ESP_SPI,
+ /* AH */
+ ICE_FLOW_FIELD_IDX_AH_SPI,
+ /* NAT_T ESP */
+ ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ /* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
@@ -138,6 +298,7 @@ struct ice_flow_seg_xtrct {
u16 off; /* Starting offset of the field in header in bytes */
u8 idx; /* Index of FV entry used */
	u8 disp;	/* Displacement of field in bits from FV entry's start */
+ u16 mask; /* Mask for field */
};
enum ice_flow_fld_match_type {
@@ -248,5 +409,8 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 093a1818a392..de38a0fc9665 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -130,6 +130,7 @@
#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3)
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
+#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
@@ -306,8 +307,23 @@
#define GLQF_FD_SIZE_FD_BSIZE_S 16
#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4))
+#define GLQF_FDMASK_MAX_INDEX 31
+#define GLQF_FDMASK_MSK_INDEX_S 0
+#define GLQF_FDMASK_MSK_INDEX_M ICE_M(0x1F, 0)
+#define GLQF_FDMASK_MASK_S 16
+#define GLQF_FDMASK_MASK_M ICE_M(0xFFFF, 16)
#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4))
#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4))
+#define GLQF_HMASK_MAX_INDEX 31
+#define GLQF_HMASK_MSK_INDEX_S 0
+#define GLQF_HMASK_MSK_INDEX_M ICE_M(0x1F, 0)
+#define GLQF_HMASK_MASK_S 16
+#define GLQF_HMASK_MASK_M ICE_M(0xFFFF, 16)
+#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
+#define GLQF_HMASK_SEL_MAX_INDEX 127
+#define GLQF_HMASK_SEL_MASK_SEL_S 0
#define PFQF_FD_ENA 0x0043A000
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100
@@ -369,6 +385,9 @@
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
+#define VSIQF_FD_CNT_FD_BCNT_S 16
+#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
+#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define PFPM_APM 0x000B8080
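/* Illustrative decode, not part of the patch: reading the guaranteed
 * flow-director filter count for a VSI with the new VSIQF_FD_CNT fields,
 * using the driver's usual rd32() and _S/_M shift/mask convention. The
 * helper name and "vsi_num" (an absolute VSI number) are hypothetical.
 */
static u32 example_fd_guar_cnt(struct ice_hw *hw, u16 vsi_num)
{
	u32 val = rd32(hw, VSIQF_FD_CNT(vsi_num));

	return (val & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
}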
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 4ec24c3e813f..21329ed3087e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -55,6 +55,7 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
(0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL
#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
@@ -128,6 +129,7 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL
#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
@@ -138,6 +140,26 @@ struct ice_fltr_desc {
(0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
+/* definition for FD filter programming status descriptor WB format */
+#define ICE_FXD_FLTR_WB_QW1_DD_S 0
+#define ICE_FXD_FLTR_WB_QW1_DD_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_DD_S)
+#define ICE_FXD_FLTR_WB_QW1_DD_YES 0x1ULL
+
+#define ICE_FXD_FLTR_WB_QW1_PROG_ID_S 1
+#define ICE_FXD_FLTR_WB_QW1_PROG_ID_M \
+ (0x3ULL << ICE_FXD_FLTR_WB_QW1_PROG_ID_S)
+#define ICE_FXD_FLTR_WB_QW1_PROG_ADD 0x0ULL
+#define ICE_FXD_FLTR_WB_QW1_PROG_DEL 0x1ULL
+
+#define ICE_FXD_FLTR_WB_QW1_FAIL_S 4
+#define ICE_FXD_FLTR_WB_QW1_FAIL_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_S)
+#define ICE_FXD_FLTR_WB_QW1_FAIL_YES 0x1ULL
+
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S 5
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M \
+ (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
+#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
+
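/* Sketch, not part of the patch: decoding the writeback fields defined
 * above from a completed programming descriptor. "qw1" is assumed to be
 * the descriptor's second quadword already converted with le64_to_cpu().
 */
static bool example_fd_prog_failed(u64 qw1)
{
	/* descriptor not done yet: no status to report */
	if (!(qw1 & ICE_FXD_FLTR_WB_QW1_DD_M))
		return false;

	return !!(qw1 & ICE_FXD_FLTR_WB_QW1_FAIL_M);
}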
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d13c7fc8fb0a..82e2ce23df3d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -158,6 +158,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
+ else
+ vsi->vf_id = ICE_INVAL_VFID;
switch (vsi->type) {
case ICE_VSI_PF:
@@ -343,6 +345,9 @@ static int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
+ if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
+ vsi->vf_id != ICE_INVAL_VFID)
+ pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
@@ -382,6 +387,8 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
+ q_vector->total_events++;
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -419,7 +426,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
vsi->type = vsi_type;
vsi->back = pf;
- set_bit(__ICE_DOWN, vsi->state);
+ set_bit(ICE_VSI_DOWN, vsi->state);
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
@@ -454,8 +461,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL) {
- /* Use the last VSI slot as the index for the control VSI */
+ if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+ /* Use the last VSI slot as the index for PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
pf->vsi[vsi->idx] = vsi;
@@ -468,6 +475,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
}
+
+ if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
+ pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
@@ -506,7 +516,7 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
- if (vsi->type != ICE_VSI_PF)
+ if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
return -EPERM;
if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
@@ -517,6 +527,13 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
+ if (vsi->type == ICE_VSI_VF) {
+ vsi->num_gfltr = 0;
+
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ }
+
return 0;
}
@@ -729,11 +746,10 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
- u16 tx_numq_tc, rx_numq_tc;
- u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
@@ -751,12 +767,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
- rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
- if (!rx_numq_tc)
- rx_numq_tc = 1;
- tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
- if (!tx_numq_tc)
- tx_numq_tc = 1;
+ num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
+ if (!num_rxq_per_tc)
+ num_rxq_per_tc = 1;
+ num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
+ if (!num_txq_per_tc)
+ num_txq_per_tc = 1;
+
+ /* find the (rounded up) power-of-2 of qcount */
+ pow = (u16)order_base_2(num_rxq_per_tc);
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -769,26 +788,6 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
*
* Setup number and offset of Rx queues for all TCs for the VSI
*/
-
- qcount_rx = rx_numq_tc;
-
- /* qcount will change if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
- if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
- if (vsi->type == ICE_VSI_PF)
- max_rss = ICE_MAX_LG_RSS_QS;
- else
- max_rss = ICE_MAX_RSS_QS_PER_VF;
- qcount_rx = min_t(u16, rx_numq_tc, max_rss);
- if (!vsi->req_rxq)
- qcount_rx = min_t(u16, qcount_rx,
- vsi->rss_size);
- }
- }
-
- /* find the (rounded up) power-of-2 of qcount */
- pow = (u16)order_base_2(qcount_rx);
-
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
@@ -802,16 +801,16 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
- vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+ vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
+ vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount_rx;
- tx_count += tx_numq_tc;
+ offset += num_rxq_per_tc;
+ tx_count += num_txq_per_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
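/* Worked example, not part of the patch: with num_rxq_per_tc == 16 and a
 * TC starting at queue 16, pow = order_base_2(16) = 4, so the qmap entry
 * encodes "offset 16, 2^4 contiguous queues". A hypothetical helper:
 */
static u16 example_tc_qmap(u16 offset, u16 nqueues)
{
	u16 pow = (u16)order_base_2(nqueues);

	return ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		ICE_AQ_VSI_TC_Q_OFFSET_M) |
	       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
}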
@@ -824,7 +823,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
if (offset)
vsi->num_rxq = offset;
else
- vsi->num_rxq = qcount_rx;
+ vsi->num_rxq = num_rxq_per_tc;
vsi->num_txq = tx_count;
@@ -856,7 +855,8 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u8 dflt_q_group, dflt_q_prio;
u16 dflt_q, report_q, val;
- if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
+ vsi->type != ICE_VSI_VF)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
@@ -1179,7 +1179,24 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
+ struct ice_vf *vf;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ vf = &pf->vf[i];
+ if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ break;
+ }
+ }
+ if (i == pf->num_alloc_vfs)
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+ } else {
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ vsi->idx);
+ }
if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
@@ -1296,14 +1313,13 @@ err_out:
* LUT, while in the event of enable request for RSS, it will reconfigure RSS
* LUT.
*/
-int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
+void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
- int err = 0;
u8 *lut;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
- return -ENOMEM;
+ return;
if (ena) {
if (vsi->rss_lut_user)
@@ -1313,9 +1329,8 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
vsi->rss_size);
}
- err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
+ ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
kfree(lut);
- return err;
}
/**
@@ -1324,12 +1339,10 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
*/
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
- struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
- int err = 0;
- u8 *lut;
+ u8 *lut, *key;
+ int err;
dev = ice_pf_to_dev(pf);
vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
@@ -1343,37 +1356,26 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
else
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
- status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
- vsi->rss_table_size);
-
- if (status) {
- dev_err(dev, "set_rss_lut failed, error %s\n",
- ice_stat_str(status));
- err = -EIO;
+ err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
+ if (err) {
+ dev_err(dev, "set_rss_lut failed, error %d\n", err);
goto ice_vsi_cfg_rss_exit;
}
- key = kzalloc(sizeof(*key), GFP_KERNEL);
+ key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
}
if (vsi->rss_hkey_user)
- memcpy(key,
- (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
- ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
+ memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
else
- netdev_rss_key_fill((void *)key,
- ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
+ netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
- status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
-
- if (status) {
- dev_err(dev, "set_rss_key failed, error %s\n",
- ice_stat_str(status));
- err = -EIO;
- }
+ err = ice_set_rss_key(vsi, key);
+ if (err)
+ dev_err(dev, "set_rss_key failed, error %d\n", err);
kfree(key);
ice_vsi_cfg_rss_exit:
@@ -1502,13 +1504,13 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
*/
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
- DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+ DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
if (!pf)
return false;
- bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
- if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
return false;
return true;
@@ -1773,7 +1775,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
* This function converts a decimal interrupt rate limit in usecs to the format
* expected by firmware.
*/
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
u32 val = intrl / gran;
@@ -1783,6 +1785,51 @@ u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_write_intrl - write throttle rate limit to interrupt specific register
+ * @q_vector: pointer to interrupt specific structure
+ * @intrl: throttle rate limit in microseconds to write
+ */
+void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
+{
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
+}
+
+/**
+ * __ice_write_itr - write throttle rate to register
+ * @q_vector: pointer to interrupt data structure
+ * @rc: pointer to ring container
+ * @itr: throttle rate in microseconds to write
+ */
+static void __ice_write_itr(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc, u16 itr)
+{
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
+}
+
+/**
+ * ice_write_itr - write throttle rate to queue specific register
+ * @rc: pointer to ring container
+ * @itr: throttle rate in microseconds to write
+ */
+void ice_write_itr(struct ice_ring_container *rc, u16 itr)
+{
+ struct ice_q_vector *q_vector;
+
+ if (!rc->ring)
+ return;
+
+ q_vector = rc->ring->q_vector;
+
+ __ice_write_itr(q_vector, rc, itr);
+}
+
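/* Usage sketch, not part of the patch: pinning every Rx ITR on a VSI to
 * a fixed 50 us. The constant is purely illustrative; a real caller might
 * skip vectors whose itr_setting has ITR_IS_DYNAMIC set.
 */
static void example_pin_rx_itr(struct ice_vsi *vsi)
{
	int i;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_ring_container *rc = &vsi->q_vectors[i]->rx;

		rc->itr_setting = 50;
		ice_write_itr(rc, rc->itr_setting);
	}
}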
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*
@@ -1802,9 +1849,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
ice_cfg_itr(hw, q_vector);
- wr32(hw, GLINT_RATE(reg_idx),
- ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
-
/* Both Transmit Queue Interrupt Cause Control register
* and Receive Queue Interrupt Cause control register
* expects MSIX_INDX field to be the vector index
@@ -2308,7 +2352,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF)
+ if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
@@ -2323,7 +2367,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF)
+ if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
vsi->vf_id = vf_id;
ice_alloc_fd_res(vsi);
@@ -2492,11 +2536,10 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- u16 reg_idx = q_vector->reg_idx;
- wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
- wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
+ ice_write_intrl(q_vector, 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
+ ice_write_itr(&q_vector->tx, 0);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
if (ice_is_xdp_ena_vsi(vsi)) {
u32 xdp_txq = txq + vsi->num_xdp_txq;
@@ -2507,6 +2550,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
+ ice_write_itr(&q_vector->rx, 0);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
rxq++;
}
@@ -2593,7 +2637,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
*/
void ice_vsi_close(struct ice_vsi *vsi)
{
- if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
ice_down(vsi);
ice_vsi_free_irq(vsi);
@@ -2610,10 +2654,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
int err = 0;
- if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
return 0;
- clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+ clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
if (netif_running(vsi->netdev)) {
@@ -2639,10 +2683,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
*/
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(__ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return;
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
@@ -2752,11 +2796,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
* PF that is running the work queue items currently. This is done to
* avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state) &&
+ (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) {
unregister_netdev(vsi->netdev);
- ice_devlink_destroy_port(vsi);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
+ ice_devlink_destroy_port(vsi);
+
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -2770,7 +2817,24 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type != ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
+ struct ice_vf *vf;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ vf = &pf->vf[i];
+ if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
+ break;
+ }
+ if (i == pf->num_alloc_vfs) {
+ /* No other VFs left that have control VSI, reclaim SW
+ * interrupts back to the common pool
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ }
+ } else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
@@ -2794,10 +2858,16 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
- /* make sure unregister_netdev() was called by checking __ICE_DOWN */
- if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
+ if (vsi->netdev) {
+ if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ }
+ if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ }
}
if (vsi->type == ICE_VSI_VF &&
@@ -2818,39 +2888,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
/**
- * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
- * @q_vector: pointer to q_vector which is being updated
- * @coalesce: pointer to array of struct with stored coalesce
- *
- * Set coalesce param in q_vector and update these parameters in HW.
- */
-static void
-ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
- struct ice_coalesce_stored *coalesce)
-{
- struct ice_ring_container *rx_rc = &q_vector->rx;
- struct ice_ring_container *tx_rc = &q_vector->tx;
- struct ice_hw *hw = &q_vector->vsi->back->hw;
-
- tx_rc->itr_setting = coalesce->itr_tx;
- rx_rc->itr_setting = coalesce->itr_rx;
-
- /* dynamic ITR values will be updated during Tx/Rx */
- if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
- wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(tx_rc->itr_setting) >>
- ICE_ITR_GRAN_S);
- if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
- wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rx_rc->itr_setting) >>
- ICE_ITR_GRAN_S);
-
- q_vector->intrl = coalesce->intrl;
- wr32(hw, GLINT_RATE(q_vector->reg_idx),
- ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
-}
-
-/**
* ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
* @vsi: VSI connected with q_vectors
* @coalesce: array of struct with stored coalesce
@@ -2869,6 +2906,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
coalesce[i].itr_tx = q_vector->tx.itr_setting;
coalesce[i].itr_rx = q_vector->rx.itr_setting;
coalesce[i].intrl = q_vector->intrl;
+
+ if (i < vsi->num_txq)
+ coalesce[i].tx_valid = true;
+ if (i < vsi->num_rxq)
+ coalesce[i].rx_valid = true;
}
return vsi->num_q_vectors;
@@ -2888,22 +2930,75 @@ static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
struct ice_coalesce_stored *coalesce, int size)
{
+ struct ice_ring_container *rc;
int i;
if ((size && !coalesce) || !vsi)
return;
- for (i = 0; i < size && i < vsi->num_q_vectors; i++)
- ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce[i]);
+ /* There are a couple of cases that have to be handled here:
+ * 1. The case where the number of queue vectors stays the same, but
+ * the number of Tx or Rx rings changes (the first for loop)
+ * 2. The case where the number of queue vectors increased (the
+ * second for loop)
+ */
+ for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
+ /* There are 2 cases to handle here and they are the same for
+ * both Tx and Rx:
+	 * if the entry was valid previously (coalesce[i].[tr]x_valid)
+ * and the loop variable is less than the number of rings
+ * allocated, then write the previous values
+ *
+	 * if the entry was not valid previously, but the loop variable
+	 * is still less than the number of rings allocated (meaning the
+	 * ring count increased from before), then write out the values
+	 * from the first element
+ *
+	 * Also, always write the ITR, even if ITR_IS_DYNAMIC is set,
+	 * as there is no harm: the dynamic algorithm will simply
+	 * overwrite the value.
+ */
+ if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
+ rc = &vsi->q_vectors[i]->rx;
+ rc->itr_setting = coalesce[i].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_rxq) {
+ rc = &vsi->q_vectors[i]->rx;
+ rc->itr_setting = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+ if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
+ rc = &vsi->q_vectors[i]->tx;
+ rc->itr_setting = coalesce[i].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_txq) {
+ rc = &vsi->q_vectors[i]->tx;
+ rc->itr_setting = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+ vsi->q_vectors[i]->intrl = coalesce[i].intrl;
+ ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
+ }
- /* number of q_vectors increased, so assume coalesce settings were
- * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
- * the previous settings from q_vector 0 for all of the new q_vectors
+ /* the number of queue vectors increased so write whatever is in
+ * the first element
*/
- for (; i < vsi->num_q_vectors; i++)
- ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce[0]);
+ for (; i < vsi->num_q_vectors; i++) {
+ /* transmit */
+ rc = &vsi->q_vectors[i]->tx;
+ rc->itr_setting = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ /* receive */
+ rc = &vsi->q_vectors[i]->rx;
+ rc->itr_setting = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ vsi->q_vectors[i]->intrl = coalesce[0].intrl;
+ ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
+ }
}
/**
@@ -2919,6 +3014,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
+ enum ice_vsi_type vtype;
enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -2927,14 +3023,17 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return -EINVAL;
pf = vsi->back;
- if (vsi->type == ICE_VSI_VF)
+ vtype = vsi->type;
+ if (vtype == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (coalesce)
- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
- coalesce);
+ if (!coalesce)
+ return -ENOMEM;
+
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
@@ -2943,7 +3042,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type != ICE_VSI_VF) {
+ if (vtype != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
@@ -2958,7 +3057,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
- if (vsi->type == ICE_VSI_VF)
+ if (vtype == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
@@ -2977,7 +3076,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (ret < 0)
goto err_vsi;
- switch (vsi->type) {
+ switch (vtype) {
case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3004,7 +3103,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vectors;
}
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
- if (vsi->type != ICE_VSI_CTRL)
+ if (vtype != ICE_VSI_CTRL)
		/* Do not exit if configuring RSS had an issue, so that we
		 * can at least receive traffic on the first queue. Hence
		 * there is no need to capture the return value.
@@ -3066,7 +3165,7 @@ err_rings:
}
err_vsi:
ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, pf->state);
+ set_bit(ICE_RESET_FAILED, pf->state);
kfree(coalesce);
return ret;
}
@@ -3077,10 +3176,10 @@ err_vsi:
*/
bool ice_is_reset_in_progress(unsigned long *state)
{
- return test_bit(__ICE_RESET_OICR_RECV, state) ||
- test_bit(__ICE_PFR_REQ, state) ||
- test_bit(__ICE_CORER_REQ, state) ||
- test_bit(__ICE_GLOBR_REQ, state);
+ return test_bit(ICE_RESET_OICR_RECV, state) ||
+ test_bit(ICE_PFR_REQ, state) ||
+ test_bit(ICE_CORER_REQ, state) ||
+ test_bit(ICE_GLOBR_REQ, state);
}
#ifdef CONFIG_DCB
@@ -3168,20 +3267,15 @@ out:
/**
* ice_update_ring_stats - Update ring statistics
* @ring: ring to update
- * @cont: used to increment per-vector counters
* @pkts: number of processed packets
* @bytes: number of processed bytes
*
 * This function assumes that the caller has acquired a u64_stats_sync lock.
*/
-static void
-ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
- u64 pkts, u64 bytes)
+static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
{
ring->stats.bytes += bytes;
ring->stats.pkts += pkts;
- cont->total_bytes += bytes;
- cont->total_pkts += pkts;
}
/**
@@ -3193,7 +3287,7 @@ ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
+ ice_update_ring_stats(tx_ring, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp);
}
@@ -3206,7 +3300,7 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
+ ice_update_ring_stats(rx_ring, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp);
}
@@ -3348,3 +3442,40 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
return 0;
}
+
+/**
+ * ice_set_link - turn on/off physical link
+ * @vsi: VSI to modify physical link on
+ * @ena: turn on/off physical link
+ */
+int ice_set_link(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_port_info *pi = vsi->port_info;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ status = ice_aq_set_link_restart_an(pi, ena, NULL);
+
+	/* If link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
+	 * This is not a fatal error, so print a warning message and return
+	 * a success code. Return an error if FW returns an error code other
+	 * than ICE_AQ_RC_EMODE.
+ */
+ if (status == ICE_ERR_AQ_ERROR) {
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else if (status) {
+ dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
+ (ena ? "ON" : "OFF"), ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return -EIO;
+ }
+
+ return 0;
+}
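/* Caller sketch, not part of the patch: because ice_set_link() absorbs
 * ICE_AQ_RC_EMODE, a hypothetical "force port down" path only sees real
 * failures (-EINVAL for a non-PF VSI, -EIO for other AQ errors).
 */
static int example_force_port_down(struct ice_vsi *vsi)
{
	return ice_set_link(vsi, false);
}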
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 3da17895a2b1..511c2316c40c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -45,6 +45,8 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+int ice_set_link(struct ice_vsi *vsi, bool ena);
+
#ifdef CONFIG_DCB
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
#endif /* CONFIG_DCB */
@@ -83,7 +85,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
-int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
@@ -93,7 +95,8 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
int ice_status_to_errno(enum ice_status err);
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
+void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
+void ice_write_itr(struct ice_ring_container *rc, u16 itr);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d821c687f239..4ee85a217c6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -84,7 +84,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
break;
}
- if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
return;
if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
@@ -140,21 +140,10 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
perm_addr = vsi->port_info->mac.perm_addr;
status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
- if (!status)
- return 0;
-
- /* We aren't useful with no MAC filters, so unregister if we
- * had an error
- */
- if (vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
- ice_stat_str(status));
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
+ if (status)
+ return -EIO;
- return -EIO;
+ return 0;
}
/**
@@ -209,9 +198,9 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
*/
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
- return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
- test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
- test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
+ test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
+ test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}
/**
@@ -268,7 +257,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (!vsi->netdev)
return -EINVAL;
- while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
@@ -278,9 +267,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
INIT_LIST_HEAD(&vsi->tmp_unsync_list);
if (ice_vsi_fltr_changed(vsi)) {
- clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
- clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
- clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
+ clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
+ clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
/* grab the netdev's addr_list_lock */
netif_addr_lock_bh(netdev);
@@ -318,7 +307,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* space reserved for promiscuous filters.
*/
if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
- !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
+ !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
@@ -361,8 +350,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
- test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
- clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
+ test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
+ clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
@@ -395,14 +384,14 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto exit;
out_promisc:
- set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
goto exit;
out:
/* if something went wrong then set the changed flag so we try again */
- set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
- set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
+ set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
- clear_bit(__ICE_CFG_BUSY, vsi->state);
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return err;
}
@@ -447,7 +436,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
pf->vf_agg_node[node].num_vsis = 0;
-
}
/**
@@ -463,7 +451,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
unsigned int i;
/* already prepared for reset */
- if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
/* Notify VFs of impending reset */
@@ -484,7 +472,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_shutdown_all_ctrlq(hw);
- set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
/**
@@ -505,12 +493,12 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
dev_err(dev, "reset %d failed\n", reset_type);
- set_bit(__ICE_RESET_FAILED, pf->state);
- clear_bit(__ICE_RESET_OICR_RECV, pf->state);
- clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
- clear_bit(__ICE_PFR_REQ, pf->state);
- clear_bit(__ICE_CORER_REQ, pf->state);
- clear_bit(__ICE_GLOBR_REQ, pf->state);
+ set_bit(ICE_RESET_FAILED, pf->state);
+ clear_bit(ICE_RESET_OICR_RECV, pf->state);
+ clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(ICE_PFR_REQ, pf->state);
+ clear_bit(ICE_CORER_REQ, pf->state);
+ clear_bit(ICE_GLOBR_REQ, pf->state);
return;
}
@@ -521,8 +509,8 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
ice_rebuild(pf, reset_type);
- clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
- clear_bit(__ICE_PFR_REQ, pf->state);
+ clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(ICE_PFR_REQ, pf->state);
ice_reset_all_vfs(pf, true);
}
}
@@ -538,20 +526,20 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what type
* of reset is pending and sets bits in pf->state indicating the reset
- * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
+ * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
* prepare for pending reset if not already (for PF software-initiated
* global resets the software should already be prepared for it as
- * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
+ * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
* by firmware or software on other PFs, that bit is not set so prepare
* for the reset now), poll for reset done, rebuild and return.
*/
- if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
+ if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
/* Perform the largest reset requested */
- if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+ if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
reset_type = ICE_RESET_CORER;
- if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+ if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
reset_type = ICE_RESET_GLOBR;
- if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
+ if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
reset_type = ICE_RESET_EMPR;
/* return if no valid reset type requested */
if (reset_type == ICE_RESET_INVAL)
@@ -560,7 +548,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
- set_bit(__ICE_RESET_FAILED, pf->state);
+ set_bit(ICE_RESET_FAILED, pf->state);
} else {
/* done with reset. start rebuild */
pf->hw.reset_ongoing = false;
@@ -568,11 +556,11 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* clear bit to resume normal operations, but
* ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
- clear_bit(__ICE_RESET_OICR_RECV, pf->state);
- clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
- clear_bit(__ICE_PFR_REQ, pf->state);
- clear_bit(__ICE_CORER_REQ, pf->state);
- clear_bit(__ICE_GLOBR_REQ, pf->state);
+ clear_bit(ICE_RESET_OICR_RECV, pf->state);
+ clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(ICE_PFR_REQ, pf->state);
+ clear_bit(ICE_CORER_REQ, pf->state);
+ clear_bit(ICE_GLOBR_REQ, pf->state);
ice_reset_all_vfs(pf, true);
}
@@ -580,19 +568,19 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/* No pending resets to finish processing. Check for new resets */
- if (test_bit(__ICE_PFR_REQ, pf->state))
+ if (test_bit(ICE_PFR_REQ, pf->state))
reset_type = ICE_RESET_PFR;
- if (test_bit(__ICE_CORER_REQ, pf->state))
+ if (test_bit(ICE_CORER_REQ, pf->state))
reset_type = ICE_RESET_CORER;
- if (test_bit(__ICE_GLOBR_REQ, pf->state))
+ if (test_bit(ICE_GLOBR_REQ, pf->state))
reset_type = ICE_RESET_GLOBR;
/* If no valid reset type requested just return */
if (reset_type == ICE_RESET_INVAL)
return;
/* reset if not already down or busy */
- if (!test_bit(__ICE_DOWN, pf->state) &&
- !test_bit(__ICE_CFG_BUSY, pf->state)) {
+ if (!test_bit(ICE_DOWN, pf->state) &&
+ !test_bit(ICE_CFG_BUSY, pf->state)) {
ice_do_reset(pf, reset_type);
}
}
@@ -609,7 +597,7 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
case ICE_AQ_LINK_TOPO_UNREACH_PRT:
case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
- netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
+ netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
break;
case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
@@ -731,7 +719,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
if (status)
netdev_info(vsi->netdev, "Get phy capability failed.\n");
@@ -764,7 +752,7 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
if (!vsi)
return;
- if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
+ if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
return;
if (vsi->type == ICE_VSI_PF) {
@@ -884,10 +872,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
+ enum ice_status status;
struct ice_vsi *vsi;
u16 old_link_speed;
bool old_link;
- int result;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
@@ -898,10 +886,11 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
/* update the link info structures and re-enable link events,
 	 * don't bail on failure due to other bookkeeping needed
*/
- result = ice_update_link_info(pi);
- if (result)
- dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
- pi->lport);
+ status = ice_update_link_info(pi);
+ if (status)
+ dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
+ pi->lport, ice_stat_str(status),
+ ice_aq_str(pi->hw->adminq.sq_last_status));
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now.
@@ -917,18 +906,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
-
- result = ice_aq_set_link_restart_an(pi, false, NULL);
- if (result) {
- dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
- vsi->vsi_num, result);
- return result;
- }
+ ice_set_link(vsi, false);
}
/* if the old link up/down and speed is the same as the new */
if (link_up == old_link && link_speed == old_link_speed)
- return result;
+ return 0;
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -942,7 +925,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
ice_vc_notify_link_state(pf);
- return result;
+ return 0;
}
/**
@@ -954,8 +937,8 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
int i;
/* if interface is down do nothing */
- if (test_bit(__ICE_DOWN, pf->state) ||
- test_bit(__ICE_CFG_BUSY, pf->state))
+ if (test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_CFG_BUSY, pf->state))
return;
/* make sure we don't do these things too often */
@@ -1044,7 +1027,7 @@ struct ice_aq_task {
};
/**
- * ice_wait_for_aq_event - Wait for an AdminQ event from firmware
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @opcode: the opcode to wait for
* @timeout: how long to wait, in jiffies
@@ -1199,7 +1182,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
u32 oldval, val;
/* Do not clean control queue if/when PF reset fails */
- if (test_bit(__ICE_RESET_FAILED, pf->state))
+ if (test_bit(ICE_RESET_FAILED, pf->state))
return 0;
switch (q_type) {
@@ -1210,6 +1193,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qtype = "Mailbox";
+ /* we are going to try to detect a malicious VF, so set the
+ * state to begin detection
+ */
+ hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
break;
default:
dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
@@ -1291,7 +1278,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
- ice_vc_process_vf_msg(pf, &event);
+ if (!ice_is_malicious_vf(pf, &event, i, pending))
+ ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
@@ -1334,13 +1322,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
+ if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
return;
if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
return;
- clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+ clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	/* There might be a situation where new messages arrive at a control
* queue between processing the last message and clearing the
@@ -1361,13 +1349,13 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
+ if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
return;
if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
return;
- clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+ clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
if (ice_ctrlq_pending(hw, &hw->mailboxq))
__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
@@ -1383,9 +1371,9 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
*/
void ice_service_task_schedule(struct ice_pf *pf)
{
- if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
- !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
- !test_bit(__ICE_NEEDS_RESTART, pf->state))
+ if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
+ !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
+ !test_bit(ICE_NEEDS_RESTART, pf->state))
queue_work(ice_wq, &pf->serv_task);
}
@@ -1395,32 +1383,32 @@ void ice_service_task_schedule(struct ice_pf *pf)
*/
static void ice_service_task_complete(struct ice_pf *pf)
{
- WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
+ WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
/* force memory (pf->state) to sync before next service task */
smp_mb__before_atomic();
- clear_bit(__ICE_SERVICE_SCHED, pf->state);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
}
/**
* ice_service_task_stop - stop service task and cancel works
* @pf: board private structure
*
- * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
+ * Return 0 if the ICE_SERVICE_DIS bit was not already set,
* 1 otherwise.
*/
static int ice_service_task_stop(struct ice_pf *pf)
{
int ret;
- ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
+ ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
del_timer_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
- clear_bit(__ICE_SERVICE_SCHED, pf->state);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
return ret;
}
@@ -1432,7 +1420,7 @@ static int ice_service_task_stop(struct ice_pf *pf)
*/
static void ice_service_task_restart(struct ice_pf *pf)
{
- clear_bit(__ICE_SERVICE_DIS, pf->state);
+ clear_bit(ICE_SERVICE_DIS, pf->state);
ice_service_task_schedule(pf);
}
@@ -1465,7 +1453,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
unsigned int i;
u32 reg;
- if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
+ if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
* there are pending MDD events.
*/
@@ -1557,7 +1545,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
vf->mdd_tx_events.count++;
- set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
i);
@@ -1567,7 +1555,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
vf->mdd_tx_events.count++;
- set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
i);
@@ -1577,7 +1565,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
vf->mdd_tx_events.count++;
- set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
i);
@@ -1587,7 +1575,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
vf->mdd_rx_events.count++;
- set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
i);
@@ -1642,7 +1630,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
if (!pcaps)
return -ENOMEM;
- retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
NULL);
if (retcode) {
dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
@@ -1702,7 +1690,7 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi)
if (!pcaps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
NULL);
if (status) {
@@ -1748,15 +1736,18 @@ static void ice_init_link_dflt_override(struct ice_port_info *pi)
* ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
* @pi: port info structure
*
- * If default override is enabled, initialized the user PHY cfg speed and FEC
+ * If default override is enabled, initialize the user PHY cfg speed and FEC
* settings using the default override mask from the NVM.
*
* The PHY should only be configured with the default override settings the
- * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
+ * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
* is used to indicate that the user PHY cfg default override is initialized
* and the PHY has not been configured with the default override settings. The
* state is set here, and cleared in ice_configure_phy the first time the PHY is
* configured.
+ *
+ * This function should be called only if the FW doesn't support default
+ * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
*/
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
@@ -1781,7 +1772,7 @@ static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
cfg->link_fec_opt = ldo->fec_options;
phy->curr_user_fec_req = ICE_FEC_AUTO;
- set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
+ set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}
/**
@@ -1804,22 +1795,21 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = pi->hw->back;
enum ice_status status;
- struct ice_vsi *vsi;
int err = 0;
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EIO;
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return -EINVAL;
-
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
- NULL);
+ if (ice_fw_supports_report_dflt_cfg(pi->hw))
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
+ else
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
err = -EIO;
@@ -1829,22 +1819,24 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
/* check if lenient mode is supported and enabled */
- if (ice_fw_supports_link_override(&vsi->back->hw) &&
+ if (ice_fw_supports_link_override(pi->hw) &&
!(pcaps->module_compliance_enforcement &
ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
- /* if link default override is enabled, initialize user PHY
- * configuration with link default override values
+ /* if the FW supports default PHY configuration mode, then the driver
+ * does not have to apply link override settings. If not,
+ * initialize user PHY configuration with link override values
*/
- if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
+ if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
+ (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
ice_init_phy_cfg_dflt_override(pi);
goto out;
}
}
- /* if link default override is not enabled, initialize PHY using
- * topology with media
+ /* if link default override is not enabled, set user flow control and
+ * FEC settings based on what get_phy_caps returned
*/
phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
pcaps->link_fec_options);
@@ -1852,7 +1844,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
out:
phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
- set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
+ set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
kfree(pcaps);
return err;
@@ -1869,27 +1861,24 @@ err_out:
static int ice_configure_phy(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_port_info *pi = vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_aqc_set_phy_cfg_data *cfg;
- struct ice_port_info *pi;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = vsi->back;
enum ice_status status;
int err = 0;
- pi = vsi->port_info;
- if (!pi)
- return -EINVAL;
-
/* Ensure we have media as we cannot configure a medialess port */
- if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EPERM;
ice_print_topo_conflict(vsi);
- if (vsi->port_info->phy.link_info.topo_media_conflict ==
- ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
return -EPERM;
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
return ice_force_phys_link_state(vsi, true);
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
@@ -1897,7 +1886,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
NULL);
if (status) {
dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
@@ -1910,15 +1899,19 @@ static int ice_configure_phy(struct ice_vsi *vsi)
* there's nothing to do
*/
if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
- ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
+ ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
goto done;
/* Use PHY topology as baseline for configuration */
memset(pcaps, 0, sizeof(*pcaps));
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
- NULL);
+ if (ice_fw_supports_report_dflt_cfg(pi->hw))
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
+ else
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
if (status) {
- dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
+ dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
vsi->vsi_num, ice_stat_str(status));
err = -EIO;
goto done;
@@ -1935,10 +1928,10 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Speed - If default override pending, use curr_user_phy_cfg set in
 * ice_init_phy_cfg_dflt_override.
 */
- if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
vsi->back->state)) {
- cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
- cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
+ cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
+ cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
} else {
u64 phy_low = 0, phy_high = 0;
@@ -1956,7 +1949,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
}
/* FEC */
- ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
+ ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
/* Can't provide what was requested; use PHY capabilities */
if (cfg->link_fec_opt !=
@@ -1968,12 +1961,12 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Flow Control - always supported; no need to check against
* capabilities
*/
- ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
+ ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
/* Enable link and link update */
cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
- status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
+ status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
if (status) {
dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
vsi->vsi_num, ice_stat_str(status));
@@ -2014,13 +2007,13 @@ static void ice_check_media_subtask(struct ice_pf *pf)
return;
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
+ if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
ice_init_phy_user_cfg(pi);
/* PHY settings are reset on media insertion, reconfigure
* PHY to preserve settings.
*/
- if (test_bit(__ICE_DOWN, vsi->state) &&
+ if (test_bit(ICE_VSI_DOWN, vsi->state) &&
test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
return;
@@ -2050,8 +2043,8 @@ static void ice_service_task(struct work_struct *work)
/* bail if a reset/recovery cycle is pending or rebuild failed */
if (ice_is_reset_in_progress(pf->state) ||
- test_bit(__ICE_SUSPENDED, pf->state) ||
- test_bit(__ICE_NEEDS_RESTART, pf->state)) {
+ test_bit(ICE_SUSPENDED, pf->state) ||
+ test_bit(ICE_NEEDS_RESTART, pf->state)) {
ice_service_task_complete(pf);
return;
}
@@ -2071,7 +2064,9 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
ice_sync_arfs_fltrs(pf);
- /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
+ ice_flush_fdir_ctx(pf);
+
+ /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
/* If the tasks have taken longer than one service timer period
@@ -2079,10 +2074,11 @@ static void ice_service_task(struct work_struct *work)
* schedule the service task now.
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
- test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
- test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
- test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
- test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
+ test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
+ test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
+ test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
+ test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -2112,7 +2108,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
struct device *dev = ice_pf_to_dev(pf);
/* bail out if earlier reset has failed */
- if (test_bit(__ICE_RESET_FAILED, pf->state)) {
+ if (test_bit(ICE_RESET_FAILED, pf->state)) {
dev_dbg(dev, "earlier reset has failed\n");
return -EIO;
}
@@ -2124,13 +2120,13 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
switch (reset) {
case ICE_RESET_PFR:
- set_bit(__ICE_PFR_REQ, pf->state);
+ set_bit(ICE_PFR_REQ, pf->state);
break;
case ICE_RESET_CORER:
- set_bit(__ICE_CORER_REQ, pf->state);
+ set_bit(ICE_CORER_REQ, pf->state);
break;
case ICE_RESET_GLOBR:
- set_bit(__ICE_GLOBR_REQ, pf->state);
+ set_bit(ICE_GLOBR_REQ, pf->state);
break;
default:
return -EINVAL;
@@ -2220,8 +2216,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
- q_vector->name, q_vector);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ err = devm_request_irq(dev, irq_num, vsi->irq_handler,
+ IRQF_SHARED, q_vector->name,
+ q_vector);
+ else
+ err = devm_request_irq(dev, irq_num, vsi->irq_handler,
+ 0, q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
err);
@@ -2524,7 +2525,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/* need to stop netdev while setting up the program for Rx rings */
- if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
ret = ice_down(vsi);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
@@ -2630,8 +2631,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
u32 oicr, ena_mask;
dev = ice_pf_to_dev(pf);
- set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
- set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -2643,18 +2644,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_MAL_DETECT_M) {
ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
- set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ set_bit(ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
/* disable any further VFLR event notifications */
- if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
u32 reg = rd32(hw, PFINT_OICR_ENA);
reg &= ~PFINT_OICR_VFLR_M;
wr32(hw, PFINT_OICR_ENA, reg);
} else {
ena_mask &= ~PFINT_OICR_VFLR_M;
- set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
}
}
@@ -2680,13 +2681,13 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
* We also make note of which reset happened so that peer
* devices/drivers can be informed.
*/
- if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
+ if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
if (reset == ICE_RESET_CORER)
- set_bit(__ICE_CORER_RECV, pf->state);
+ set_bit(ICE_CORER_RECV, pf->state);
else if (reset == ICE_RESET_GLOBR)
- set_bit(__ICE_GLOBR_RECV, pf->state);
+ set_bit(ICE_GLOBR_RECV, pf->state);
else
- set_bit(__ICE_EMPR_RECV, pf->state);
+ set_bit(ICE_EMPR_RECV, pf->state);
/* There are a couple of different bits at play here.
* hw->reset_ongoing indicates whether the hardware is
@@ -2694,7 +2695,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
* is received and set back to false after the driver
* has determined that the hardware is out of reset.
*
- * __ICE_RESET_OICR_RECV in pf->state indicates
+ * ICE_RESET_OICR_RECV in pf->state indicates
* that a post reset rebuild is required before the
* driver is operational again. This is set above.
*
@@ -2722,7 +2723,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & (PFINT_OICR_PE_CRITERR_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M)) {
- set_bit(__ICE_PFR_REQ, pf->state);
+ set_bit(ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
}
}
@@ -2975,19 +2976,13 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
- int err;
-
- err = ice_devlink_create_port(vsi);
- if (err)
- return err;
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
- if (!netdev) {
- err = -ENOMEM;
- goto err_destroy_devlink_port;
- }
+ if (!netdev)
+ return -ENOMEM;
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
vsi->netdev = netdev;
np = netdev_priv(netdev);
np->vsi = vsi;
@@ -3014,25 +3009,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ICE_MAX_MTU;
- err = register_netdev(vsi->netdev);
- if (err)
- goto err_free_netdev;
-
- devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
-
- netif_carrier_off(vsi->netdev);
-
- /* make sure transmit queues start off as stopped */
- netif_tx_stop_all_queues(vsi->netdev);
-
return 0;
-
-err_free_netdev:
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
-err_destroy_devlink_port:
- ice_devlink_destroy_port(vsi);
- return err;
}
/**
@@ -3107,15 +3084,6 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
struct ice_vsi *vsi = np->vsi;
int ret;
- if (vid >= VLAN_N_VID) {
- netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
- vid, VLAN_N_VID);
- return -EINVAL;
- }
-
- if (vsi->info.pvid)
- return -EINVAL;
-
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
@@ -3132,7 +3100,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
*/
ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!ret)
- set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret;
}
@@ -3153,9 +3121,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
struct ice_vsi *vsi = np->vsi;
int ret;
- if (vsi->info.pvid)
- return -EINVAL;
-
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
@@ -3171,7 +3136,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
- set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret;
}
@@ -3230,8 +3195,7 @@ unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
if (vsi->netdev) {
- if (vsi->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
@@ -3365,7 +3329,7 @@ static int ice_init_pf(struct ice_pf *pf)
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
INIT_WORK(&pf->serv_task, ice_service_task);
- clear_bit(__ICE_SERVICE_SCHED, pf->state);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
mutex_init(&pf->avail_q_mutex);
pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
@@ -3574,7 +3538,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
if (!new_rx && !new_tx)
return -EINVAL;
- while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
return -EBUSY;
@@ -3598,7 +3562,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
ice_pf_dcb_recfg(pf);
ice_vsi_open(vsi);
done:
- clear_bit(__ICE_CFG_BUSY, pf->state);
+ clear_bit(ICE_CFG_BUSY, pf->state);
return err;
}
@@ -3985,6 +3949,43 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
+ * ice_register_netdev - register netdev and devlink port
+ * @pf: pointer to the PF struct
+ */
+static int ice_register_netdev(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi;
+ int err = 0;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi || !vsi->netdev)
+ return -EIO;
+
+ err = register_netdev(vsi->netdev);
+ if (err)
+ goto err_register_netdev;
+
+ set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ err = ice_devlink_create_port(vsi);
+ if (err)
+ goto err_devlink_create;
+
+ devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
+
+ return 0;
+err_devlink_create:
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+err_register_netdev:
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ return err;
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -4006,7 +4007,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
return err;
- err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
+ err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
if (err) {
dev_err(dev, "BAR0 I/O map error %d\n", err);
return err;
@@ -4030,9 +4031,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
- set_bit(__ICE_DOWN, pf->state);
+ set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
- set_bit(__ICE_SERVICE_DIS, pf->state);
+ set_bit(ICE_SERVICE_DIS, pf->state);
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
@@ -4172,7 +4173,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_alloc_sw_unroll;
}
- clear_bit(__ICE_SERVICE_DIS, pf->state);
+ clear_bit(ICE_SERVICE_DIS, pf->state);
/* tell the firmware we are up */
err = ice_send_version(pf);
@@ -4261,15 +4262,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pcie_print_link_status(pf->pdev);
probe_done:
+ err = ice_register_netdev(pf);
+ if (err)
+ goto err_netdev_reg;
+
/* ready to go, so clear down state bit */
- clear_bit(__ICE_DOWN, pf->state);
+ clear_bit(ICE_DOWN, pf->state);
return 0;
+err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
- set_bit(__ICE_SERVICE_DIS, pf->state);
- set_bit(__ICE_DOWN, pf->state);
+ set_bit(ICE_SERVICE_DIS, pf->state);
+ set_bit(ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
@@ -4310,7 +4316,7 @@ static void ice_set_wake(struct ice_pf *pf)
}
/**
- * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet
+ * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
* @pf: pointer to the PF struct
*
* Issue firmware command to enable multicast magic wake, making
@@ -4369,11 +4375,11 @@ static void ice_remove(struct pci_dev *pdev)
}
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
- set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
+ set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
- set_bit(__ICE_DOWN, pf->state);
+ set_bit(ICE_DOWN, pf->state);
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
@@ -4533,13 +4539,13 @@ static int __maybe_unused ice_suspend(struct device *dev)
disabled = ice_service_task_stop(pf);
/* Already suspended? Then there is nothing to do */
- if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
+ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
if (!disabled)
ice_service_task_restart(pf);
return 0;
}
- if (test_bit(__ICE_DOWN, pf->state) ||
+ if (test_bit(ICE_DOWN, pf->state) ||
ice_is_reset_in_progress(pf->state)) {
dev_err(dev, "can't suspend device in reset or already down\n");
if (!disabled)
@@ -4611,16 +4617,16 @@ static int __maybe_unused ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
- clear_bit(__ICE_DOWN, pf->state);
+ clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
/* re-enable service task for reset, but allow reset to schedule it */
- clear_bit(__ICE_SERVICE_DIS, pf->state);
+ clear_bit(ICE_SERVICE_DIS, pf->state);
if (ice_schedule_reset(pf, reset_type))
dev_err(dev, "Reset during resume failed.\n");
- clear_bit(__ICE_SUSPENDED, pf->state);
+ clear_bit(ICE_SUSPENDED, pf->state);
ice_service_task_restart(pf);
/* Restart the service task */
@@ -4649,11 +4655,11 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
return PCI_ERS_RESULT_DISCONNECT;
}
- if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ if (!test_bit(ICE_SUSPENDED, pf->state)) {
ice_service_task_stop(pf);
- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
- set_bit(__ICE_PFR_REQ, pf->state);
+ if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(ICE_PFR_REQ, pf->state);
ice_prepare_for_reset(pf);
}
}
@@ -4720,7 +4726,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
- if (test_bit(__ICE_SUSPENDED, pf->state)) {
+ if (test_bit(ICE_SUSPENDED, pf->state)) {
dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
__func__);
return;
@@ -4741,11 +4747,11 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ if (!test_bit(ICE_SUSPENDED, pf->state)) {
ice_service_task_stop(pf);
- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
- set_bit(__ICE_PFR_REQ, pf->state);
+ if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(ICE_PFR_REQ, pf->state);
ice_prepare_for_reset(pf);
}
}
@@ -4892,7 +4898,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return 0;
}
- if (test_bit(__ICE_DOWN, pf->state) ||
+ if (test_bit(ICE_DOWN, pf->state) ||
ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't set mac %pM. device not ready\n",
mac);
@@ -4961,8 +4967,8 @@ static void ice_set_rx_mode(struct net_device *netdev)
* ndo_set_rx_mode may be triggered even without a change in netdev
* flags
*/
- set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
- set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
+ set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
/* schedule our worker thread which will take care of
@@ -5111,10 +5117,10 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
* separate if/else statements to guarantee each feature is checked
*/
if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
- ret = ice_vsi_manage_rss_lut(vsi, true);
+ ice_vsi_manage_rss_lut(vsi, true);
else if (!(features & NETIF_F_RXHASH) &&
netdev->features & NETIF_F_RXHASH)
- ret = ice_vsi_manage_rss_lut(vsi, false);
+ ice_vsi_manage_rss_lut(vsi, false);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
@@ -5195,6 +5201,105 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
return err;
}
+/* THEORY OF MODERATION:
+ * The below code creates custom DIM profiles for use by this driver, because
+ * the ice driver hardware works differently than the hardware that DIMLIB was
+ * originally made for. ice hardware doesn't have packet count limits that
+ * can trigger an interrupt, but it *does* have interrupt rate limit support,
+ * and this code adds that capability to be used by the driver when it's using
+ * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
+ * for how to "respond" to traffic and interrupts, so this driver uses a
+ * slightly different set of moderation parameters to get the best performance.
+ */
+struct ice_dim {
+ /* the throttle rate for interrupts, basically the worst-case delay
+ * before an initial interrupt fires; the value is stored in
+ * microseconds.
+ */
+ u16 itr;
+ /* the rate limit for interrupts, which can cap the delay from a small
+ * ITR at a certain number of interrupts per second, e.g. a 2us ITR
+ * could yield as much as 500,000 interrupts per second, but a 10us
+ * rate limit caps that at 100,000 interrupts per second. The value
+ * is stored in microseconds.
+ */
+ u16 intrl;
+};
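For intuition about how the two fields interact: the effective interrupt ceiling is set by whichever gap is larger, the ITR or the rate limit. A hypothetical helper, not part of this patch, that computes that cap for one profile entry:

static inline u32 ice_dim_max_ints_per_sec(const struct ice_dim *p)
{
	/* both fields are in microseconds; the larger gap dominates */
	u32 gap_us = max_t(u32, p->itr, p->intrl);

	return gap_us ? USEC_PER_SEC / gap_us : 0;
}

With the first Rx profile entry below, {2, 10}, the 2us ITR alone would allow 500,000 interrupts per second, while the 10us rate limit caps that at 100,000.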
+
+/* Make a different profile for Rx that doesn't allow quite so aggressive
+ * moderation at the high end (it maxes out at 128us, or about 8k interrupts
+ * a second). The INTRL/rate parameters here are only useful to cap small ITR
+ * values, which is why for larger ITR values, like 128us, which can only
+ * generate 8k interrupts per second anyway, there is no point in rate
+ * limiting and the values are set to zero. The rate limit values do affect
+ * latency, and so must be reasonably small so as not to impact
+ * latency-sensitive tests.
+ */
+static const struct ice_dim rx_profile[] = {
+ {2, 10},
+ {8, 16},
+ {32, 0},
+ {96, 0},
+ {128, 0}
+};
+
+/* The transmit profile, which has the same sorts of values
+ * as the previous struct
+ */
+static const struct ice_dim tx_profile[] = {
+ {2, 10},
+ {8, 16},
+ {64, 0},
+ {128, 0},
+ {256, 0}
+};
+
+static void ice_tx_dim_work(struct work_struct *work)
+{
+ struct ice_ring_container *rc;
+ struct ice_q_vector *q_vector;
+ struct dim *dim;
+ u16 itr, intrl;
+
+ dim = container_of(work, struct dim, work);
+ rc = container_of(dim, struct ice_ring_container, dim);
+ q_vector = container_of(rc, struct ice_q_vector, tx);
+
+ if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
+ dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
+
+ /* look up the values in our local table */
+ itr = tx_profile[dim->profile_ix].itr;
+ intrl = tx_profile[dim->profile_ix].intrl;
+
+ ice_write_itr(rc, itr);
+ ice_write_intrl(q_vector, intrl);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void ice_rx_dim_work(struct work_struct *work)
+{
+ struct ice_ring_container *rc;
+ struct ice_q_vector *q_vector;
+ struct dim *dim;
+ u16 itr, intrl;
+
+ dim = container_of(work, struct dim, work);
+ rc = container_of(dim, struct ice_ring_container, dim);
+ q_vector = container_of(rc, struct ice_q_vector, rx);
+
+ if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
+ dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
+
+ /* look up the values in our local table */
+ itr = rx_profile[dim->profile_ix].itr;
+ intrl = rx_profile[dim->profile_ix].intrl;
+
+ ice_write_itr(rc, itr);
+ ice_write_intrl(q_vector, intrl);
+
+ dim->state = DIM_START_MEASURE;
+}
+
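Both work callbacks get attached to each queue vector's dim instance in ice_napi_enable_all() later in this patch; DIMLIB then schedules dim.work whenever its state machine decides the profile index should change. A hedged sketch of the sampling side that feeds those decisions (the event counter and packet/byte totals on q_vector are illustrative placeholders, not fields added by this patch):

	struct dim_sample sample = {};

	/* from the per-vector interrupt/poll path: fold the latest totals
	 * into a sample and let DIMLIB decide whether to move the profile
	 */
	dim_update_sample(q_vector->total_events, total_packets, total_bytes,
			  &sample);
	net_dim(&q_vector->rx.dim, sample);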
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
@@ -5209,6 +5314,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+ INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
+ q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+
+ INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
+ q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+
if (q_vector->rx.ring || q_vector->tx.ring)
napi_enable(&q_vector->napi);
}
@@ -5235,7 +5346,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (err)
return err;
- clear_bit(__ICE_DOWN, vsi->state);
+ clear_bit(ICE_VSI_DOWN, vsi->state);
ice_napi_enable_all(vsi);
ice_vsi_ena_irq(vsi);
@@ -5342,7 +5453,6 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
- vsi->rx_gro_dropped = 0;
rcu_read_lock();
@@ -5357,7 +5467,6 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
- vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
}
/* update XDP Tx rings counters */
@@ -5378,8 +5487,8 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
struct ice_eth_stats *cur_es = &vsi->eth_stats;
struct ice_pf *pf = vsi->back;
- if (test_bit(__ICE_DOWN, vsi->state) ||
- test_bit(__ICE_CFG_BUSY, pf->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state) ||
+ test_bit(ICE_CFG_BUSY, pf->state))
return;
/* get stats as recorded by Tx/Rx rings */
@@ -5389,7 +5498,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
- cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
+ cur_ns->rx_dropped = cur_es->rx_discards;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
@@ -5583,7 +5692,7 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
* But, only call the update routine and read the registers if VSI is
* not down.
*/
- if (!test_bit(__ICE_DOWN, vsi->state))
+ if (!test_bit(ICE_VSI_DOWN, vsi->state))
ice_update_vsi_ring_stats(vsi);
stats->tx_packets = vsi_stats->tx_packets;
stats->tx_bytes = vsi_stats->tx_bytes;
@@ -5619,6 +5728,9 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (q_vector->rx.ring || q_vector->tx.ring)
napi_disable(&q_vector->napi);
+
+ cancel_work_sync(&q_vector->tx.dim.work);
+ cancel_work_sync(&q_vector->rx.dim.work);
}
}
@@ -5631,7 +5743,7 @@ int ice_down(struct ice_vsi *vsi)
int i, tx_err, rx_err, link_err = 0;
/* Caller of this function is expected to set the
- * vsi->state __ICE_DOWN bit
+ * vsi->state ICE_VSI_DOWN bit
*/
if (vsi->netdev) {
netif_carrier_off(vsi->netdev);
@@ -5783,7 +5895,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
if (err)
goto err_up_complete;
- clear_bit(__ICE_DOWN, vsi->state);
+ clear_bit(ICE_VSI_DOWN, vsi->state);
ice_vsi_ena_irq(vsi);
return 0;
@@ -5979,7 +6091,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
enum ice_status ret;
int err;
- if (test_bit(__ICE_DOWN, pf->state))
+ if (test_bit(ICE_DOWN, pf->state))
goto clear_recovery;
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
@@ -6095,7 +6207,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_replay_post(hw);
/* if we get here, reset flow is successful */
- clear_bit(__ICE_RESET_FAILED, pf->state);
+ clear_bit(ICE_RESET_FAILED, pf->state);
return;
err_vsi_rebuild:
@@ -6103,10 +6215,10 @@ err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
ice_shutdown_all_ctrlq(hw);
- set_bit(__ICE_RESET_FAILED, pf->state);
+ set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
/* set this bit in PF state to control service task scheduling */
- set_bit(__ICE_NEEDS_RESTART, pf->state);
+ set_bit(ICE_NEEDS_RESTART, pf->state);
dev_err(dev, "Rebuild failed, unload and reload driver\n");
}
@@ -6170,7 +6282,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
- if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
int err;
err = ice_down(vsi);
@@ -6305,89 +6417,118 @@ const char *ice_stat_str(enum ice_status stat_err)
}
/**
- * ice_set_rss - Set RSS keys and lut
+ * ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
- * @seed: RSS hash seed
* @lut: Lookup table
* @lut_size: Lookup table size
*
* Returns 0 on success, negative on failure
*/
-int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
+ struct ice_aq_get_set_rss_lut_params params = {};
+ struct ice_hw *hw = &vsi->back->hw;
enum ice_status status;
- struct device *dev;
- dev = ice_pf_to_dev(pf);
- if (seed) {
- struct ice_aqc_get_set_rss_keys *buf =
- (struct ice_aqc_get_set_rss_keys *)seed;
+ if (!lut)
+ return -EINVAL;
- status = ice_aq_set_rss_key(hw, vsi->idx, buf);
+ params.vsi_handle = vsi->idx;
+ params.lut_size = lut_size;
+ params.lut_type = vsi->rss_lut_type;
+ params.lut = lut;
- if (status) {
- dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ status = ice_aq_set_rss_lut(hw, &params);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return -EIO;
}
- if (lut) {
- status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
- lut, lut_size);
- if (status) {
- dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ return 0;
+}
+
+/**
+ * ice_set_rss_key - Set RSS key
+ * @vsi: Pointer to the VSI structure
+ * @seed: RSS hash seed
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ enum ice_status status;
+
+ if (!seed)
+ return -EINVAL;
+
+ status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return -EIO;
}
return 0;
}
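A caller that previously used the combined ice_set_rss(vsi, seed, lut, lut_size) now issues the two calls explicitly and may skip either piece. A minimal sketch of a converted call site (the wrapper name is illustrative):

static int example_apply_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	int err;

	if (seed) {
		err = ice_set_rss_key(vsi, seed);
		if (err)
			return err;
	}

	if (lut)
		return ice_set_rss_lut(vsi, lut, lut_size);

	return 0;
}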
/**
- * ice_get_rss - Get RSS keys and lut
+ * ice_get_rss_lut - Get RSS LUT
* @vsi: Pointer to VSI structure
- * @seed: Buffer to store the keys
* @lut: Buffer to store the lookup table entries
* @lut_size: Size of buffer to store the lookup table entries
*
* Returns 0 on success, negative on failure
*/
-int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
+ struct ice_aq_get_set_rss_lut_params params = {};
+ struct ice_hw *hw = &vsi->back->hw;
enum ice_status status;
- struct device *dev;
- dev = ice_pf_to_dev(pf);
- if (seed) {
- struct ice_aqc_get_set_rss_keys *buf =
- (struct ice_aqc_get_set_rss_keys *)seed;
+ if (!lut)
+ return -EINVAL;
- status = ice_aq_get_rss_key(hw, vsi->idx, buf);
- if (status) {
- dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ params.vsi_handle = vsi->idx;
+ params.lut_size = lut_size;
+ params.lut_type = vsi->rss_lut_type;
+ params.lut = lut;
+
+ status = ice_aq_get_rss_lut(hw, &params);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return -EIO;
}
- if (lut) {
- status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
- lut, lut_size);
- if (status) {
- dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ return 0;
+}
+
+/**
+ * ice_get_rss_key - Get RSS key
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the key in
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ enum ice_status status;
+
+ if (!seed)
+ return -EINVAL;
+
+ status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return -EIO;
}
return 0;
@@ -6599,19 +6740,19 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
switch (pf->tx_timeout_recovery_level) {
case 1:
- set_bit(__ICE_PFR_REQ, pf->state);
+ set_bit(ICE_PFR_REQ, pf->state);
break;
case 2:
- set_bit(__ICE_CORER_REQ, pf->state);
+ set_bit(ICE_CORER_REQ, pf->state);
break;
case 3:
- set_bit(__ICE_GLOBR_REQ, pf->state);
+ set_bit(ICE_GLOBR_REQ, pf->state);
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
- set_bit(__ICE_DOWN, pf->state);
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
- set_bit(__ICE_SERVICE_DIS, pf->state);
+ set_bit(ICE_DOWN, pf->state);
+ set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
+ set_bit(ICE_SERVICE_DIS, pf->state);
break;
}
@@ -6659,32 +6800,28 @@ int ice_open_internal(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
+ enum ice_status status;
int err;
- if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
+ if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
return -EIO;
}
- if (test_bit(__ICE_DOWN, pf->state)) {
- netdev_err(netdev, "device is not ready yet\n");
- return -EBUSY;
- }
-
netif_carrier_off(netdev);
pi = vsi->port_info;
- err = ice_update_link_info(pi);
- if (err) {
- netdev_err(netdev, "Failed to get link info, error %d\n",
- err);
- return err;
+ status = ice_update_link_info(pi);
+ if (status) {
+ netdev_err(netdev, "Failed to get link info, error %s\n",
+ ice_stat_str(status));
+ return -EIO;
}
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
- if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
+ if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
err = ice_init_phy_user_cfg(pi);
if (err) {
netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
@@ -6701,12 +6838,7 @@ int ice_open_internal(struct net_device *netdev)
}
} else {
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
- err = ice_aq_set_link_restart_an(pi, false, NULL);
- if (err) {
- netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
- vsi->vsi_num, err);
- return err;
- }
+ ice_set_link(vsi, false);
}
err = ice_vsi_open(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 75ccbfc07f99..fee37a5844cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -644,6 +644,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(tmp); i++)
+ /* cppcheck-suppress objectIndex */
sum += ((u8 *)&tmp)[i];
if (sum) {
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 7f4c1ec1eff2..199aa5b71540 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -13,6 +13,9 @@
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
+ ICE_PROT_MAC_IL = 4,
+ ICE_PROT_ETYPE_OL = 9,
+ ICE_PROT_ETYPE_IL = 10,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
@@ -21,7 +24,14 @@ enum ice_prot_id {
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
+ ICE_PROT_ESP_F = 88,
+ ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
+ ICE_PROT_ICMP_IL = 98,
+ ICE_PROT_ICMPV6_IL = 100,
+ ICE_PROT_PPPOE = 103,
+ ICE_PROT_L2TPV3 = 104,
+ ICE_PROT_ARP_OF = 118,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2403cb38b93c..2f097637e405 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -919,7 +919,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
}
/**
- * ice_sched_add_nodes_to_layer - Add nodes to a given layer
+ * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
* @pi: port information structure
* @tc_node: pointer to TC node
* @parent: pointer to parent node
@@ -928,82 +928,107 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
- * This function add nodes to a given layer.
+ * Add nodes into a specific HW layer.
*/
static enum ice_status
-ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
- struct ice_sched_node *tc_node,
- struct ice_sched_node *parent, u8 layer,
- u16 num_nodes, u32 *first_node_teid,
- u16 *num_nodes_added)
+ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer,
+ u16 num_nodes, u32 *first_node_teid,
+ u16 *num_nodes_added)
{
- u32 *first_teid_ptr = first_node_teid;
- u16 new_num_nodes, max_child_nodes;
- enum ice_status status = 0;
- struct ice_hw *hw = pi->hw;
- u16 num_added = 0;
- u32 temp;
+ u16 max_child_nodes;
*num_nodes_added = 0;
if (!num_nodes)
- return status;
+ return 0;
- if (!parent || layer < hw->sw_entry_point_layer)
+ if (!parent || layer < pi->hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
/* max children per node per layer */
- max_child_nodes = hw->max_children[parent->tx_sched_layer];
+ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
- /* current number of children + required nodes exceed max children ? */
+ /* current number of children + required nodes exceed max children */
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
return ICE_ERR_CFG;
+ return ICE_ERR_MAX_LIMIT;
+ }
+
+ return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
+ num_nodes_added, first_node_teid);
+}
+
+/**
+ * ice_sched_add_nodes_to_layer - Add nodes to a given layer
+ * @pi: port information structure
+ * @tc_node: pointer to TC node
+ * @parent: pointer to parent node
+ * @layer: layer number to add nodes
+ * @num_nodes: number of nodes to be added
+ * @first_node_teid: pointer to the first node TEID
+ * @num_nodes_added: pointer to number of nodes added
+ *
+ * This function adds nodes to a given layer.
+ */
+static enum ice_status
+ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer,
+ u16 num_nodes, u32 *first_node_teid,
+ u16 *num_nodes_added)
+{
+ u32 *first_teid_ptr = first_node_teid;
+ u16 new_num_nodes = num_nodes;
+ enum ice_status status = 0;
+ *num_nodes_added = 0;
+ while (*num_nodes_added < num_nodes) {
+ u16 max_child_nodes, num_added = 0;
+ /* cppcheck-suppress unusedVariable */
+ u32 temp;
+
+ status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
+ layer, new_num_nodes,
+ first_teid_ptr,
+ &num_added);
+ if (!status)
+ *num_nodes_added += num_added;
+ /* added more nodes than requested? */
+ if (*num_nodes_added > num_nodes) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
+ *num_nodes_added);
+ status = ICE_ERR_CFG;
+ break;
+ }
+ /* break if all the nodes are added successfully */
+ if (!status && (*num_nodes_added == num_nodes))
+ break;
+ /* break if the error is not max limit */
+ if (status && status != ICE_ERR_MAX_LIMIT)
+ break;
+ /* Exceeded the max children */
+ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
/* utilize all the spaces if the parent is not full */
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
- /* this recursion is intentional, and wouldn't
- * go more than 2 calls
+ } else {
+ /* This parent is full, try the next sibling */
+ parent = parent->sibling;
+ /* Don't modify the first node TEID memory if the
+ * first node was added already in the above call.
+ * Instead send some temp memory for all subsequent
+ * loop iterations.
+ */
- status = ice_sched_add_nodes_to_layer(pi, tc_node,
- parent, layer,
- new_num_nodes,
- first_node_teid,
- &num_added);
- if (status)
- return status;
+ if (num_added)
+ first_teid_ptr = &temp;
- *num_nodes_added += num_added;
+ new_num_nodes = num_nodes - *num_nodes_added;
}
- /* Don't modify the first node TEID memory if the first node was
- * added already in the above call. Instead send some temp
- * memory for all other recursive calls.
- */
- if (num_added)
- first_teid_ptr = &temp;
-
- new_num_nodes = num_nodes - num_added;
-
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
-
- /* this recursion is intentional, for 1024 queues
- * per VSI, it goes max of 16 iterations.
- * 1024 / 8 = 128 layer 8 nodes
- * 128 /8 = 16 (add 8 nodes per iteration)
- */
- status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
- layer, new_num_nodes,
- first_teid_ptr,
- &num_added);
- *num_nodes_added += num_added;
- return status;
}
-
- status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
- num_nodes_added, first_node_teid);
return status;
}
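For intuition, the bound that the removed recursion comment worked out still holds for the loop form: with 8 children per node, 1024 layer-8 queue nodes need at most 1024 / 8 = 128 parent slots, and since each pass places up to 8 nodes under one parent, the loop runs on the order of 128 / 8 = 16 iterations before every requested node is added.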
@@ -1857,7 +1882,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
- * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
+ * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 554f567476f3..aa11d07793d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
-#include "ice_adminq_cmd.h"
#include "ice_sriov.h"
/**
@@ -132,3 +131,402 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
return speed;
}
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This can be
+ * done via ice_mbx_reset_snapshot().
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler(), the struct ice_mbx_data
+ * is filled in, as it must be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a VF ID is received
+ * which is passed to the state handler. The boolean output is_malvf of the
+ * state handler ice_mbx_vf_state_handler() serves as an indicator to the
+ * caller whether this VF is malicious or not.
+ *
+ * 5. When a VF is identified as malicious, the caller can send a message
+ * to the system administrator. The caller can invoke ice_mbx_report_malvf()
+ * to help determine if a malicious VF is to be reported or not. This function
+ * requires the caller to maintain a global bitmap to track all malicious VFs
+ * and pass that to ice_mbx_report_malvf() along with the VF ID that was
+ * identified as malicious by ice_mbx_vf_state_handler().
+ *
+ * 6. The global bitmap maintained by the PF can be cleared completely if the
+ * PF is in reset, or the bit corresponding to a VF can be cleared if that VF
+ * is in reset. When a VF is shut down and brought back up, we assume that
+ * the new VF is not malicious and hence report it again if it is found to be
+ * malicious.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ *
+ * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
+ * when the driver is unloaded. (A caller-side sketch of how these steps fit
+ * together follows this comment block.)
+ */
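A caller-side sketch of the flow just described, for orientation only; the function name and the log line are illustrative and not part of this patch:

static void example_service_one_mbx_msg(struct ice_hw *hw,
					struct ice_mbx_data *mbx_data,
					unsigned long *all_malvfs,
					u16 bitmap_len, u16 vf_id)
{
	bool is_malvf = false, report = false;

	/* steps 3 and 4: run the detection algorithm for each message read */
	if (ice_mbx_vf_state_handler(hw, mbx_data, vf_id, &is_malvf))
		return;

	/* step 5: report a newly detected malicious VF exactly once */
	if (is_malvf &&
	    !ice_mbx_report_malvf(hw, all_malvfs, bitmap_len, vf_id, &report) &&
	    report)
		pr_warn("VF %u overflowed the PF mailbox\n", vf_id);
}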
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Use the highest unsigned 16-bit value, 0xFFFF, to indicate that the max
+ * messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the static mailbox snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+ /* As the mailbox buffer is circular, apply a mask
+ * to the incremented iteration count.
+ */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+ /* Check either of the below conditions to exit snapshot traversal:
+ * Condition-1: The number of iterations in the mailbox is equal to the
+ * mailbox head, which indicates that we have reached the end of the
+ * static snapshot.
+ * Condition-2: If the maximum number of messages serviced in the
+ * mailbox for a given interrupt is the sentinel "ignore" value, there
+ * is no need to compare the processed-message count against it.
+ * Otherwise, exit if the number of messages processed is greater than
+ * or equal to the maximum number of mailbox entries serviced in the
+ * current work item.
+ */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_id: relative virtual function ID
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static enum ice_status
+ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ if (vf_id >= snap->mbx_vf.vfcntr_len)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* increment the message count in the VF array */
+ snap->mbx_vf.vf_cntr[vf_id]++;
+
+ if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return 0;
+}
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ *
+ * Reset the mailbox snapshot structure and clear VF counter array.
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ u32 vfcntr_len;
+
+ if (!snap || !snap->mbx_vf.vf_cntr)
+ return;
+
+ /* Clear VF counters. */
+ vfcntr_len = snap->mbx_vf.vfcntr_len;
+ if (vfcntr_len)
+ memset(snap->mbx_vf.vf_cntr, 0,
+ (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
+
+ /* Reset mailbox snapshot for a new capture. */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_id: relative virtual function (VF) ID
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot,
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters the
+ * traverse or detect state.
+ * Traverse: If the pending message count is below the watermark, iterate
+ * through the snapshot without taking any action on the VF.
+ * Detect: If the pending message count exceeds the watermark, traverse
+ * the static snapshot and look for a malicious VF.
+ */
+enum ice_status
+ice_mbx_vf_state_handler(struct ice_hw *hw,
+ struct ice_mbx_data *mbx_data, u16 vf_id,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ enum ice_status status = 0;
+
+ if (!is_malvf || !mbx_data)
+ return ICE_ERR_BAD_PTR;
+
+ /* When entering the mailbox state machine assume that the VF
+ * is not malicious until detected.
+ */
+ *is_malvf = false;
+
+ /* Check that the max number of messages allowed to be processed while
+ * servicing the current interrupt is not less than the defined AVF
+ * message threshold.
+ */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return ICE_ERR_INVAL_SIZE;
+
+ /* The watermark value should not be less than the threshold limit
+ * set for the number of asynchronous messages a VF can send to the
+ * mailbox, nor should it be greater than the maximum number of messages
+ * in the mailbox serviced in the current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return ICE_ERR_PARAM;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+ * mailbox for the current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+ /* Capture a new static snapshot of the mailbox by logging the
+ * head and tail of snapshot and set num_iterations to the tail
+ * value to mark the start of the iteration through the snapshot.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+ /* The pending ARQ message count returned by ice_clean_rq_elem
+ * is the difference between the head and tail of the
+ * mailbox queue. Comparing this value against the watermark
+ * helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = ICE_ERR_CFG;
+ }
+
+ snap_buf->state = new_state;
+
+ return status;
+}
+
+/**
+ * ice_mbx_report_malvf - Track and note malicious VF
+ * @hw: pointer to the HW struct
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ * @report_malvf: boolean to indicate if malicious VF must be reported
+ *
+ * This function will update a bitmap that keeps track of the malicious
+ * VFs attached to the PF. A malicious VF must be reported only once
+ * between VF resets or loads, so the function checks the input
+ * vf_id against the bitmap to verify whether the VF has already been
+ * detected in a previous mailbox iteration.
+ */
+enum ice_status
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf)
+{
+ if (!all_malvfs || !report_malvf)
+ return ICE_ERR_PARAM;
+
+ *report_malvf = false;
+
+ if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ return ICE_ERR_INVAL_SIZE;
+
+ if (vf_id >= bitmap_len)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Report the VF only if its bit was not already set in the bitmap */
+ if (!test_and_set_bit(vf_id, all_malvfs))
+ *report_malvf = true;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
+ * @snap: pointer to the mailbox snapshot structure
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ *
+ * In case of a VF reset, this function can be called to clear
+ * the bit corresponding to the VF ID in the bitmap tracking all
+ * malicious VFs attached to the PF. The function also clears the
+ * VF counter array at the index of the VF ID. This is to ensure
+ * that the new VF loaded is not considered malicious before going
+ * through the overflow detection algorithm.
+ */
+enum ice_status
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id)
+{
+ if (!snap || !all_malvfs)
+ return ICE_ERR_PARAM;
+
+ if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ return ICE_ERR_INVAL_SIZE;
+
+ /* Ensure VF ID value is not larger than bitmap or VF counter length */
+ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
+ clear_bit(vf_id, all_malvfs);
+
+ /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
+ * This ensures that if a VF is unloaded and a new one is brought up
+ * with the same VF ID while a snapshot is in the traverse or detect
+ * state, the counter for that VF ID does not increment on top of
+ * existing values in the mailbox overflow detection algorithm.
+ */
+ snap->mbx_vf.vf_cntr[vf_id] = 0;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ * @vf_count: number of VFs allocated on a PF
+ *
+ * Clear the mailbox snapshot structure and allocate memory
+ * for the VF counter array based on the number of VFs allocated
+ * on that PF.
+ *
+ * Assumption: This function will assume ice_get_caps() has already been
+ * called to ensure that the vf_count can be compared against the number
+ * of VFs supported as defined in the functional capabilities of the device.
+ */
+enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Ensure that the number of VFs allocated is non-zero and
+ * is not greater than the number of supported VFs defined in
+ * the functional capabilities of the PF.
+ */
+ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
+ return ICE_ERR_INVAL_SIZE;
+
+ snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
+ sizeof(*snap->mbx_vf.vf_cntr),
+ GFP_KERNEL);
+ if (!snap->mbx_vf.vf_cntr)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Set the VF counter length to the number of VFs allocated
+ * per the given PF's functional capabilities.
+ */
+ snap->mbx_vf.vfcntr_len = vf_count;
+
+ /* Clear mbx_buf in the mailbox snapshot structure and set the
+ * mailbox snapshot state to a new capture.
+ */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and free the VF counter array.
+ */
+void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Free VF counter array and reset VF counter length */
+ devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
+ snap->mbx_vf.vfcntr_len = 0;
+
+ /* Clear mbx_buf in the mailbox snapshot structure */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+}
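Taken together, these helpers form the PF-side lifecycle of the overflow detector: allocate the snapshot when SR-IOV is enabled, run the state handler per serviced mailbox message, report a flagged VF once, and clear its state on VF reset. A minimal caller sketch (hypothetical wiring, not part of this patch; error handling elided):

/* Hypothetical PF-side flow; assumes ice_mbx_init_snapshot() already ran */
static void pf_handle_mbx_msg(struct ice_hw *hw, u16 vf_id,
			      struct ice_mbx_data *mbx_data,
			      unsigned long *malvfs, u16 bitmap_len)
{
	bool is_malvf = false, report = false;

	if (ice_mbx_vf_state_handler(hw, mbx_data, vf_id, &is_malvf))
		return;

	if (is_malvf &&
	    !ice_mbx_report_malvf(hw, malvfs, bitmap_len, vf_id, &report) &&
	    report)
		dev_warn(ice_hw_to_dev(hw), "VF %u overflowed the mailbox\n",
			 vf_id);
}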
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 3d78a0795138..161dc55d9e9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -4,7 +4,14 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_common.h"
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Defining the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
#ifdef CONFIG_PCI_IOV
enum ice_status
@@ -12,6 +19,17 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+enum ice_status
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ u16 vf_id, bool *is_mal_vf);
+enum ice_status
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id);
+enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+enum ice_status
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf);
#else /* CONFIG_PCI_IOV */
static inline enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 834cbd3f7b31..357d3073d814 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -920,7 +920,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
struct ice_vsi_list_map_info *v_map;
int i;
- v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
+ v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
if (!v_map)
return NULL;
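The substitution above is behavior-preserving; both calls return a single zeroed object, but the kzalloc form states that intent directly (illustrative comparison):

/* Equivalent single-object allocations */
v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);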
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index b91dcfd12727..e2b4b29ea207 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -309,7 +309,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->q_index) &&
- !test_bit(__ICE_DOWN, vsi->state)) {
+ !test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->q_index);
++tx_ring->tx_stats.restart_q;
@@ -554,8 +554,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
* @frames: XDP frames to be transmitted
* @flags: transmit flags
*
- * Returns number of frames successfully sent. Frames that fail are
- * free'ed via XDP return API.
+ * Returns number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
* For error cases, a negative errno code is returned and no-frames
* are transmitted (caller must handle freeing frames).
*/
@@ -567,9 +567,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_ring *xdp_ring;
- int drops = 0, i;
+ int nxmit = 0, i;
- if (test_bit(__ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
@@ -584,16 +584,15 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int err;
err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
- if (err != ICE_XDP_TX) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err != ICE_XDP_TX)
+ break;
+ nxmit++;
}
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
- return n - drops;
+ return nxmit;
}
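This follows the updated ndo_xdp_xmit contract: the driver reports how many frames it consumed and stops at the first failure, leaving the unsent tail for the caller to free. A simplified model of the caller side (illustrative; not the actual net core code):

/* frames[0..sent-1] now belong to the driver; the caller frees the rest */
int i, sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);

if (sent >= 0)
	for (i = sent; i < n; i++)
		xdp_return_frame(frames[i]);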
/**
@@ -1098,6 +1097,11 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
+ ctrl_vsi->vf_id != ICE_INVAL_VFID)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
@@ -1219,216 +1223,50 @@ construct_skb:
}
/**
- * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
- * @port_info: port_info structure containing the current link speed
- * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
- * @itr: ITR value to update
- *
- * Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, ethernet overhead, and the current
- * link speed.
- *
- * The following is a calculation derived from:
- * wmem_default / (size + overhead) = desired_pkts_per_int
- * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
- * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ * ice_net_dim - Update net DIM algorithm
+ * @q_vector: the vector associated with the interrupt
*
- * Assuming wmem_default is 212992 and overhead is 640 bytes per
- * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
- * formula down to:
+ * Create a DIM sample and notify net_dim() so that it can possibly decide
+ * a new ITR value based on incoming packets, bytes, and interrupts.
*
- * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24
- * ITR = -------------------------------------------- * --------------
- * rate pkt_size + 640
+ * This function is a no-op if the ring is not configured for dynamic ITR.
*/
-static unsigned int
-ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
- unsigned int avg_pkt_size,
- unsigned int itr)
+static void ice_net_dim(struct ice_q_vector *q_vector)
{
- switch (port_info->phy.link_info.link_speed) {
- case ICE_AQ_LINK_SPEED_100GB:
- itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- default:
- itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
- avg_pkt_size + 640);
- break;
- }
-
- if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
- itr &= ICE_ITR_ADAPTIVE_LATENCY;
- itr += ICE_ITR_ADAPTIVE_MAX_USECS;
- }
+ struct ice_ring_container *tx = &q_vector->tx;
+ struct ice_ring_container *rx = &q_vector->rx;
- return itr;
-}
+ if (ITR_IS_DYNAMIC(tx)) {
+ struct dim_sample dim_sample = {};
+ u64 packets = 0, bytes = 0;
+ struct ice_ring *ring;
-/**
- * ice_update_itr - update the adaptive ITR value based on statistics
- * @q_vector: structure containing interrupt and ring information
- * @rc: structure containing ring performance data
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- */
-static void
-ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
-{
- unsigned long next_update = jiffies;
- unsigned int packets, bytes, itr;
- bool container_is_rx;
+ ice_for_each_ring(ring, q_vector->tx) {
+ packets += ring->stats.pkts;
+ bytes += ring->stats.bytes;
+ }
- if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
- return;
+ dim_update_sample(q_vector->total_events, packets, bytes,
+ &dim_sample);
- /* If itr_countdown is set it means we programmed an ITR within
- * the last 4 interrupt cycles. This has a side effect of us
- * potentially firing an early interrupt. In order to work around
- * this we need to throw out any data received for a few
- * interrupts following the update.
- */
- if (q_vector->itr_countdown) {
- itr = rc->target_itr;
- goto clear_counts;
+ net_dim(&tx->dim, dim_sample);
}
- container_is_rx = (&q_vector->rx == rc);
- /* For Rx we want to push the delay up and default to low latency.
- * for Tx we want to pull the delay down and default to high latency.
- */
- itr = container_is_rx ?
- ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
- ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
-
- /* If we didn't update within up to 1 - 2 jiffies we can assume
- * that either packets are coming in so slow there hasn't been
- * any work, or that there is so much work that NAPI is dealing
- * with interrupt moderation and we don't need to do anything.
- */
- if (time_after(next_update, rc->next_update))
- goto clear_counts;
-
- prefetch(q_vector->vsi->port_info);
-
- packets = rc->total_pkts;
- bytes = rc->total_bytes;
+ if (ITR_IS_DYNAMIC(rx)) {
+ struct dim_sample dim_sample = {};
+ u64 packets = 0, bytes = 0;
+ struct ice_ring *ring;
- if (container_is_rx) {
- /* If Rx there are 1 to 4 packets and bytes are less than
- * 9000 assume insufficient data to use bulk rate limiting
- * approach unless Tx is already in bulk rate limiting. We
- * are likely latency driven.
- */
- if (packets && packets < 4 && bytes < 9000 &&
- (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
- itr = ICE_ITR_ADAPTIVE_LATENCY;
- goto adjust_by_size_and_speed;
+ ice_for_each_ring(ring, q_vector->rx) {
+ packets += ring->stats.pkts;
+ bytes += ring->stats.bytes;
}
- } else if (packets < 4) {
- /* If we have Tx and Rx ITR maxed and Tx ITR is running in
- * bulk mode and we are receiving 4 or fewer packets just
- * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
- * that the Rx can relax.
- */
- if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
- (q_vector->rx.target_itr & ICE_ITR_MASK) ==
- ICE_ITR_ADAPTIVE_MAX_USECS)
- goto clear_counts;
- } else if (packets > 32) {
- /* If we have processed over 32 packets in a single interrupt
- * for Tx assume we need to switch over to "bulk" mode.
- */
- rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
- }
-
- /* We have no packets to actually measure against. This means
- * either one of the other queues on this vector is active or
- * we are a Tx queue doing TSO with too high of an interrupt rate.
- *
- * Between 4 and 56 we can assume that our current interrupt delay
- * is only slightly too low. As such we should increase it by a small
- * fixed amount.
- */
- if (packets < 56) {
- itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
- if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
- itr &= ICE_ITR_ADAPTIVE_LATENCY;
- itr += ICE_ITR_ADAPTIVE_MAX_USECS;
- }
- goto clear_counts;
- }
-
- if (packets <= 256) {
- itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
- itr &= ICE_ITR_MASK;
- /* Between 56 and 112 is our "goldilocks" zone where we are
- * working out "just right". Just report that our current
- * ITR is good for us.
- */
- if (packets <= 112)
- goto clear_counts;
-
- /* If packet count is 128 or greater we are likely looking
- * at a slight overrun of the delay we want. Try halving
- * our delay to see if that will cut the number of packets
- * in half per interrupt.
- */
- itr >>= 1;
- itr &= ICE_ITR_MASK;
- if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
- itr = ICE_ITR_ADAPTIVE_MIN_USECS;
+ dim_update_sample(q_vector->total_events, packets, bytes,
+ &dim_sample);
- goto clear_counts;
+ net_dim(&rx->dim, dim_sample);
}
-
- /* The paths below assume we are dealing with a bulk ITR since
- * number of packets is greater than 256. We are just going to have
- * to compute a value and try to bring the count under control,
- * though for smaller packet sizes there isn't much we can do as
- * NAPI polling will likely be kicking in sooner rather than later.
- */
- itr = ICE_ITR_ADAPTIVE_BULK;
-
-adjust_by_size_and_speed:
-
- /* based on checks above packets cannot be 0 so division is safe */
- itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
- bytes / packets, itr);
-
-clear_counts:
- /* write back value */
- rc->target_itr = itr;
-
- /* next update should occur within next jiffy */
- rc->next_update = next_update + 1;
-
- rc->total_bytes = 0;
- rc->total_pkts = 0;
}
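The DIM pattern used here is generic: pass cumulative interrupt, packet and byte counters to the library and let it decide, out of band, whether a new moderation profile is worthwhile. A standalone sketch of the same pattern (hypothetical helper; dim_update_sample() and net_dim() are the <linux/dim.h> API):

#include <linux/dim.h>

static void my_ring_update_dim(struct dim *dim, u16 events, u64 pkts, u64 bytes)
{
	struct dim_sample sample = {};

	/* Counters are cumulative; the library computes deltas itself */
	dim_update_sample(events, pkts, bytes, &sample);
	net_dim(dim, sample);	/* may schedule dim->work with a new profile */
}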
/**
@@ -1452,72 +1290,46 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
-/* The act of updating the ITR will cause it to immediately trigger. In order
- * to prevent this from throwing off adaptive update statistics we defer the
- * update so that it can only happen so often. So after either Tx or Rx are
- * updated we make the adaptive scheme wait until either the ITR completely
- * expires via the next_update expiration or we have been through at least
- * 3 interrupts.
- */
-#define ITR_COUNTDOWN_START 3
-
/**
- * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
- * @q_vector: q_vector for which ITR is being updated and interrupt enabled
+ * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
+ * @q_vector: the vector associated with the interrupt to enable
+ *
+ * Update the net_dim() algorithm and re-enable the interrupt associated with
+ * this vector.
+ *
+ * If the VSI is down, the interrupt will not be re-enabled.
*/
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
- struct ice_ring_container *tx = &q_vector->tx;
- struct ice_ring_container *rx = &q_vector->rx;
struct ice_vsi *vsi = q_vector->vsi;
+ bool wb_en = q_vector->wb_on_itr;
u32 itr_val;
- /* when exiting WB_ON_ITR just reset the countdown and let ITR
- * resume it's normal "interrupts-enabled" path
- */
- if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
- q_vector->itr_countdown = 0;
-
- /* This will do nothing if dynamic updates are not enabled */
- ice_update_itr(q_vector, tx);
- ice_update_itr(q_vector, rx);
+ if (test_bit(ICE_DOWN, vsi->state))
+ return;
- /* This block of logic allows us to get away with only updating
- * one ITR value with each interrupt. The idea is to perform a
- * pseudo-lazy update with the following criteria.
- *
- * 1. Rx is given higher priority than Tx if both are in same state
- * 2. If we must reduce an ITR that is given highest priority.
- * 3. We then give priority to increasing ITR based on amount.
+ /* When exiting WB_ON_ITR, let ITR resume its normal
+ * interrupts-enabled path.
*/
- if (rx->target_itr < rx->current_itr) {
- /* Rx ITR needs to be reduced, this is highest priority */
- itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
- rx->current_itr = rx->target_itr;
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- } else if ((tx->target_itr < tx->current_itr) ||
- ((rx->target_itr - rx->current_itr) <
- (tx->target_itr - tx->current_itr))) {
- /* Tx ITR needs to be reduced, this is second priority
- * Tx ITR needs to be increased more than Rx, fourth priority
- */
- itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
- tx->current_itr = tx->target_itr;
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- } else if (rx->current_itr != rx->target_itr) {
- /* Rx ITR needs to be increased, third priority */
- itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
- rx->current_itr = rx->target_itr;
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- } else {
- /* Still have to re-enable the interrupts */
- itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
- if (q_vector->itr_countdown)
- q_vector->itr_countdown--;
+ if (wb_en)
+ q_vector->wb_on_itr = false;
+
+ /* This will do nothing if dynamic updates are not enabled. */
+ ice_net_dim(q_vector);
+
+ /* net_dim() updates ITR out-of-band using a work item */
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ /* trigger an immediate software interrupt when exiting
+ * busy poll, to make sure to catch any pending cleanups
+ * that might have been missed due to interrupt state
+ * transition.
+ */
+ if (wb_en) {
+ itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_SW_ITR_INDX_M |
+ GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
}
-
- if (!test_bit(__ICE_DOWN, vsi->state))
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}
/**
@@ -1539,7 +1351,7 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
struct ice_vsi *vsi = q_vector->vsi;
/* already in wb_on_itr mode no need to change it */
- if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
+ if (q_vector->wb_on_itr)
return;
/* use previously set ITR values for all of the ITR indices by
@@ -1551,7 +1363,7 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
GLINT_DYN_CTL_WB_ON_ITR_M);
- q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
+ q_vector->wb_on_itr = true;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 5dab77504fa5..c5a92ac787d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -192,7 +192,11 @@ struct ice_rxq_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buf_failed;
- u64 gro_dropped; /* GRO returned dropped */
+};
+
+enum ice_ring_state_t {
+ ICE_TX_XPS_INIT_DONE,
+ ICE_TX_NBITS,
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
@@ -219,23 +223,20 @@ enum ice_rx_dtype {
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_8K 124
#define ICE_ITR_20K 50
-#define ICE_ITR_MAX 8160
-#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
-#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
-#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
-#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
-#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
+#define ICE_ITR_MAX 8160 /* 0x1FE0 */
+#define ICE_DFLT_TX_ITR ICE_ITR_20K
+#define ICE_DFLT_RX_ITR ICE_ITR_20K
+enum ice_dynamic_itr {
+ ITR_STATIC = 0,
+ ITR_DYNAMIC = 1
+};
+
+#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)
-#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
-#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
-#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
-#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
-#define ICE_ITR_ADAPTIVE_BULK 0x0000
-
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
@@ -292,6 +293,7 @@ struct ice_ring {
};
struct rcu_head rcu; /* to avoid race on free */
+ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct bpf_prog *xdp_prog;
struct xsk_buff_pool *xsk_pool;
u16 rx_offset;
@@ -334,23 +336,22 @@ static inline bool ice_ring_is_xdp(struct ice_ring *ring)
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
- unsigned long next_update; /* jiffies value of next queue update */
- unsigned int total_bytes; /* total bytes processed this int */
- unsigned int total_pkts; /* total packets processed this int */
+ struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */
- u16 target_itr; /* value in usecs divided by the hw->itr_gran */
- u16 current_itr; /* value in usecs divided by the hw->itr_gran */
- /* high bit set means dynamic ITR, rest is used to store user
- * readable ITR value in usecs and must be converted before programming
- * to a register.
+ /* this matches the maximum number of ITR bits, but in usec
+ * values, so it is shifted left one bit (bit zero is ignored)
*/
- u16 itr_setting;
+ u16 itr_setting:13;
+ u16 itr_reserved:2;
+ u16 itr_mode:1;
};
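The packing works because the maximum ITR value, 8160 usecs (0x1FE0), fits below 2^13 = 8192, so 13 bits cover the whole range and the old bit-15 dynamic flag moves to the dedicated itr_mode field. A compile-time check of that assumption (illustrative, not in the patch; would sit in an init path):

/* 13-bit itr_setting must hold the full ITR range */
BUILD_BUG_ON(ICE_ITR_MAX >= BIT(13));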
struct ice_coalesce_stored {
u16 itr_tx;
u16 itr_rx;
u8 intrl;
+ u8 tx_valid;
+ u8 rx_valid;
};
/* iterator for handling rings in ring container */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 02b12736ea80..207f6ee3a7f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -143,6 +143,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
case ICE_RX_PTYPE_INNER_PROT_UDP:
case ICE_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 266036b7a49a..4474dd6a7ba1 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -192,6 +192,24 @@ enum ice_fltr_ptype {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP,
+ ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3,
+ ICE_FLTR_PTYPE_NONF_IPV4_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV6_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV4_AH,
+ ICE_FLTR_PTYPE_NONF_IPV6_AH,
+ ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP,
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE,
+ ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION,
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE,
+ ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION,
+ ICE_FLTR_PTYPE_NON_IP_L2,
ICE_FLTR_PTYPE_FRAG_IPV4,
ICE_FLTR_PTYPE_NONF_IPV6_UDP,
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
@@ -533,10 +551,7 @@ struct ice_dcb_app_priority_table {
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
#define ICE_TLV_STATUS_ERR 0x4
-#define ICE_APP_PROT_ID_FCOE 0x8906
-#define ICE_APP_PROT_ID_ISCSI 0x0cbc
#define ICE_APP_PROT_ID_ISCSI_860 0x035c
-#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
@@ -615,6 +630,80 @@ struct ice_fw_log_cfg {
struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
};
+/* Enum defining the different states of the mailbox snapshot in the
+ * PF-VF mailbox overflow detection algorithm. The snapshot can be in
+ * states:
+ * 1. ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot
+ * within the mailbox buffer.
+ * 2. ICE_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snapshot
+ * 3. ICE_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the
+ * mailbox and mark any VFs sending more messages than the threshold limit set.
+ * 4. ICE_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to 0xFFFFFFFF.
+ */
+enum ice_mbx_snapshot_state {
+ ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0,
+ ICE_MAL_VF_DETECT_STATE_TRAVERSE,
+ ICE_MAL_VF_DETECT_STATE_DETECT,
+ ICE_MAL_VF_DETECT_STATE_INVALID = 0xFFFFFFFF,
+};
+
+/* Structure to hold information of the static snapshot and the mailbox
+ * buffer data used to generate and track the snapshot.
+ * 1. state: the state of the mailbox snapshot in the malicious VF
+ * detection state handler ice_mbx_vf_state_handler()
+ * 2. head: head of the mailbox snapshot in a circular mailbox buffer
+ * 3. tail: tail of the mailbox snapshot in a circular mailbox buffer
+ * 4. num_iterations: number of messages traversed in circular mailbox buffer
+ * 5. num_msg_proc: number of messages processed in mailbox
+ * 6. num_pending_arq: number of pending asynchronous messages
+ * 7. max_num_msgs_mbx: maximum messages in mailbox for currently
+ * serviced work item or interrupt.
+ */
+struct ice_mbx_snap_buffer_data {
+ enum ice_mbx_snapshot_state state;
+ u32 head;
+ u32 tail;
+ u32 num_iterations;
+ u16 num_msg_proc;
+ u16 num_pending_arq;
+ u16 max_num_msgs_mbx;
+};
+
+/* Structure to track messages sent by VFs on mailbox:
+ * 1. vf_cntr: a counter array of VFs to track the number of
+ * asynchronous messages sent by each VF
+ * 2. vfcntr_len: number of entries in VF counter array
+ */
+struct ice_mbx_vf_counter {
+ u32 *vf_cntr;
+ u32 vfcntr_len;
+};
+
+/* Structure to hold data relevant to the captured static snapshot
+ * of the PF-VF mailbox.
+ */
+struct ice_mbx_snapshot {
+ struct ice_mbx_snap_buffer_data mbx_buf;
+ struct ice_mbx_vf_counter mbx_vf;
+};
+
+/* Structure to hold data to be used for capturing or updating a
+ * static snapshot.
+ * 1. num_msg_proc: number of messages processed in mailbox
+ * 2. num_pending_arq: number of pending asynchronous messages
+ * 3. max_num_msgs_mbx: maximum messages in mailbox for currently
+ * serviced work item or interrupt.
+ * 4. async_watermark_val: an upper threshold set by the caller to
+ * determine if the pending ARQ count is large enough to assume the
+ * possibility of a malicious VF.
+ */
+struct ice_mbx_data {
+ u16 num_msg_proc;
+ u16 num_pending_arq;
+ u16 max_num_msgs_mbx;
+ u16 async_watermark_val;
+};
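A caller fills struct ice_mbx_data from the live mailbox control queue state each time the detector runs; the watermark must stay within the bounds checked by ice_mbx_vf_state_handler(). A hedged sketch (the field sources are assumptions based on the comments above):

struct ice_mbx_data mbx_data = {
	.num_msg_proc        = num_processed,   /* msgs handled this interrupt */
	.num_pending_arq     = pending,         /* e.g. from ice_clean_rq_elem() */
	.max_num_msgs_mbx    = hw->mailboxq.num_rq_entries,
	.async_watermark_val = watermark,       /* > ICE_ASYNC_VF_MSG_THRESHOLD */
};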
+
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
@@ -703,13 +792,13 @@ struct ice_hw {
enum ice_aq_err pkg_dwnld_status;
- /* Driver's package ver - (from the Metadata seg) */
+ /* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
- /* Driver's Ice package version (from the Ice seg) */
- struct ice_pkg_ver ice_pkg_ver;
- u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
+ /* Driver's Ice segment format version and ID (from the Ice seg) */
+ struct ice_pkg_ver ice_seg_fmt_ver;
+ u8 ice_seg_id[ICE_SEG_ID_SIZE];
/* Pointer to the ice segment */
struct ice_seg *seg;
@@ -746,6 +835,7 @@ struct ice_hw {
DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
+ struct ice_mbx_snapshot mbx_snapshot;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@@ -810,6 +900,14 @@ struct ice_hw_port_stats {
u64 fd_sb_match;
};
+struct ice_aq_get_set_rss_lut_params {
+ u16 vsi_handle; /* software VSI handle */
+ u16 lut_size; /* size of the LUT buffer */
+ u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
+ u8 *lut; /* input RSS LUT for set and output RSS LUT for get */
+ u8 global_lut_id; /* only valid when lut_type is global */
+};
+
/* Checksum and Shadow RAM pointers */
#define ICE_SR_NVM_CTRL_WORD 0x00
#define ICE_SR_BOOT_CFG_PTR 0x132
@@ -916,4 +1014,9 @@ struct ice_hw_port_stats {
#define ICE_FW_API_LLDP_FLTR_MIN 7
#define ICE_FW_API_LLDP_FLTR_PATCH 1
+/* AQ API version for report default configuration */
+#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1
+#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
+#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
new file mode 100644
index 000000000000..9feebe5f556c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice_virtchnl_allowlist.h"
+
+/* The purpose of this file is to share functionality for allowlisting or
+ * denylisting opcodes used in PF <-> VF communication. Groups of opcodes:
+ * - default -> should be always allowed after creating VF,
+ * default_allowlist_opcodes
+ * - opcodes needed by VF to work correctly, but not associated with caps ->
+ * should be allowed after successful VF resources allocation,
+ * working_allowlist_opcodes
+ * - opcodes needed by VF when caps are activated
+ *
+ * Caps that don't use new opcodes (no opcodes should be allowed):
+ * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
+ * - VIRTCHNL_VF_OFFLOAD_RSS_REG
+ * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
+ * - VIRTCHNL_VF_OFFLOAD_CRC
+ * - VIRTCHNL_VF_OFFLOAD_RX_POLLING
+ * - VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2
+ * - VIRTCHNL_VF_OFFLOAD_ENCAP
+ * - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM
+ * - VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM
+ * - VIRTCHNL_VF_OFFLOAD_USO
+ */
+
+/* default opcodes to communicate with VF */
+static const u32 default_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_VF_RESOURCES, VIRTCHNL_OP_VERSION, VIRTCHNL_OP_RESET_VF,
+};
+
+/* opcodes supported after successful VIRTCHNL_OP_GET_VF_RESOURCES */
+static const u32 working_allowlist_opcodes[] = {
+ VIRTCHNL_OP_CONFIG_TX_QUEUE, VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_ENABLE_QUEUES, VIRTCHNL_OP_DISABLE_QUEUES,
+ VIRTCHNL_OP_GET_STATS, VIRTCHNL_OP_EVENT,
+};
+
+/* VIRTCHNL_VF_OFFLOAD_L2 */
+static const u32 l2_allowlist_opcodes[] = {
+ VIRTCHNL_OP_ADD_ETH_ADDR, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+};
+
+/* VIRTCHNL_VF_OFFLOAD_REQ_QUEUES */
+static const u32 req_queues_allowlist_opcodes[] = {
+ VIRTCHNL_OP_REQUEST_QUEUES,
+};
+
+/* VIRTCHNL_VF_OFFLOAD_VLAN */
+static const u32 vlan_allowlist_opcodes[] = {
+ VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+};
+
+/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
+static const u32 rss_pf_allowlist_opcodes[] = {
+ VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+};
+
+/* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */
+static const u32 adv_rss_pf_allowlist_opcodes[] = {
+ VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG,
+};
+
+/* VIRTCHNL_VF_OFFLOAD_FDIR_PF */
+static const u32 fdir_pf_allowlist_opcodes[] = {
+ VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
+};
+
+struct allowlist_opcode_info {
+ const u32 *opcodes;
+ size_t size;
+};
+
+#define BIT_INDEX(caps) (HWEIGHT((caps) - 1))
+#define ALLOW_ITEM(caps, list) \
+ [BIT_INDEX(caps)] = { \
+ .opcodes = list, \
+ .size = ARRAY_SIZE(list) \
+ }
+static const struct allowlist_opcode_info allowlist_opcodes[] = {
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_L2, l2_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
+};
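BIT_INDEX() converts a single-bit capability flag into its array slot by taking the population count of (caps - 1): for caps == BIT(n), the value caps - 1 has exactly n bits set. A worked example (the concrete flag value is illustrative):

/* If VIRTCHNL_VF_OFFLOAD_VLAN were BIT(16):
 *   BIT_INDEX(BIT(16)) == HWEIGHT(BIT(16) - 1) == HWEIGHT(0xFFFF) == 16
 * so its opcode list lands at allowlist_opcodes[16].
 */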
+
+/**
+ * ice_vc_is_opcode_allowed - check if this opcode is allowed on this VF
+ * @vf: pointer to VF structure
+ * @opcode: virtchnl opcode
+ *
+ * Return true if message is allowed on this VF
+ */
+bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode)
+{
+ if (opcode >= VIRTCHNL_OP_MAX)
+ return false;
+
+ return test_bit(opcode, vf->opcodes_allowlist);
+}
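This predicate is intended as a gate in the PF's virtchnl message dispatch path, rejecting opcodes outside the VF's allowlist before any handler runs. A hedged sketch of such a gate (hypothetical dispatcher):

/* Hypothetical dispatch guard */
if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
	/* reply with VIRTCHNL_STATUS_ERR_NOT_SUPPORTED and bail out */
	return;
}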
+
+/**
+ * ice_vc_allowlist_opcodes - allowlist selected opcodes
+ * @vf: pointer to VF structure
+ * @opcodes: array of opcodes to allowlist
+ * @size: size of opcodes array
+ *
+ * Function should be called to allowlist opcodes on VF.
+ */
+static void
+ice_vc_allowlist_opcodes(struct ice_vf *vf, const u32 *opcodes, size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ set_bit(opcodes[i], vf->opcodes_allowlist);
+}
+
+/**
+ * ice_vc_clear_allowlist - clear all allowlist opcodes
+ * @vf: pointer to VF structure
+ */
+static void ice_vc_clear_allowlist(struct ice_vf *vf)
+{
+ bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX);
+}
+
+/**
+ * ice_vc_set_default_allowlist - allowlist default opcodes for VF
+ * @vf: pointer to VF structure
+ */
+void ice_vc_set_default_allowlist(struct ice_vf *vf)
+{
+ ice_vc_clear_allowlist(vf);
+ ice_vc_allowlist_opcodes(vf, default_allowlist_opcodes,
+ ARRAY_SIZE(default_allowlist_opcodes));
+}
+
+/**
+ * ice_vc_set_working_allowlist - allowlist opcodes needed by VF to work
+ * @vf: pointer to VF structure
+ *
+ * Allowlist opcodes that aren't associated with specific caps, but
+ * are needed by the VF to work.
+ */
+void ice_vc_set_working_allowlist(struct ice_vf *vf)
+{
+ ice_vc_allowlist_opcodes(vf, working_allowlist_opcodes,
+ ARRAY_SIZE(working_allowlist_opcodes));
+}
+
+/**
+ * ice_vc_set_caps_allowlist - allowlist VF opcodes according to caps
+ * @vf: pointer to VF structure
+ */
+void ice_vc_set_caps_allowlist(struct ice_vf *vf)
+{
+ unsigned long caps = vf->driver_caps;
+ unsigned int i;
+
+ for_each_set_bit(i, &caps, ARRAY_SIZE(allowlist_opcodes))
+ ice_vc_allowlist_opcodes(vf, allowlist_opcodes[i].opcodes,
+ allowlist_opcodes[i].size);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
new file mode 100644
index 000000000000..d3ae86ded219
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_ALLOWLIST_H_
+#define _ICE_VIRTCHNL_ALLOWLIST_H_
+#include "ice.h"
+
+bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode);
+
+void ice_vc_set_default_allowlist(struct ice_vf *vf);
+void ice_vc_set_working_allowlist(struct ice_vf *vf);
+void ice_vc_set_caps_allowlist(struct ice_vf *vf);
+#endif /* _ICE_VIRTCHNL_ALLOWLIST_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
new file mode 100644
index 000000000000..eee180d8c024
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -0,0 +1,2204 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_flow.h"
+
+#define to_fltr_conf_from_desc(p) \
+ container_of(p, struct virtchnl_fdir_fltr_conf, input)
+
+#define ICE_FLOW_PROF_TYPE_S 0
+#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
+#define ICE_FLOW_PROF_VSI_S 32
+#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
+
+/* Flow profile ID format:
+ * [0:31] - flow type, flow + tun_offs
+ * [32:63] - VSI index
+ */
+#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
+ ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
+ (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
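The resulting 64-bit ID simply concatenates the VSI index (upper 32 bits) with flow + tun_offs (lower 32 bits). A worked example (the enum value is illustrative):

/* vsi = 5, flow = 2 (say ICE_FLTR_PTYPE_NONF_IPV4_TCP), tun_offs = 0:
 *   ICE_FLOW_PROF_FD(5, 2, 0) == (5ULL << 32) | 2 == 0x0000000500000002
 */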
+
+#define GTPU_TEID_OFFSET 4
+#define GTPU_EH_QFI_OFFSET 1
+#define GTPU_EH_QFI_MASK 0x3F
+#define PFCP_S_OFFSET 0
+#define PFCP_S_MASK 0x1
+#define PFCP_PORT_NR 8805
+
+#define FDIR_INSET_FLAG_ESP_S 0
+#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
+#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
+#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
+
+enum ice_fdir_tunnel_type {
+ ICE_FDIR_TUNNEL_TYPE_NONE = 0,
+ ICE_FDIR_TUNNEL_TYPE_GTPU,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
+};
+
+struct virtchnl_fdir_fltr_conf {
+ struct ice_fdir_fltr input;
+ enum ice_fdir_tunnel_type ttype;
+ u64 inset_flag;
+ u32 flow_id;
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_NONE,
+};
+
+struct virtchnl_fdir_pattern_match_item {
+ enum virtchnl_proto_hdr_type *list;
+ u64 input_set;
+ u64 *meta;
+};
+
+static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
+ {vc_pattern_ipv4, 0, NULL},
+ {vc_pattern_ipv4_tcp, 0, NULL},
+ {vc_pattern_ipv4_udp, 0, NULL},
+ {vc_pattern_ipv4_sctp, 0, NULL},
+ {vc_pattern_ipv6, 0, NULL},
+ {vc_pattern_ipv6_tcp, 0, NULL},
+ {vc_pattern_ipv6_udp, 0, NULL},
+ {vc_pattern_ipv6_sctp, 0, NULL},
+};
+
+static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
+ {vc_pattern_ipv4, 0, NULL},
+ {vc_pattern_ipv4_tcp, 0, NULL},
+ {vc_pattern_ipv4_udp, 0, NULL},
+ {vc_pattern_ipv4_sctp, 0, NULL},
+ {vc_pattern_ipv6, 0, NULL},
+ {vc_pattern_ipv6_tcp, 0, NULL},
+ {vc_pattern_ipv6_udp, 0, NULL},
+ {vc_pattern_ipv6_sctp, 0, NULL},
+ {vc_pattern_ether, 0, NULL},
+ {vc_pattern_ipv4_gtpu, 0, NULL},
+ {vc_pattern_ipv4_gtpu_eh, 0, NULL},
+ {vc_pattern_ipv4_l2tpv3, 0, NULL},
+ {vc_pattern_ipv6_l2tpv3, 0, NULL},
+ {vc_pattern_ipv4_esp, 0, NULL},
+ {vc_pattern_ipv6_esp, 0, NULL},
+ {vc_pattern_ipv4_ah, 0, NULL},
+ {vc_pattern_ipv6_ah, 0, NULL},
+ {vc_pattern_ipv4_nat_t_esp, 0, NULL},
+ {vc_pattern_ipv6_nat_t_esp, 0, NULL},
+ {vc_pattern_ipv4_pfcp, 0, NULL},
+ {vc_pattern_ipv6_pfcp, 0, NULL},
+};
+
+struct virtchnl_fdir_inset_map {
+ enum virtchnl_proto_hdr_field field;
+ enum ice_flow_field fld;
+ u64 flag;
+ u64 mask;
+};
+
+static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
+ {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
+ {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
+ {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
+ FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
+ {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
+ {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
+ {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
+ {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
+};
+
+/**
+ * ice_vc_fdir_param_check
+ * @vf: pointer to the VF structure
+ * @vsi_id: VF relative VSI ID
+ *
+ * Check for a valid VSI ID, the PF's state and the VF's state
+ *
+ * Return: 0 on success, and -EINVAL on error.
+ */
+static int
+ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EINVAL;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return -EINVAL;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
+ return -EINVAL;
+
+ if (vsi_id != vf->lan_vsi_num)
+ return -EINVAL;
+
+ if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
+ return -EINVAL;
+
+ if (!pf->vsi[vf->lan_vsi_idx])
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ice_vf_start_ctrl_vsi
+ * @vf: pointer to the VF structure
+ *
+ * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *ctrl_vsi;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ return -EEXIST;
+
+ ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "Could not setup control VSI for VF %d\n",
+ vf->vf_id);
+ return -ENOMEM;
+ }
+
+ err = ice_vsi_open_ctrl(ctrl_vsi);
+ if (err) {
+ dev_dbg(dev, "Could not open control VSI for VF %d\n",
+ vf->vf_id);
+ goto err_vsi_open;
+ }
+
+ return 0;
+
+err_vsi_open:
+ ice_vsi_release(ctrl_vsi);
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[vf->ctrl_vsi_idx] = NULL;
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+ return err;
+}
+
+/**
+ * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+
+ if (!fdir->fdir_prof) {
+ fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
+ ICE_FLTR_PTYPE_MAX,
+ sizeof(*fdir->fdir_prof),
+ GFP_KERNEL);
+ if (!fdir->fdir_prof)
+ return -ENOMEM;
+ }
+
+ if (!fdir->fdir_prof[flow]) {
+ fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
+ sizeof(**fdir->fdir_prof),
+ GFP_KERNEL);
+ if (!fdir->fdir_prof[flow])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_free_prof - free profile for this filter flow type
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ */
+static void
+ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+
+ if (!fdir->fdir_prof)
+ return;
+
+ if (!fdir->fdir_prof[flow])
+ return;
+
+ devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
+ fdir->fdir_prof[flow] = NULL;
+}
+
+/**
+ * ice_vc_fdir_free_prof_all - free all the profiles for this VF
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+ enum ice_fltr_ptype flow;
+
+ if (!fdir->fdir_prof)
+ return;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
+ ice_vc_fdir_free_prof(vf, flow);
+
+ devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
+ fdir->fdir_prof = NULL;
+}
+
+/**
+ * ice_vc_fdir_parse_flow_fld
+ * @proto_hdr: virtual channel protocol filter header
+ * @conf: FDIR configuration for each filter
+ * @fld: field type array
+ * @fld_cnt: field counter
+ *
+ * Parse the virtual channel filter header and store the fields into the
+ * field type array
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum ice_flow_field *fld, int *fld_cnt)
+{
+ struct virtchnl_proto_hdr hdr;
+ u32 i;
+
+ memcpy(&hdr, proto_hdr, sizeof(hdr));
+
+ for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
+ VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
+ if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
+ if (fdir_inset_map[i].mask &&
+ ((fdir_inset_map[i].mask & conf->inset_flag) !=
+ fdir_inset_map[i].flag))
+ continue;
+
+ fld[*fld_cnt] = fdir_inset_map[i].fld;
+ *fld_cnt += 1;
+ if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
+ return -EINVAL;
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
+ fdir_inset_map[i].field);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_set_flow_fld
+ * @vf: pointer to the VF structure
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ * @seg: array of one or more packet segments that describe the flow
+ *
+ * Parse the virtual channel add msg buffer's field vector and store the
+ * fields into the flow's packet segment info
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf,
+ struct ice_flow_seg_info *seg)
+{
+ struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
+ enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct virtchnl_proto_hdrs *proto;
+ int fld_cnt = 0;
+ int i;
+
+ proto = &rule->proto_hdrs;
+ for (i = 0; i < proto->count; i++) {
+ struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
+ int ret;
+
+ ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
+ if (ret)
+ return ret;
+ }
+
+ if (fld_cnt == 0) {
+ dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fld_cnt; i++)
+ ice_flow_set_fld(seg, fld[i],
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ * @seg: array of one or more packet segments that describe the flow
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ struct ice_flow_seg_info *seg)
+{
+ enum ice_fltr_ptype flow = conf->input.flow_type;
+ enum ice_fdir_tunnel_type ttype = conf->ttype;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NON_IP_L2:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_AH:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
+ if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ } else {
+ dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
+ ttype, vf->vf_id);
+ return -EINVAL;
+ }
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_AH:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER);
+ break;
+ default:
+ dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
+ flow, vf->vf_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_rem_prof - remove profile for this filter flow type
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
+ */
+static void
+ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+ struct ice_fd_hw_prof *vf_prof;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_hw *hw;
+ u64 prof_id;
+ int i;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
+ return;
+
+ vf_prof = fdir->fdir_prof[flow];
+
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi) {
+ dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
+ return;
+ }
+
+ if (!fdir->prof_entry_cnt[flow][tun])
+ return;
+
+ prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
+ flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
+
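+ /* Remove every flow entry installed for this profile before
+ * removing the profile itself.
+ */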
+ for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
+ if (vf_prof->entry_h[i][tun]) {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
+
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD,
+ vf_prof->entry_h[i][tun]);
+ vf_prof->entry_h[i][tun] = 0;
+ }
+
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ devm_kfree(dev, vf_prof->fdir_seg[tun]);
+ vf_prof->fdir_seg[tun] = NULL;
+
+ for (i = 0; i < vf_prof->cnt; i++)
+ vf_prof->vsi_h[i] = 0;
+
+ fdir->prof_entry_cnt[flow][tun] = 0;
+}
+
+/**
+ * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
+{
+ enum ice_fltr_ptype flow;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+ flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ ice_vc_fdir_rem_prof(vf, flow, 0);
+ ice_vc_fdir_rem_prof(vf, flow, 1);
+ }
+}
+
+/**
+ * ice_vc_fdir_write_flow_prof - write the flow's profile config into hardware
+ * @vf: pointer to the VF structure
+ * @flow: filter flow type
+ * @seg: array of one or more packet segments that describe the flow
+ * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
+ *
+ * Write the flow's profile config and packet segment into the hardware
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
+ struct ice_flow_seg_info *seg, int tun)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ struct ice_flow_seg_info *old_seg;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_fd_hw_prof *vf_prof;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u64 entry1_h = 0;
+ u64 entry2_h = 0;
+ u64 prof_id;
+ int ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi)
+ return -EINVAL;
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ vf_prof = fdir->fdir_prof[flow];
+ old_seg = vf_prof->fdir_seg[tun];
+ if (old_seg) {
+ if (!memcmp(old_seg, seg, sizeof(*seg))) {
+ dev_dbg(dev, "Duplicated profile for VF %d!\n",
+ vf->vf_id);
+ return -EEXIST;
+ }
+
+ if (fdir->fdir_fltr_cnt[flow][tun]) {
+ ret = -EINVAL;
+ dev_dbg(dev, "Input set conflicts for VF %d\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
+ /* remove previously allocated profile */
+ ice_vc_fdir_rem_prof(vf, flow, tun);
+ }
+
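+ /* Compose a profile ID from the VSI number, the flow type and a
+ * tunnel marker (ICE_FLTR_PTYPE_MAX flags a tunnel profile).
+ */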
+ prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
+ tun ? ICE_FLTR_PTYPE_MAX : 0);
+
+ status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ tun + 1, &prof);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
+ flow, vf->vf_id);
+ goto err_exit;
+ }
+
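+ /* Each profile gets two flow entries: one for the VF's LAN VSI
+ * and one for the control VSI used to program the filters.
+ */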
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
+ flow, vf->vf_id);
+ goto err_prof;
+ }
+
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev,
+ "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
+ flow, vf->vf_id);
+ goto err_entry_1;
+ }
+
+ vf_prof->fdir_seg[tun] = seg;
+ vf_prof->cnt = 0;
+ fdir->prof_entry_cnt[flow][tun] = 0;
+
+ vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
+ vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
+ vf_prof->cnt++;
+ fdir->prof_entry_cnt[flow][tun]++;
+
+ vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
+ vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
+ vf_prof->cnt++;
+ fdir->prof_entry_cnt[flow][tun]++;
+
+ return 0;
+
+err_entry_1:
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+ ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
+err_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+err_exit:
+ return ret;
+}
+
+/**
+ * ice_vc_fdir_config_input_set - config the filter's input set
+ * @vf: pointer to the VF structure
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
+ *
+ * Config the input set type and value from the virtual channel add msg buffer
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf, int tun)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_flow_seg_info *seg;
+ enum ice_fltr_ptype flow;
+ int ret;
+
+ flow = input->flow_type;
+ ret = ice_vc_fdir_alloc_prof(vf, flow);
+ if (ret) {
+ dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
+ return ret;
+ }
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
+ if (ret) {
+ dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
+ if (ret) {
+ dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
+ if (ret == -EEXIST) {
+ devm_kfree(dev, seg);
+ } else if (ret) {
+ dev_dbg(dev, "Write flow profile for VF %d failed\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ devm_kfree(dev, seg);
+ return ret;
+}
+
+/**
+ * ice_vc_fdir_match_pattern - match the filter against a header pattern
+ * @fltr: virtual channel add cmd buffer
+ * @type: virtual channel protocol filter header type
+ *
+ * Match the filter's protocol header types against the pattern in @type.
+ *
+ * Return: true if the filter matches the pattern, false otherwise.
+ */
+static bool
+ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
+ enum virtchnl_proto_hdr_type *type)
+{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
+ int i = 0;
+
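+ /* Walk the filter's headers and the pattern in lockstep; they match
+ * only if both lists end together at VIRTCHNL_PROTO_HDR_NONE.
+ */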
+ while ((i < proto->count) &&
+ (*type == proto->proto_hdr[i].type) &&
+ (*type != VIRTCHNL_PROTO_HDR_NONE)) {
+ type++;
+ i++;
+ }
+
+ return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
+}
+
+/**
+ * ice_vc_fdir_get_pattern - get the allowed filter pattern list
+ * @vf: pointer to the VF info
+ * @len: filter list length
+ *
+ * Return: pointer to allowed filter list
+ */
+static const struct virtchnl_fdir_pattern_match_item *
+ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
+{
+ const struct virtchnl_fdir_pattern_match_item *item;
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
+ sizeof(hw->active_pkg_name))) {
+ item = vc_fdir_pattern_comms;
+ *len = ARRAY_SIZE(vc_fdir_pattern_comms);
+ } else {
+ item = vc_fdir_pattern_os;
+ *len = ARRAY_SIZE(vc_fdir_pattern_os);
+ }
+
+ return item;
+}
+
+/**
+ * ice_vc_fdir_search_pattern - search for a matching filter pattern
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ *
+ * Search the supported pattern list for a pattern matching the filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
+{
+ const struct virtchnl_fdir_pattern_match_item *pattern;
+ int len, i;
+
+ pattern = ice_vc_fdir_get_pattern(vf, &len);
+
+ for (i = 0; i < len; i++)
+ if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
+ return 0;
+
+ return -EINVAL;
+}
+
+/**
+ * ice_vc_fdir_parse_pattern - parse the virtual channel filter's pattern
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's pattern and store it in conf
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
+ enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
+ enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_fdir_fltr *input = &conf->input;
+ int i;
+
+ if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+ dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
+ proto->count, vf->vf_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < proto->count; i++) {
+ struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
+ struct ip_esp_hdr *esph;
+ struct ip_auth_hdr *ah;
+ struct sctphdr *sctph;
+ struct ipv6hdr *ip6h;
+ struct udphdr *udph;
+ struct tcphdr *tcph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ u8 s_field;
+ u8 *rawh;
+
+ switch (hdr->type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ eth = (struct ethhdr *)hdr->buffer;
+ input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
+
+ if (hdr->field_selector)
+ input->ext_data.ether_type = eth->h_proto;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ iph = (struct iphdr *)hdr->buffer;
+ l3 = VIRTCHNL_PROTO_HDR_IPV4;
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+
+ if (hdr->field_selector) {
+ input->ip.v4.src_ip = iph->saddr;
+ input->ip.v4.dst_ip = iph->daddr;
+ input->ip.v4.tos = iph->tos;
+ input->ip.v4.proto = iph->protocol;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ip6h = (struct ipv6hdr *)hdr->buffer;
+ l3 = VIRTCHNL_PROTO_HDR_IPV6;
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+
+ if (hdr->field_selector) {
+ memcpy(input->ip.v6.src_ip,
+ ip6h->saddr.in6_u.u6_addr8,
+ sizeof(ip6h->saddr));
+ memcpy(input->ip.v6.dst_ip,
+ ip6h->daddr.in6_u.u6_addr8,
+ sizeof(ip6h->daddr));
+ input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
+ (ip6h->flow_lbl[0] >> 4);
+ input->ip.v6.proto = ip6h->nexthdr;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ tcph = (struct tcphdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
+ input->ip.v4.src_port = tcph->source;
+ input->ip.v4.dst_port = tcph->dest;
+ } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
+ input->ip.v6.src_port = tcph->source;
+ input->ip.v6.dst_port = tcph->dest;
+ }
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ udph = (struct udphdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
+ input->ip.v4.src_port = udph->source;
+ input->ip.v4.dst_port = udph->dest;
+ } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
+ input->ip.v6.src_port = udph->source;
+ input->ip.v6.dst_port = udph->dest;
+ }
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ sctph = (struct sctphdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
+ input->ip.v4.src_port = sctph->source;
+ input->ip.v4.dst_port = sctph->dest;
+ } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
+ input->ip.v6.src_port = sctph->source;
+ input->ip.v6.dst_port = sctph->dest;
+ }
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
+
+ if (hdr->field_selector)
+ input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
+ break;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ esph = (struct ip_esp_hdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
+ l4 == VIRTCHNL_PROTO_HDR_UDP)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
+ l4 == VIRTCHNL_PROTO_HDR_UDP)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
+ l4 == VIRTCHNL_PROTO_HDR_NONE)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
+ l4 == VIRTCHNL_PROTO_HDR_NONE)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
+
+ if (l4 == VIRTCHNL_PROTO_HDR_UDP)
+ conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
+ else
+ conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->ip.v4.sec_parm_idx = esph->spi;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->ip.v6.sec_parm_idx = esph->spi;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_AH:
+ ah = (struct ip_auth_hdr *)hdr->buffer;
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->ip.v4.sec_parm_idx = ah->spi;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->ip.v6.sec_parm_idx = ah->spi;
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ rawh = (u8 *)hdr->buffer;
+ s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
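+ /* The S flag in the first PFCP octet distinguishes node
+ * messages (0) from session messages (1).
+ */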
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
+
+ if (hdr->field_selector) {
+ if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
+ input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
+ else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
+ input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
+ }
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ rawh = (u8 *)hdr->buffer;
+ input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
+
+ if (hdr->field_selector)
+ input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
+ conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ rawh = (u8 *)hdr->buffer;
+
+ if (hdr->field_selector)
+ input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
+ conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
+ break;
+ default:
+ dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
+ hdr->type, vf->vf_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_parse_action - parse the virtual channel filter's action
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's actions and store them in conf
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_fdir_fltr *input = &conf->input;
+ u32 dest_num = 0;
+ u32 mark_num = 0;
+ int i;
+
+ if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
+ dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
+ as->count, vf->vf_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < as->count; i++) {
+ struct virtchnl_filter_action *action = &as->actions[i];
+
+ switch (action->type) {
+ case VIRTCHNL_ACTION_PASSTHRU:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
+ break;
+ case VIRTCHNL_ACTION_DROP:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ case VIRTCHNL_ACTION_QUEUE:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ input->q_index = action->act_conf.queue.index;
+ break;
+ case VIRTCHNL_ACTION_Q_REGION:
+ dest_num++;
+ input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
+ input->q_index = action->act_conf.queue.index;
+ input->q_region = action->act_conf.queue.region;
+ break;
+ case VIRTCHNL_ACTION_MARK:
+ mark_num++;
+ input->fltr_id = action->act_conf.mark_id;
+ input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
+ break;
+ default:
+ dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
+ action->type, vf->vf_id);
+ return -EINVAL;
+ }
+ }
+
+ if (dest_num == 0 || dest_num >= 2) {
+ dev_dbg(dev, "Invalid destination action for VF %d\n",
+ vf->vf_id);
+ return -EINVAL;
+ }
+
+ if (mark_num >= 2) {
+ dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_validate_fdir_fltr - validate the virtual channel filter
+ * @vf: pointer to the VF info
+ * @fltr: virtual channel add cmd buffer
+ * @conf: FDIR configuration for each filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ int ret;
+
+ ret = ice_vc_fdir_search_pattern(vf, fltr);
+ if (ret)
+ return ret;
+
+ ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
+ if (ret)
+ return ret;
+
+ return ice_vc_fdir_parse_action(vf, fltr, conf);
+}
+
+/**
+ * ice_vc_fdir_comp_rules - check whether two filter rules are identical
+ * @conf_a: FDIR configuration for filter a
+ * @conf_b: FDIR configuration for filter b
+ *
+ * Return: true if the two rules have the same values, false otherwise.
+ */
+static bool
+ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
+ struct virtchnl_fdir_fltr_conf *conf_b)
+{
+ struct ice_fdir_fltr *a = &conf_a->input;
+ struct ice_fdir_fltr *b = &conf_b->input;
+
+ if (conf_a->ttype != conf_b->ttype)
+ return false;
+ if (a->flow_type != b->flow_type)
+ return false;
+ if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
+ return false;
+ if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
+ return false;
+ if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
+ return false;
+ if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
+ return false;
+ if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
+ return false;
+ if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
+ return false;
+ if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
+ return false;
+ if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_fdir_is_dup_fltr - check for a duplicated filter rule
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ *
+ * Check if a rule with the same configuration value already exists
+ *
+ * Return: true if a duplicate rule exists, false otherwise.
+ */
+static bool
+ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct ice_fdir_fltr *desc;
+ bool ret;
+
+ list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+ struct virtchnl_fdir_fltr_conf *node =
+ to_fltr_conf_from_desc(desc);
+
+ ret = ice_vc_fdir_comp_rules(node, conf);
+ if (ret)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_vc_fdir_insert_entry - insert a FDIR conf entry and allocate its ID
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @id: pointer to ID value allocated by driver
+ *
+ * Insert FDIR conf entry into list and allocate ID for this filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_insert_entry(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf, u32 *id)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+ int i;
+
+ /* alloc ID corresponding with conf */
+ i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
+ ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
+ if (i < 0)
+ return -EINVAL;
+ *id = i;
+
+ list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @id: filter rule's ID
+ */
+static void
+ice_vc_fdir_remove_entry(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf, u32 id)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+
+ idr_remove(&vf->fdir.fdir_rule_idr, id);
+ list_del(&input->fltr_node);
+}
+
+/**
+ * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
+ * @vf: pointer to the VF info
+ * @id: filter rule's ID
+ *
+ * Return: pointer to the FDIR conf entry on success, NULL otherwise.
+ */
+static struct virtchnl_fdir_fltr_conf *
+ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
+{
+ return idr_find(&vf->fdir.fdir_rule_idr, id);
+}
+
+/**
+ * ice_vc_fdir_flush_entry - remove all FDIR conf entries
+ * @vf: pointer to the VF info
+ */
+static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
+{
+ struct virtchnl_fdir_fltr_conf *conf;
+ struct ice_fdir_fltr *desc, *temp;
+
+ list_for_each_entry_safe(desc, temp,
+ &vf->fdir.fdir_rule_list, fltr_node) {
+ conf = to_fltr_conf_from_desc(desc);
+ list_del(&desc->fltr_node);
+ devm_kfree(ice_pf_to_dev(vf->pf), conf);
+ }
+}
+
+/**
+ * ice_vc_fdir_write_fltr - write filter rule into hardware
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @add: true implies add rule, false implies delete rule
+ * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ bool add, bool is_tun)
+{
+ struct ice_fdir_fltr *input = &conf->input;
+ struct ice_vsi *vsi, *ctrl_vsi;
+ struct ice_fltr_desc desc;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int ret;
+ u8 *pkt;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ input->dest_vsi = vsi->idx;
+ input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
+ return -EINVAL;
+ }
+
+ pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
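+ /* Build the programming descriptor and a training packet, then
+ * hand both to the control VSI to program the filter.
+ */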
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ ret = ice_status_to_errno(status);
+ if (ret) {
+ dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
+ vf->vf_id, input->flow_type);
+ goto err_free_pkt;
+ }
+
+ ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
+ if (ret)
+ goto err_free_pkt;
+
+ return 0;
+
+err_free_pkt:
+ devm_kfree(dev, pkt);
+ return ret;
+}
+
+/**
+ * ice_vf_fdir_timer - FDIR programming timeout timer handler
+ * @t: pointer to timer_list
+ */
+static void ice_vf_fdir_timer(struct timer_list *t)
+{
+ struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_done;
+ struct ice_vf_fdir *fdir;
+ unsigned long flags;
+ struct ice_vf *vf;
+ struct ice_pf *pf;
+
+ fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
+ vf = container_of(fdir, struct ice_vf, fdir);
+ ctx_done = &fdir->ctx_done;
+ pf = vf->pf;
+ spin_lock_irqsave(&fdir->ctx_lock, flags);
+ if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
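+ /* Hand the context over from ctx_irq to ctx_done with a timeout
+ * status; the service task completes the virtchnl request.
+ */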
+ ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
+
+ ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
+ ctx_done->conf = ctx_irq->conf;
+ ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
+ ctx_done->v_opcode = ctx_irq->v_opcode;
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+
+ set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
+ * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
+ * @ctrl_vsi: pointer to a VF's CTRL VSI
+ * @rx_desc: pointer to FDIR Rx queue descriptor
+ */
+void
+ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_pf *pf = ctrl_vsi->back;
+ struct ice_vf_fdir_ctx *ctx_done;
+ struct ice_vf_fdir_ctx *ctx_irq;
+ struct ice_vf_fdir *fdir;
+ unsigned long flags;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ vf = &pf->vf[ctrl_vsi->vf_id];
+
+ fdir = &vf->fdir;
+ ctx_done = &fdir->ctx_done;
+ ctx_irq = &fdir->ctx_irq;
+ dev = ice_pf_to_dev(pf);
+ spin_lock_irqsave(&fdir->ctx_lock, flags);
+ if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
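+ /* Hand the context over from ctx_irq to ctx_done along with the
+ * Rx descriptor; the service task completes the virtchnl request.
+ */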
+ ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
+
+ ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
+ ctx_done->conf = ctx_irq->conf;
+ ctx_done->stat = ICE_FDIR_CTX_IRQ;
+ ctx_done->v_opcode = ctx_irq->v_opcode;
+ memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
+ spin_unlock_irqrestore(&fdir->ctx_lock, flags);
+
+ ret = del_timer(&ctx_irq->rx_tmr);
+ if (!ret)
+ dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
+
+ set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
+ * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
+ * @vf: pointer to the VF info
+ */
+static void ice_vf_fdir_dump_info(struct ice_vf *vf)
+{
+ struct ice_vsi *vf_vsi;
+ u32 fd_size, fd_cnt;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u16 vsi_num;
+
+ pf = vf->pf;
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
+
+ fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
+ fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
+ dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
+ vf->vf_id,
+ (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
+ (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
+ (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
+ (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
+}
+
+/**
+ * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
+ * @vf: pointer to the VF info
+ * @ctx: FDIR context info for post processing
+ * @status: virtchnl FDIR program status
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ enum virtchnl_fdir_prgm_status *status)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u32 stat_err, error, prog_id;
+ int ret;
+
+ stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
+ if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
+ ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
+ ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
+ if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
+ ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
+ dev_err(dev, "VF %d: Desc show add, but ctx not",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
+ ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
+ dev_err(dev, "VF %d: Desc show del, but ctx not",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
+ ICE_FXD_FLTR_WB_QW1_FAIL_S;
+ if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
+ if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
+ dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ } else {
+ dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
+ vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
+ }
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
+ ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
+ if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
+ dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
+ *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ *status = VIRTCHNL_FDIR_SUCCESS;
+
+ return 0;
+
+err_exit:
+ ice_vf_fdir_dump_info(vf);
+ return ret;
+}
+
+/**
+ * ice_vc_add_fdir_fltr_post - post process the FDIR add command
+ * @vf: pointer to the VF structure
+ * @ctx: FDIR context info for post processing
+ * @status: virtchnl FDIR program status
+ * @success: true implies success, false implies failure
+ *
+ * Post-process the flow director add command. On success, update the filter
+ * count and send a success message back over virtchnl. Otherwise, revert the
+ * context and send back a failure message over virtchnl.
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ enum virtchnl_fdir_prgm_status status,
+ bool success)
+{
+ struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum virtchnl_status_code v_ret;
+ struct virtchnl_fdir_add *resp;
+ int ret, len, is_tun;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ len = sizeof(*resp);
+ resp = kzalloc(len, GFP_KERNEL);
+ if (!resp) {
+ len = 0;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
+ goto err_exit;
+ }
+
+ if (!success)
+ goto err_exit;
+
+ is_tun = 0;
+ resp->status = status;
+ resp->flow_id = conf->flow_id;
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+
+ dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
+ vf->vf_id, conf->flow_id,
+ (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
+ "add" : "del");
+ return ret;
+
+err_exit:
+ if (resp)
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ devm_kfree(dev, conf);
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+ return ret;
+}
+
+/**
+ * ice_vc_del_fdir_fltr_post - post process the FDIR del command
+ * @vf: pointer to the VF structure
+ * @ctx: FDIR context info for post processing
+ * @status: virtchnl FDIR program status
+ * @success: true implies success, false implies failure
+ *
+ * Post-process the flow director del command. On success, remove the filter
+ * entry and send a success message back over virtchnl. Otherwise, revert the
+ * context and send back a failure message over virtchnl.
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ enum virtchnl_fdir_prgm_status status,
+ bool success)
+{
+ struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum virtchnl_status_code v_ret;
+ struct virtchnl_fdir_del *resp;
+ int ret, len, is_tun;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ len = sizeof(*resp);
+ resp = kzalloc(len, GFP_KERNEL);
+ if (!resp) {
+ len = 0;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
+ goto err_exit;
+ }
+
+ if (!success)
+ goto err_exit;
+
+ is_tun = 0;
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+
+ dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
+ vf->vf_id, conf->flow_id,
+ (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
+ "add" : "del");
+ devm_kfree(dev, conf);
+ return ret;
+
+err_exit:
+ if (resp)
+ resp->status = status;
+ if (success)
+ devm_kfree(dev, conf);
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+ kfree(resp);
+ return ret;
+}
+
+/**
+ * ice_flush_fdir_ctx - flush all the pending FDIR contexts
+ * @pf: pointer to the PF structure
+ *
+ * Flush all the pending events on the ctx_done list and process them.
+ */
+void ice_flush_fdir_ctx(struct ice_pf *pf)
+{
+ int i;
+
+ if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ struct device *dev = ice_pf_to_dev(pf);
+ enum virtchnl_fdir_prgm_status status;
+ struct ice_vf *vf = &pf->vf[i];
+ struct ice_vf_fdir_ctx *ctx;
+ unsigned long flags;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ if (vf->ctrl_vsi_idx == ICE_NO_VSI)
+ continue;
+
+ ctx = &vf->fdir.ctx_done;
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+
+ WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
+ if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
+ status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
+ dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vf_verify_rx_desc(vf, ctx, &status);
+ if (ret)
+ goto err_exit;
+
+ if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
+ ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
+ else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
+ ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
+ else
+ dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
+
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ continue;
+err_exit:
+ if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
+ ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
+ else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
+ ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
+ else
+ dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
+
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ }
+}
+
+/**
+ * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ * @v_opcode: virtual channel operation code
+ *
+ * Return: 0 on success, and other on error.
+ */
+static int
+ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_ops v_opcode)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vf_fdir_ctx *ctx;
+ unsigned long flags;
+
+ ctx = &vf->fdir.ctx_irq;
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
+ (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+ dev_dbg(dev, "VF %d: Last request is still in progress\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ ctx->flags |= ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+
+ ctx->conf = conf;
+ ctx->v_opcode = v_opcode;
+ ctx->stat = ICE_FDIR_CTX_READY;
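+ /* Arm a one-shot timer: if no programming status descriptor
+ * arrives within about 10 ms, ice_vf_fdir_timer() reports a
+ * timeout to the service task.
+ */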
+ timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
+
+ mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
+
+ return 0;
+}
+
+/**
+ * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
+{
+ struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
+ unsigned long flags;
+
+ del_timer(&ctx->rx_tmr);
+ spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
+ ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
+ spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+}
+
+/**
+ * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Return: 0 on success, and other on error.
+ */
+int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
+ struct virtchnl_fdir_add *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+ int len = 0;
+ int ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vf_start_ctrl_vsi(vf);
+ if (ret && (ret != -EEXIST)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_exit;
+ }
+
+ stat = kzalloc(sizeof(*stat), GFP_KERNEL);
+ if (!stat) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ len = sizeof(*stat);
+ ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
+ dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
+ goto err_free_conf;
+ }
+
+ if (fltr->validate_only) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_SUCCESS;
+ devm_kfree(dev, conf);
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
+ v_ret, (u8 *)stat, len);
+ goto exit;
+ }
+
+ ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
+ dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_is_dup_fltr(vf, conf);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
+ dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
+ vf->vf_id);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
+ goto err_free_conf;
+ }
+
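+ /* From here on completion is asynchronous: arm the context and
+ * timer, write the rule, and let the IRQ or timeout path reply
+ * via ice_vc_add_fdir_fltr_post().
+ */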
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
+ goto err_free_conf;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_rem_entry;
+ }
+
+exit:
+ kfree(stat);
+ return ret;
+
+err_rem_entry:
+ ice_vc_fdir_clear_irq_ctx(vf);
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+err_free_conf:
+ devm_kfree(dev, conf);
+err_exit:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
+ (u8 *)stat, len);
+ kfree(stat);
+ return ret;
+}
+
+/**
+ * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Return: 0 on success, and other on error.
+ */
+int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
+ struct virtchnl_fdir_del *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+ int len = 0;
+ int ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ stat = kzalloc(sizeof(*stat), GFP_KERNEL);
+ if (!stat) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ len = sizeof(*stat);
+
+ conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
+ if (!conf) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
+ dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
+ vf->vf_id, fltr->flow_id);
+ goto err_exit;
+ }
+
+ /* Just return failure when ctrl_vsi idx is invalid */
+ if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
+ goto err_exit;
+ }
+
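+ /* As in the add path, completion is asynchronous: the IRQ or
+ * timeout path replies via ice_vc_del_fdir_fltr_post().
+ */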
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
+ goto err_exit;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_del_tmr;
+ }
+
+ kfree(stat);
+
+ return ret;
+
+err_del_tmr:
+ ice_vc_fdir_clear_irq_ctx(vf);
+err_exit:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
+ (u8 *)stat, len);
+ kfree(stat);
+ return ret;
+}
+
+/**
+ * ice_vf_fdir_init - init FDIR resource for VF
+ * @vf: pointer to the VF info
+ */
+void ice_vf_fdir_init(struct ice_vf *vf)
+{
+ struct ice_vf_fdir *fdir = &vf->fdir;
+
+ idr_init(&fdir->fdir_rule_idr);
+ INIT_LIST_HEAD(&fdir->fdir_rule_list);
+
+ spin_lock_init(&fdir->ctx_lock);
+ fdir->ctx_irq.flags = 0;
+ fdir->ctx_done.flags = 0;
+}
+
+/**
+ * ice_vf_fdir_exit - destroy FDIR resource for VF
+ * @vf: pointer to the VF info
+ */
+void ice_vf_fdir_exit(struct ice_vf *vf)
+{
+ ice_vc_fdir_flush_entry(vf);
+ idr_destroy(&vf->fdir.fdir_rule_idr);
+ ice_vc_fdir_rem_prof_all(vf);
+ ice_vc_fdir_free_prof_all(vf);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
new file mode 100644
index 000000000000..f4e629f4c09b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_FDIR_H_
+#define _ICE_VIRTCHNL_FDIR_H_
+
+struct ice_vf;
+struct ice_pf;
+
+enum ice_fdir_ctx_stat {
+ ICE_FDIR_CTX_READY,
+ ICE_FDIR_CTX_IRQ,
+ ICE_FDIR_CTX_TIMEOUT,
+};
+
+struct ice_vf_fdir_ctx {
+ struct timer_list rx_tmr;
+ enum virtchnl_ops v_opcode;
+ enum ice_fdir_ctx_stat stat;
+ union ice_32b_rx_flex_desc rx_desc;
+#define ICE_VF_FDIR_CTX_VALID BIT(0)
+ u32 flags;
+
+ void *conf;
+};
+
+/* VF FDIR information structure */
+struct ice_vf_fdir {
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ struct ice_fd_hw_prof **fdir_prof;
+
+ struct idr fdir_rule_idr;
+ struct list_head fdir_rule_list;
+
+ spinlock_t ctx_lock; /* protects FDIR context info */
+ struct ice_vf_fdir_ctx ctx_irq;
+ struct ice_vf_fdir_ctx ctx_done;
+};
+
+#ifdef CONFIG_PCI_IOV
+int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg);
+int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg);
+void ice_vf_fdir_init(struct ice_vf *vf);
+void ice_vf_fdir_exit(struct ice_vf *vf);
+void
+ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
+ union ice_32b_rx_flex_desc *rx_desc);
+void ice_flush_fdir_ctx(struct ice_pf *pf);
+#else
+static inline void
+ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { }
+static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { }
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 1f38a8d0c525..a1d22d2aa0bd 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,6 +5,256 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
+#include "ice_flow.h"
+#include "ice_virtchnl_allowlist.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+};
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return vf->pf->vsi[vf->lan_vsi_idx];
+}
/**
* ice_validate_vf_id - helper to check if VF ID is valid
@@ -197,11 +447,30 @@ static void ice_vf_invalidate_vsi(struct ice_vf *vf)
*/
static void ice_vf_vsi_release(struct ice_vf *vf)
{
- ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
+ ice_vsi_release(ice_get_vf_vsi(vf));
ice_vf_invalidate_vsi(vf);
}
/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF that control VSI is being invalidated on
+ */
+static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF that control VSI is being released on
+ */
+static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+ ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -214,6 +483,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
* accessing the VF's VSI after it's freed or invalidated.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_vf_fdir_exit(vf);
+ /* free VF control VSI */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
/* free VSI and disconnect it from the parent uplink */
if (vf->lan_vsi_idx != ICE_NO_VSI) {
@@ -250,7 +523,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
struct ice_hw *hw;
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
dev = ice_pf_to_dev(pf);
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
@@ -325,10 +598,7 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)
*/
static void ice_dis_vf_qs(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = pf->vsi[vf->lan_vsi_idx];
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
@@ -348,7 +618,7 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pf->vf)
return;
- while (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ while (test_and_set_bit(ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
/* Disable IOV before freeing resources. This lets any VF drivers
@@ -401,7 +671,15 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
- clear_bit(__ICE_VF_DIS, pf->state);
+
+ /* clear malicious info if the VFs are getting released */
+ for (i = 0; i < tmp; i++)
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
+ ICE_MAX_VF_COUNT, i))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ i);
+
+ clear_bit(ICE_VF_DIS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
@@ -560,6 +838,28 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+ ice_vf_ctrl_invalidate_vsi(vf);
+ }
+
+ return vsi;
+}
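Unlike the LAN VSI, the control VSI is created lazily. A sketch of the expected call pattern in the FDIR path, assuming the caller tests ctrl_vsi_idx first; this is a kernel-context fragment with a hypothetical helper name, not the actual ice_virtchnl_fdir.c code:

/* hypothetical: lazily create the control VSI on the first FDIR rule.
 * ice_vf_ctrl_vsi_setup() re-invalidates ctrl_vsi_idx on failure, so
 * the caller only needs a NULL check.
 */
static struct ice_vsi *example_get_ctrl_vsi(struct ice_vf *vf)
{
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return vf->pf->vsi[vf->ctrl_vsi_idx];

	return ice_vf_ctrl_vsi_setup(vf); /* NULL on failure */
}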
+
+/**
* ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
* @vf: pointer to VF that the first MSIX vector index is being calculated for
@@ -585,8 +885,8 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
*/
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
u16 vlan_id = 0;
int err;
@@ -622,8 +922,8 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
*/
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
enum ice_status status;
u8 broadcast[ETH_ALEN];
@@ -724,8 +1024,8 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
*/
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
struct ice_hw *hw = &vf->pf->hw;
u32 reg;
@@ -772,7 +1072,7 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
*/
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
ice_ena_vf_msix_mappings(vf);
ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
@@ -1035,7 +1335,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
static void ice_vf_clear_counters(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
vf->num_mac = 0;
vsi->num_vlan = 0;
@@ -1095,8 +1395,8 @@ static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
*/
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
ice_vf_set_host_trust_cfg(vf);
@@ -1136,10 +1436,8 @@ static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
*/
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_rebuild(vsi, true)) {
dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
@@ -1212,8 +1510,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
if (!pf->num_alloc_vfs)
return false;
+ /* clear all malicious info if the VFs are getting reset */
+ ice_for_each_vf(pf, i)
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
+
/* If VFs have been disabled, there is no need to reset */
- if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ if (test_and_set_bit(ICE_VF_DIS, pf->state))
return false;
/* Begin reset on all VFs at once */
@@ -1256,13 +1559,23 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ ice_vf_fdir_exit(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * set up only when the VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
}
ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
+ clear_bit(ICE_VF_DIS, pf->state);
return true;
}
@@ -1282,7 +1595,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
* means something else is resetting the VF, so we shouldn't continue.
* Otherwise, set disable VF state bit for actual reset, and continue.
*/
- return (test_bit(__ICE_VF_DIS, pf->state) ||
+ return (test_bit(ICE_VF_DIS, pf->state) ||
test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}
@@ -1307,7 +1620,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev = ice_pf_to_dev(pf);
- if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
vf->vf_id);
return true;
@@ -1323,7 +1636,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
ice_trigger_vf_reset(vf, is_vflr, false);
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
ice_dis_vf_qs(vf);
@@ -1353,6 +1666,9 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
usleep_range(10, 20);
}
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
/* Display a warning if VF didn't manage to reset in time, but need to
* continue on with the operation.
*/
@@ -1369,15 +1685,26 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
dev_err(dev, "disabling promiscuous mode failed\n");
}
+ ice_vf_fdir_exit(vf);
+ /* clean VF control VSI when resetting VF since it should be set up
+ * only when the VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi_with_release(vf);
ice_vf_post_vsi_rebuild(vf);
+ /* if the VF has been reset, allow it to come up again */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", vf->vf_id);
+
return true;
}
@@ -1532,7 +1859,7 @@ teardown:
}
/**
- * ice_set_dflt_settings - set VF defaults during initialization/creation
+ * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
* @pf: PF holding reference to all VFs for default configuration
*/
static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
@@ -1549,6 +1876,13 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
vf->spoofchk = true;
vf->num_vf_qs = pf->num_qps_per_vf;
+ ice_vc_set_default_allowlist(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when the VF
+ * creates its first FDIR rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
}
}
@@ -1586,7 +1920,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
- set_bit(__ICE_OICR_INTR_DIS, pf->state);
+ set_bit(ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
ret = pci_enable_sriov(pf->pdev, num_vfs);
@@ -1614,7 +1948,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
goto err_unroll_sriov;
}
- clear_bit(__ICE_VF_DIS, pf->state);
+ clear_bit(ICE_VF_DIS, pf->state);
return 0;
err_unroll_sriov:
@@ -1626,7 +1960,7 @@ err_pci_disable_sriov:
err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
- clear_bit(__ICE_OICR_INTR_DIS, pf->state);
+ clear_bit(ICE_OICR_INTR_DIS, pf->state);
return ret;
}
@@ -1704,6 +2038,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
+ enum ice_status status;
int err;
err = ice_check_sriov_allowed(pf);
@@ -1712,6 +2047,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
+ ice_mbx_deinit_snapshot(&pf->hw);
ice_free_vfs(pf);
if (pf->lag)
ice_enable_lag(pf->lag);
@@ -1722,9 +2058,15 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
return -EBUSY;
}
+ status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (status)
+ return ice_status_to_errno(status);
+
err = ice_pci_sriov_ena(pf, num_vfs);
- if (err)
+ if (err) {
+ ice_mbx_deinit_snapshot(&pf->hw);
return err;
+ }
if (pf->lag)
ice_disable_lag(pf->lag);
@@ -1744,7 +2086,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
unsigned int vf_id;
u32 reg;
- if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
!pf->num_alloc_vfs)
return;
@@ -1789,7 +2131,7 @@ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
struct ice_vsi *vsi;
u16 rxq_idx;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
ice_for_each_rxq(vsi, rxq_idx)
if (vsi->rxq_map[rxq_idx] == pfq)
@@ -1848,7 +2190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*
* send msg to VF
*/
-static int
+int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
@@ -1929,8 +2271,7 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
*/
static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
- struct ice_port_info *pi = vsi->port_info;
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
u16 max_frame_size;
max_frame_size = pi->phy.link_info.max_frame_size;
@@ -1978,7 +2319,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
@@ -1996,6 +2337,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -2017,6 +2361,12 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -2034,6 +2384,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
/* match guest capabilities */
vf->driver_caps = vfres->vf_cap_flags;
+ ice_vc_set_caps_allowlist(vf);
+ ice_vc_set_working_allowlist(vf);
+
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
err:
@@ -2084,7 +2437,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
*
* check for the valid VSI ID
*/
-static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -2125,6 +2478,222 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len)
}
/**
+ * ice_vc_parse_rss_cfg - parse hash fields and headers from a virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
+ * to configure
+ * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ *
+ * Return true if all the protocol header and hash fields in the RSS cfg could
+ * be parsed, else return false
+ *
+ * This function parses the virtchnl RSS cfg into the intended
+ * hash fields and protocol headers for RSS configuration
+ */
+static bool
+ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
+ u32 *addl_hdrs, u64 *hash_flds)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+
+ if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
+ sizeof(hw->active_pkg_name))) {
+ hf_list = ice_vc_hash_field_list_comms;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
+ hdr_list = ice_vc_hdr_list_comms;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
+ } else {
+ hf_list = ice_vc_hash_field_list_os;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
+ hdr_list = ice_vc_hdr_list_os;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
+ }
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ bool hdr_found = false;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers. */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr) {
+ *addl_hdrs |= hdr_map.ice_hdr;
+ hdr_found = true;
+ }
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds/deletes an RSS config
+ */
+static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ struct ice_vsi_ctx *ctx;
+ enum ice_status status;
+ u8 lut_type, hash_type;
+
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto error_param;
+ }
+
+ ctx->info.q_opt_rss = ((lut_type <<
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ (hash_type &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+ ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+ } else {
+ u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ u64 hash_flds = ICE_HASH_INVALID;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
+ &hash_flds)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
+ addl_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
+ vsi->vsi_num, v_ret);
+ }
+ } else {
+ enum ice_status status;
+
+ status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
+ addl_hdrs);
+ /* We just ignore ICE_ERR_DOES_NOT_EXIST, because if two
+ * configurations share the same profile, removing one of them
+ * actually removes both, since the profile is deleted.
+ */
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
+ vf->vf_id, ice_stat_str(status));
+ }
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
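For reference, the message this handler consumes is built on the VF side. A sketch of a request for symmetric Toeplitz hashing over TCP ports, as a kernel-context fragment; the struct members match how this file reads them (proto_hdrs.count, proto_hdr[].type, proto_hdr[].field_selector, rss_algorithm), while the VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC name is assumed from the enum range checked above:

struct virtchnl_rss_cfg cfg = {};

cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
cfg.proto_hdrs.count = 1;
cfg.proto_hdrs.proto_hdr[0].type = VIRTCHNL_PROTO_HDR_TCP;
cfg.proto_hdrs.proto_hdr[0].field_selector =
	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT);
/* sent as VIRTCHNL_OP_ADD_RSS_CFG; the handler above maps it to
 * ICE_FLOW_HASH_TCP_PORT via the match tables.
 */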
+
+/**
* ice_vc_config_rss_key
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2136,7 +2705,6 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2159,13 +2727,13 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (ice_set_rss(vsi, vrk->key, NULL, 0))
+ if (ice_set_rss_key(vsi, vrk->key))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
@@ -2183,7 +2751,6 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2206,13 +2773,13 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
@@ -2289,7 +2856,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
if (ret)
return ret;
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
netdev_err(netdev, "VSI %d for VF %d is null\n",
vf->lan_vsi_idx, vf->vf_id);
@@ -2394,7 +2961,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2530,7 +3097,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct ice_eth_stats stats = { 0 };
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2543,7 +3109,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2633,7 +3199,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
unsigned long q_map;
u16 vf_q_id;
@@ -2653,7 +3218,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2685,7 +3250,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
set_bit(vf_q_id, vf->rxq_ena);
}
- vsi = pf->vsi[vf->lan_vsi_idx];
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
@@ -2724,7 +3288,6 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
unsigned long q_map;
u16 vf_q_id;
@@ -2745,7 +3308,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2910,7 +3473,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2987,7 +3550,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3222,7 +3785,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
@@ -3454,7 +4017,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3621,7 +4184,6 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -3634,7 +4196,7 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (ice_vsi_manage_vlan_stripping(vsi, true))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3652,7 +4214,6 @@ error_param:
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -3665,7 +4226,7 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3691,7 +4252,7 @@ error_param:
*/
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
- struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
if (!vsi)
return -EINVAL;
@@ -3747,6 +4308,13 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
err = -EINVAL;
}
+ if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+ ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+ 0);
+ return;
+ }
+
error_handler:
if (err) {
ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
@@ -3816,6 +4384,18 @@ error_handler:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
err = ice_vc_dis_vlan_stripping(vf);
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ err = ice_vc_add_fdir_fltr(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ err = ice_vc_del_fdir_fltr(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ err = ice_vc_handle_rss_cfg(vf, msg, true);
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ err = ice_vc_handle_rss_cfg(vf, msg, false);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
@@ -4066,7 +4646,7 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
if (ret)
return ret;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi)
return -EINVAL;
@@ -4108,7 +4688,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
}
/**
- * ice_print_vfs_mdd_event - print VFs malicious driver detect event
+ * ice_print_vfs_mdd_events - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
* Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
@@ -4120,7 +4700,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
int i;
/* check that there are pending MDD events to print */
- if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
+ if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
return;
/* VF MDD event logs are rate limited to one second intervals */
@@ -4160,7 +4740,6 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
*/
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
- struct pci_dev *vfdev;
u16 vf_id;
int pos;
@@ -4169,6 +4748,8 @@ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos) {
+ struct pci_dev *vfdev;
+
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
&vf_id);
vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
@@ -4180,3 +4761,70 @@ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
}
}
}
+
+/**
+ * ice_is_malicious_vf - helper function to detect a malicious VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ * @num_msg_proc: the number of messages processed so far
+ * @num_msg_pending: the number of messages pending in the admin queue
+ */
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending)
+{
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_mbx_data mbxdata;
+ enum ice_status status;
+ bool malvf = false;
+ struct ice_vf *vf;
+
+ if (ice_validate_vf_id(pf, vf_id))
+ return false;
+
+ vf = &pf->vf[vf_id];
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return false;
+
+ mbxdata.num_msg_proc = num_msg_proc;
+ mbxdata.num_pending_arq = num_msg_pending;
+ mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+ mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+ /* check to see if we have a malicious VF */
+ status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
+ if (status)
+ return false;
+
+ if (malvf) {
+ bool report_vf = false;
+
+ /* if the VF is malicious and we haven't let the user
+ * know about it, then let them know now
+ */
+ status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
+ ICE_MAX_VF_COUNT, vf_id,
+ &report_vf);
+ if (status)
+ dev_dbg(dev, "Error reporting malicious VF\n");
+
+ if (report_vf) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+ if (pf_vsi)
+ dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+ &vf->dflt_lan_addr.addr[0],
+ pf_vsi->netdev->dev_addr);
+ }
+
+ return true;
+ }
+
+ /* if there was an error in detection or the VF is not malicious then
+ * return false
+ */
+ return false;
+}
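The intended call site is the PF's mailbox receive loop. A sketch of how the service path would gate message processing on this check, assuming a loop shape like the one in ice_main.c (variable names here are illustrative):

/* sketch: gate VF mailbox messages on the malicious-VF check before
 * handing them to the virtchnl dispatcher
 */
if (!ice_is_malicious_vf(pf, &event, num_msg_processed, num_msg_pending))
	ice_vc_process_vf_msg(pf, &event);
/* else: drop the message; the offending VF was already reported */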
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 0f519fba3770..d800ed83d6c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -4,6 +4,7 @@
#ifndef _ICE_VIRTCHNL_PF_H_
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
+#include "ice_virtchnl_fdir.h"
/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */
#define ICE_MAX_VLAN_PER_VF 8
@@ -70,6 +71,8 @@ struct ice_vf {
u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
+ u16 ctrl_vsi_idx;
+ struct ice_vf_fdir fdir;
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
@@ -100,6 +103,7 @@ struct ice_vf {
u16 num_vf_qs; /* num of queue configured per VF */
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
+ DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
};
#ifdef CONFIG_PCI_IOV
@@ -116,6 +120,9 @@ void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -138,6 +145,11 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -151,6 +163,15 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
+ice_is_malicious_vf(struct ice_pf __always_unused *pf,
+ struct ice_rq_event_info __always_unused *event,
+ u16 __always_unused num_msg_proc,
+ u16 __always_unused num_msg_pending)
+{
+ return false;
+}
+
+static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
bool __always_unused is_vflr)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 9f94d9159acd..faa7b8d96adb 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -108,9 +108,6 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
ice_cfg_itr(hw, q_vector);
- wr32(hw, GLINT_RATE(reg_idx),
- ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
-
ice_for_each_ring(ring, q_vector->tx)
ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
q_vector->tx.itr_idx);
@@ -159,7 +156,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
rx_ring = vsi->rx_rings[q_idx];
q_vector = rx_ring->q_vector;
- while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
timeout--;
if (!timeout)
return -EBUSY;
@@ -249,7 +246,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
- clear_bit(__ICE_CFG_BUSY, vsi->state);
+ clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
@@ -473,6 +470,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ rcu_read_unlock();
+ return result;
+ }
+
switch (act) {
case XDP_PASS:
break;
@@ -480,10 +485,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
- break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -754,7 +755,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_ring *ring;
- if (test_bit(__ICE_DOWN, vsi->state))
+ if (test_bit(ICE_DOWN, vsi->state))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index d2e2c50ce257..ca5429774994 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -340,10 +340,10 @@
#define I210_RXPBSIZE_PB_32KB 0x00000020
#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define I210_TXPBSIZE_MASK 0xC0FFFFFF
-#define I210_TXPBSIZE_PB0_8KB (8 << 0)
-#define I210_TXPBSIZE_PB1_8KB (8 << 6)
-#define I210_TXPBSIZE_PB2_4KB (4 << 12)
-#define I210_TXPBSIZE_PB3_4KB (4 << 18)
+#define I210_TXPBSIZE_PB0_6KB (6 << 0)
+#define I210_TXPBSIZE_PB1_6KB (6 << 6)
+#define I210_TXPBSIZE_PB2_6KB (6 << 12)
+#define I210_TXPBSIZE_PB3_6KB (6 << 18)
#define I210_DTXMXPKTSZ_DEFAULT 0x00000098
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index fd8eb2f9ab9d..e63ee3cca5ea 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -484,6 +484,31 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
}
/**
+ * igb_i21x_hw_doublecheck - double check for a potential HW issue on i21x
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the multicast array was written correctly.
+ * If not, rewrites the failed entries and checks again.
+ **/
+static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
+{
+ bool is_failed;
+ int i;
+
+ do {
+ is_failed = false;
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
+ if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) {
+ is_failed = true;
+ array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+ break;
+ }
+ }
+ } while (is_failed);
+}
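The doublecheck re-reads the MTA entries back from hardware and rewrites any mismatch against the shadow copy until a full pass comes back clean. A standalone model of that write-verify-retry pattern, with plain arrays standing in for the MMIO accessors:

#include <stdbool.h>
#include <stdio.h>

#define MTA_REGS 8

static unsigned int hw_mta[MTA_REGS]; /* stands in for array_rd32/wr32 */
static unsigned int shadow[MTA_REGS];

/* same loop shape as igb_i21x_hw_doublecheck(): rewrite the first
 * mismatch found, then rescan from the top until a pass is clean
 */
static void doublecheck(void)
{
	bool is_failed;

	do {
		is_failed = false;
		for (int i = MTA_REGS - 1; i >= 0; i--) {
			if (hw_mta[i] != shadow[i]) {
				is_failed = true;
				hw_mta[i] = shadow[i]; /* rewrite + flush */
				break;
			}
		}
	} while (is_failed);
}

int main(void)
{
	shadow[3] = 0xdeadbeef; /* pretend one write was dropped by HW */
	doublecheck();
	printf("MTA[3] = 0x%x\n", hw_mta[3]);
	return 0;
}

The break-and-rescan keeps the loop simple: after every rewrite the whole array is verified again, so the function only returns once hardware and shadow agree everywhere.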
+
+/**
* igb_update_mc_addr_list - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
@@ -516,6 +541,8 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
wrfl();
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211)
+ igb_i21x_hw_doublecheck(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 33cceb77e960..29383112bc19 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -441,7 +441,7 @@ out_no_read:
}
/**
- * e1000_init_mbx_params_pf - set initial values for pf mailbox
+ * igb_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for pf mailbox
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 8c8eb82e6272..a018000f7db9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -836,6 +836,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
break;
case e1000_ms_auto:
data &= ~CR_1000T_MS_ENABLE;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 28baf203459a..7545da216d8b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2347,35 +2347,23 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
IGB_TEST_LEN*ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igb_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
- memcpy(p, igb_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igb_gstrings_stats[i].stat_string);
+ for (i = 0; i < IGB_NETDEV_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igb_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_restart", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_restart", i);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_drops", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_err", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_alloc_failed", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_drops", i);
+ ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
+ ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
@@ -3022,6 +3010,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
break;
case ETHTOOL_SRXCLSRLDEL:
ret = igb_del_ethtool_nfc_entry(adapter, cmd);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a45cd2b416c8..038a9fd1af44 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1921,8 +1921,8 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
*/
val = rd32(E1000_TXPBS);
val &= ~I210_TXPBSIZE_MASK;
- val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
- I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
+ val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
+ I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
wr32(E1000_TXPBS, val);
val = rd32(E1000_RXPBS);
@@ -2037,7 +2037,7 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
- * Detect and switch function for Media Auto Sense
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
* @adapter: address of the board private structure
**/
static void igb_check_swap_media(struct igb_adapter *adapter)
@@ -2934,7 +2934,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
int cpu = smp_processor_id();
struct igb_ring *tx_ring;
struct netdev_queue *nq;
- int drops = 0;
+ int nxmit = 0;
int i;
if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
@@ -2961,10 +2961,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
int err;
err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
- if (err != IGB_XDP_TX) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err != IGB_XDP_TX)
+ break;
+ nxmit++;
}
__netif_tx_unlock(nq);
@@ -2972,7 +2971,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);
- return n - drops;
+ return nxmit;
}
static const struct net_device_ops igb_netdev_ops = {
@@ -3115,7 +3114,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
return 0;
/* Initialize the i2c bus which is controlled by the registers.
- * This bus will use the i2c_algo_bit structue that implements
+ * This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
*/
adapter->i2c_adap.owner = THIS_MODULE;
@@ -4020,7 +4019,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
}
/**
- * igb_open - Called when a network interface is made active
+ * __igb_open - Called when a network interface is made active
* @netdev: network interface device structure
* @resuming: indicates whether we are in a resume call
*
@@ -4138,7 +4137,7 @@ int igb_open(struct net_device *netdev)
}
/**
- * igb_close - Disables a network interface
+ * __igb_close - Disables a network interface
* @netdev: network interface device structure
* @suspending: indicates we are in a suspend call
*
@@ -5856,7 +5855,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
*/
if (tx_ring->launchtime_enable) {
ts = ktime_to_timespec64(first->skb->tstamp);
- first->skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(first->skb);
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->seqnum_seed = 0;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 86a576201f5f..ba61fe9bfaf4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1025,6 +1025,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
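+ /* explicit break: the previous fall-through into HWTSTAMP_TX_ON
+ * was a no-op, but an explicit break avoids implicit-fallthrough
+ * warnings
+ */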
+ break;
case HWTSTAMP_TX_ON:
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 1c3051db9085..95d1e8c490a4 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
+igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 1b08a7dc7bc4..25871351730b 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -28,6 +28,11 @@ void igc_ethtool_set_ops(struct net_device *);
#define MAX_ETYPE_FILTER 8
#define IGC_RETA_SIZE 128
+/* SDP support */
+#define IGC_N_EXTTS 2
+#define IGC_N_PEROUT 2
+#define IGC_N_SDP 4
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -111,6 +116,8 @@ struct igc_ring {
struct sk_buff *skb;
};
};
+
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
/* Board specific private data structure */
@@ -219,6 +226,16 @@ struct igc_adapter {
ktime_t ptp_reset_start; /* Reset time in clock mono */
char fw_version[32];
+
+ struct bpf_prog *xdp_prog;
+
+ bool pps_sys_wrap_on;
+
+ struct ptp_pin_desc sdp_config[IGC_N_SDP];
+ struct {
+ struct timespec64 start;
+ struct timespec64 period;
+ } perout[IGC_N_PEROUT];
};
void igc_up(struct igc_adapter *adapter);
@@ -373,6 +390,8 @@ enum igc_tx_flags {
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
+
+ IGC_TX_FLAGS_XDP = 0x100,
};
enum igc_boards {
@@ -395,7 +414,10 @@ enum igc_boards {
struct igc_tx_buffer {
union igc_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
@@ -504,6 +526,10 @@ enum igc_ring_flags_t {
#define ring_uses_large_buffer(ring) \
test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+ set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+ clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define ring_uses_build_skb(ring) \
test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
@@ -547,8 +573,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
- struct sk_buff *skb);
+ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index b909f00a79e6..0103dda32f39 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -8,6 +8,8 @@
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
+#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
/* Definitions for power management and wakeup registers */
@@ -96,6 +98,9 @@
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
+
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -403,6 +408,64 @@
#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
+/* Timer selection bits */
+#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */
+#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */
+#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */
+#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */
+#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */
+#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */
+#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */
+#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */
+
+/* TSAUXC Configuration Bits */
+#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
+#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
+#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
+#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp 0 Taken. */
+#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
+#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp 1 Taken. */
+#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */
+#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */
+#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */
+#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */
+#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */
+#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */
+
+/* SDP Configuration Bits */
+#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
+#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
+#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
+#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
+#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
+#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
+#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
+#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
+#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
+#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
+#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
+#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
+#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
+#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
+#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
+#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
+#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
+#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
+#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
+#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
+#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
+#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
+
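Taken together, the TSAUXC and SDP-select bits route a target time or configurable-frequency clock to a pin. A standalone sketch composing the values to put freq clock 0 on SDP0; the macro subset is copied from the defines above, and the assumption that they are OR'd into the TSAUXC and TSSDP registers follows how the igc PTP code uses this block:

#include <stdio.h>

/* subset of the igc defines above */
#define IGC_TSAUXC_EN_CLK0  (1u << 2) /* enable configurable freq clock 0 */
#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* freq clock 0 output on SDP0 */
#define IGC_TS_SDP0_EN      (1u << 8) /* SDP0 assigned to Tsync */

int main(void)
{
	/* the driver would read-modify-write these over MMIO; here we
	 * only show the values being composed
	 */
	unsigned int tsauxc = IGC_TSAUXC_EN_CLK0;
	unsigned int tssdp  = IGC_TS_SDP0_SEL_FC0 | IGC_TS_SDP0_EN;

	printf("TSAUXC |= 0x%08x, TSSDP |= 0x%08x\n", tsauxc, tssdp);
	return 0;
}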
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
@@ -441,11 +504,6 @@
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 8722294ab90c..9722449d7633 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -65,6 +65,8 @@ static const struct igc_stats igc_gstrings_stats[] = {
IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ IGC_STAT("tx_lpi_counter", stats.tlpic),
+ IGC_STAT("rx_lpi_counter", stats.rlpic),
};
#define IGC_NETDEV_STAT(_net_stat) { \
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 7ec04e48860c..b2ef9fde97b3 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -6,7 +6,7 @@
#include "igc_hw.h"
/**
- * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
+ * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
@@ -229,10 +229,11 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
- goto out;
+ return ret_val;
}
for (i = 0; i < words; i++) {
+ ret_val = -IGC_ERR_NVM;
eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
(data[i] << IGC_NVM_RW_REG_DATA) |
IGC_NVM_RW_REG_START;
@@ -254,7 +255,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
}
}
-out:
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 4d989ebc9713..069471b7ffb0 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -10,17 +10,24 @@
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
+#include <linux/bpf_trace.h>
#include <net/ipv6.h>
#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
+#include "igc_xdp.h"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+#define IGC_XDP_PASS 0
+#define IGC_XDP_CONSUMED BIT(0)
+#define IGC_XDP_TX BIT(1)
+#define IGC_XDP_REDIRECT BIT(2)
+
static int debug = -1;
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -176,8 +183,10 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
while (i != tx_ring->next_to_use) {
union igc_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+ xdp_return_frame(tx_buffer->xdpf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -375,6 +384,8 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
i = 0;
}
+ clear_ring_uses_large_buffer(rx_ring);
+
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -403,6 +414,8 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
{
igc_clean_rx_ring(rx_ring);
+ igc_xdp_unregister_rxq_info(rx_ring);
+
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -440,7 +453,11 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev;
- int size, desc_len;
+ int size, desc_len, res;
+
+ res = igc_xdp_register_rxq_info(rx_ring);
+ if (res < 0)
+ return res;
size = sizeof(struct igc_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
@@ -466,6 +483,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
return 0;
err:
+ igc_xdp_unregister_rxq_info(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
@@ -497,6 +515,11 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
return err;
}
+static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+ return !!adapter->xdp_prog;
+}
+
/**
* igc_configure_rx_ring - Configure a receive ring after Reset
* @adapter: board private structure
@@ -513,6 +536,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
u32 srrctl = 0, rxdctl = 0;
u64 rdba = ring->dma;
+ if (igc_xdp_is_enabled(adapter))
+ set_ring_uses_large_buffer(ring);
+
/* disable the queue */
wr32(IGC_RXDCTL(reg_idx), 0);
@@ -941,7 +967,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
ktime_t txtime = first->skb->tstamp;
- first->skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(first->skb);
context_desc->launch_time = igc_tx_launchtime(adapter,
txtime);
} else {
@@ -1029,7 +1055,7 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
((u32)((_input) & (_flag)) / ((_flag) / (_result))))
-static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
+static u32 igc_tx_cmd_type(u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
@@ -1078,7 +1104,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
- u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
+ u32 cmd_type = igc_tx_cmd_type(tx_flags);
tx_desc = IGC_TX_DESC(tx_ring, i);
@@ -1480,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
}
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct igc_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -1499,6 +1532,32 @@ static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
return rx_buffer;
}
+static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
+ unsigned int truesize)
+{
+#if (PAGE_SIZE < 8192)
+ buffer->page_offset ^= truesize;
+#else
+ buffer->page_offset += truesize;
+#endif
+}
+
+static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = igc_rx_pg_size(ring) / 2;
+#else
+ truesize = ring_uses_build_skb(ring) ?
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
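The two helpers above centralize the driver's half-page recycling scheme. As a minimal standalone sketch, assuming a 4K page split into two 2K halves, the XOR in igc_rx_buffer_flip() just toggles between the halves, while on larger pages the offset advances linearly:

	/* Sketch only: truesize is half of a 4K page, so the offset
	 * alternates 0 -> 2048 -> 0 and each half of the page is handed
	 * to hardware in turn while the stack may still hold the other.
	 */
	static unsigned int flip_offset_sketch(unsigned int offset)
	{
		const unsigned int truesize = 2048;	/* igc_rx_pg_size() / 2 */

		return offset ^ truesize;
	}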
/**
* igc_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -1513,20 +1572,19 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
struct sk_buff *skb,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
- rx_buffer->page_offset ^= truesize;
+#if (PAGE_SIZE < 8192)
+ truesize = igc_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
- rx_buffer->page_offset += truesize;
-#endif
+
+ igc_rx_buffer_flip(rx_buffer, truesize);
}
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
@@ -1535,12 +1593,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IGC_SKB_PAD + size);
-#endif
+ unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
struct sk_buff *skb;
/* prefetch first cache line of first page */
@@ -1555,27 +1608,18 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
skb_reserve(skb, IGC_SKB_PAD);
__skb_put(skb, size);
- /* update buffer offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-
+ igc_rx_buffer_flip(rx_buffer, truesize);
return skb;
}
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- union igc_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
+ unsigned int size = xdp->data_end - xdp->data;
+ unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+ void *va = xdp->data;
unsigned int headlen;
struct sk_buff *skb;
@@ -1587,11 +1631,8 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
- igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGC_TS_HDR_LEN;
- size -= IGC_TS_HDR_LEN;
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* Determine available headroom for copy */
headlen = size;
@@ -1607,11 +1648,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
skb_add_rx_frag(skb, 0, rx_buffer->page,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
+ igc_rx_buffer_flip(rx_buffer, truesize);
} else {
rx_buffer->pagecnt_bias++;
}
@@ -1648,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1659,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define IGC_LAST_OFFSET \
@@ -1673,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
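Seeding both the page reference count and pagecnt_bias at USHRT_MAX (instead of a bias of 1, as before this change) turns per-buffer atomic refcount updates into a rare batch operation: one page_ref_add(page, USHRT_MAX - 1) only when the bias is nearly exhausted. A sketch of the bookkeeping, where refcount stands in for page_ref_count() and bias for rx_buffer->pagecnt_bias:

	/* The real count of outstanding users is refcount - bias; the
	 * page is reusable iff the driver is the only remaining user.
	 */
	static bool page_is_reusable_sketch(unsigned int refcount,
					    unsigned int bias)
	{
		return (refcount - bias) <= 1;
	}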
@@ -1726,6 +1764,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -1743,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
}
static void igc_put_rx_buffer(struct igc_ring *rx_ring,
- struct igc_rx_buffer *rx_buffer)
+ struct igc_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
- if (igc_can_reuse_rx_page(rx_buffer)) {
+ if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
igc_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -1765,7 +1808,14 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
- return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+ struct igc_adapter *adapter = rx_ring->q_vector->adapter;
+
+ if (ring_uses_build_skb(rx_ring))
+ return IGC_SKB_PAD;
+ if (igc_xdp_is_enabled(adapter))
+ return XDP_PACKET_HEADROOM;
+
+ return 0;
}
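With XDP enabled, the ring reserves XDP_PACKET_HEADROOM (256 bytes, per the core definition) in front of each frame. The resulting buffer layout can be pictured as:

	/*
	 *  page_offset
	 *  |<--- headroom --->|<-------- packet data -------->|
	 *  ^                  ^
	 *  data_hard_start    data
	 *
	 * The headroom lets an XDP program prepend headers with
	 * bpf_xdp_adjust_head() without copying the frame.
	 */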
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
@@ -1804,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = igc_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -1879,17 +1930,196 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
}
}
+static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
+ struct xdp_frame *xdpf,
+ struct igc_ring *ring)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->xdpf = xdpf;
+ buffer->tx_flags = IGC_TX_FLAGS_XDP;
+ buffer->protocol = 0;
+ buffer->bytecount = xdpf->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, xdpf->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+ return 0;
+}
+
+/* This function requires __netif_tx_lock to be held by the caller. */
+static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
+ struct xdp_frame *xdpf)
+{
+ struct igc_tx_buffer *buffer;
+ union igc_adv_tx_desc *desc;
+ u32 cmd_type, olinfo_status;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ buffer = &ring->tx_buffer_info[ring->next_to_use];
+ err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ buffer->bytecount;
+ olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+
+ buffer->next_to_watch = desc;
+
+ ring->next_to_use++;
+ if (ring->next_to_use == ring->count)
+ ring->next_to_use = 0;
+
+ return 0;
+}
+
+static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
+ int cpu)
+{
+ int index = cpu;
+
+ if (unlikely(index < 0))
+ index = 0;
+
+ while (index >= adapter->num_tx_queues)
+ index -= adapter->num_tx_queues;
+
+ return adapter->tx_ring[index];
+}
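The subtraction loop is a modulo mapping from CPU number to transmit queue, so several CPUs may share one ring; that is why every XDP enqueue takes __netif_tx_lock. An equivalent sketch (cpu_to_tx_queue() is not a real helper in this driver):

	static int cpu_to_tx_queue(int cpu, int num_tx_queues)
	{
		return (cpu < 0 ? 0 : cpu) % num_tx_queues;
	}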
+
+static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ int res;
+
+ if (unlikely(!xdpf))
+ return -EFAULT;
+
+ ring = igc_xdp_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ __netif_tx_lock(nq, cpu);
+ res = igc_xdp_init_tx_descriptor(ring, xdpf);
+ __netif_tx_unlock(nq);
+ return res;
+}
+
+static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+ u32 act;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(adapter->xdp_prog);
+ if (!prog) {
+ res = IGC_XDP_PASS;
+ goto unlock;
+ }
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ res = IGC_XDP_PASS;
+ break;
+ case XDP_TX:
+ if (igc_xdp_xmit_back(adapter, xdp) < 0)
+ res = IGC_XDP_CONSUMED;
+ else
+ res = IGC_XDP_TX;
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+ res = IGC_XDP_CONSUMED;
+ else
+ res = IGC_XDP_REDIRECT;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ res = IGC_XDP_CONSUMED;
+ break;
+ }
+
+unlock:
+ rcu_read_unlock();
+ return ERR_PTR(-res);
+}
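For context, the program executed by bpf_prog_run_xdp() above is supplied from user space. A hypothetical minimal XDP program, built with clang -target bpf and attached through the new ndo_bpf hook added further below:

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_pass_all(struct xdp_md *ctx)
	{
		return XDP_PASS;	/* hand every frame to the regular stack */
	}

	char _license[] SEC("license") = "GPL";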
+
+/* This function assumes __netif_tx_lock is held by the caller. */
+static void igc_flush_tx_descriptors(struct igc_ring *ring)
+{
+ /* Once the tail pointer is updated, hardware can fetch the descriptors
+ * at any time, so issue a write memory barrier here to ensure all
+ * descriptor writes are complete before the tail pointer is updated.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+
+ if (status & IGC_XDP_TX) {
+ ring = igc_xdp_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ __netif_tx_lock(nq, cpu);
+ igc_flush_tx_descriptors(ring);
+ __netif_tx_unlock(nq);
+ }
+
+ if (status & IGC_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
+ struct igc_adapter *adapter = q_vector->adapter;
struct igc_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
+ int xdp_status = 0, rx_buffer_pgcnt;
while (likely(total_packets < budget)) {
union igc_adv_rx_desc *rx_desc;
struct igc_rx_buffer *rx_buffer;
- unsigned int size;
+ unsigned int size, truesize;
+ ktime_t timestamp = 0;
+ struct xdp_buff xdp;
+ int pkt_offset = 0;
+ void *pktbuf;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
@@ -1908,16 +2138,52 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
*/
dma_rmb();
- rx_buffer = igc_get_rx_buffer(rx_ring, size);
+ rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
+ truesize = igc_get_rx_frame_truesize(rx_ring, size);
+
+ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
- /* retrieve a buffer from the ring */
- if (skb)
+ if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
+ timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
+ pktbuf);
+ pkt_offset = IGC_TS_HDR_LEN;
+ size -= IGC_TS_HDR_LEN;
+ }
+
+ if (!skb) {
+ xdp.data = pktbuf + pkt_offset;
+ xdp.data_end = xdp.data + size;
+ xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.frame_sz = truesize;
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ skb = igc_xdp_run_prog(adapter, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ switch (xdp_res) {
+ case IGC_XDP_CONSUMED:
+ rx_buffer->pagecnt_bias++;
+ break;
+ case IGC_XDP_TX:
+ case IGC_XDP_REDIRECT:
+ igc_rx_buffer_flip(rx_buffer, truesize);
+ xdp_status |= xdp_res;
+ break;
+ }
+
+ total_packets++;
+ total_bytes += size;
+ } else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
else
- skb = igc_construct_skb(rx_ring, rx_buffer,
- rx_desc, size);
+ skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+ timestamp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1926,7 +2192,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
break;
}
- igc_put_rx_buffer(rx_ring, rx_buffer);
+ igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -1954,6 +2220,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
total_packets++;
}
+ if (xdp_status)
+ igc_finalize_xdp(adapter, xdp_status);
+
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
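The xdp_buff fields above are filled in by hand; kernels from v5.12 onward provide helpers that perform the same initialization, so an equivalent sketch of that block would be:

	xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
	xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
			 igc_rx_offset(rx_ring) + pkt_offset, size,
			 false);	/* no metadata, matching
					 * xdp_set_data_meta_invalid() */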
@@ -2015,8 +2284,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+ xdp_return_frame(tx_buffer->xdpf);
+ else
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -3580,7 +3851,7 @@ void igc_up(struct igc_adapter *adapter)
netif_tx_start_all_queues(adapter->netdev);
/* start the watchdog. */
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
}
@@ -3858,6 +4129,11 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct igc_adapter *adapter = netdev_priv(netdev);
+ if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
+ netdev_dbg(netdev, "Jumbo frames not supported with XDP\n");
+ return -EINVAL;
+ }
+
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -3974,9 +4250,20 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
+ u32 ack, tsauxc, sec, nsec, tsicr;
struct igc_hw *hw = &adapter->hw;
- u32 tsicr = rd32(IGC_TSICR);
- u32 ack = 0;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+
+ tsicr = rd32(IGC_TSICR);
+ ack = 0;
+
+ if (tsicr & IGC_TSICR_SYS_WRAP) {
+ event.type = PTP_CLOCK_PPS;
+ if (adapter->ptp_caps.pps)
+ ptp_clock_event(adapter->ptp_clock, &event);
+ ack |= IGC_TSICR_SYS_WRAP;
+ }
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
@@ -3984,6 +4271,54 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
ack |= IGC_TSICR_TXTS;
}
+ if (tsicr & IGC_TSICR_TT0) {
+ spin_lock(&adapter->tmreg_lock);
+ ts = timespec64_add(adapter->perout[0].start,
+ adapter->perout[0].period);
+ wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
+ wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
+ tsauxc = rd32(IGC_TSAUXC);
+ tsauxc |= IGC_TSAUXC_EN_TT0;
+ wr32(IGC_TSAUXC, tsauxc);
+ adapter->perout[0].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+ ack |= IGC_TSICR_TT0;
+ }
+
+ if (tsicr & IGC_TSICR_TT1) {
+ spin_lock(&adapter->tmreg_lock);
+ ts = timespec64_add(adapter->perout[1].start,
+ adapter->perout[1].period);
+ wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
+ wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
+ tsauxc = rd32(IGC_TSAUXC);
+ tsauxc |= IGC_TSAUXC_EN_TT1;
+ wr32(IGC_TSAUXC, tsauxc);
+ adapter->perout[1].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+ ack |= IGC_TSICR_TT1;
+ }
+
+ if (tsicr & IGC_TSICR_AUTT0) {
+ nsec = rd32(IGC_AUXSTMPL0);
+ sec = rd32(IGC_AUXSTMPH0);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = (u64)sec * NSEC_PER_SEC + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+ ack |= IGC_TSICR_AUTT0;
+ }
+
+ if (tsicr & IGC_TSICR_AUTT1) {
+ nsec = rd32(IGC_AUXSTMPL1);
+ sec = rd32(IGC_AUXSTMPH1);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 1;
+ event.timestamp = (u64)sec * NSEC_PER_SEC + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+ ack |= IGC_TSICR_AUTT1;
+ }
+
/* acknowledge the interrupts */
wr32(IGC_TSICR, ack);
}
@@ -4009,7 +4344,7 @@ static irqreturn_t igc_msix_other(int irq, void *data)
}
if (icr & IGC_ICR_LSC) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -4387,7 +4722,7 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
}
if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
@@ -4429,7 +4764,7 @@ static irqreturn_t igc_intr(int irq, void *data)
}
if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -4583,7 +4918,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
return IGC_SUCCESS;
@@ -4844,6 +5179,58 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
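User space reaches igc_bpf() through the XDP_SETUP_PROG command issued by the attach path. A hypothetical loader using a recent libbpf (object path and interface name are placeholders):

	#include <bpf/libbpf.h>
	#include <net/if.h>

	int attach_xdp(const char *obj_path, const char *ifname)
	{
		struct bpf_object *obj = bpf_object__open_file(obj_path, NULL);
		struct bpf_program *prog;
		int ifindex = if_nametoindex(ifname);

		if (!obj || !ifindex || bpf_object__load(obj))
			return -1;
		prog = bpf_object__next_program(obj, NULL);
		return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
	}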
+
+static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ int i, drops;
+
+ if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ ring = igc_xdp_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ __netif_tx_lock(nq, cpu);
+
+ drops = 0;
+ for (i = 0; i < num_frames; i++) {
+ int err;
+ struct xdp_frame *xdpf = frames[i];
+
+ err = igc_xdp_init_tx_descriptor(ring, xdpf);
+ if (err) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ igc_flush_tx_descriptors(ring);
+
+ __netif_tx_unlock(nq);
+
+ return num_frames - drops;
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4857,6 +5244,8 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_features_check = igc_features_check,
.ndo_do_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
+ .ndo_bpf = igc_bpf,
+ .ndo_xdp_xmit = igc_xdp_xmit,
};
/* PCIe configuration access */
@@ -4924,7 +5313,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
struct igc_mac_info *mac = &adapter->hw.mac;
- mac->autoneg = 0;
+ mac->autoneg = false;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
* for the switch() below to work
@@ -4946,13 +5335,13 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
+ mac->autoneg = true;
adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
goto err_inval;
case SPEED_2500 + DUPLEX_FULL:
- mac->autoneg = 1;
+ mac->autoneg = true;
adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
break;
case SPEED_2500 + DUPLEX_HALF: /* not supported */
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 545f4d0e67cf..69617d2c1be2 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -120,12 +120,289 @@ static int igc_ptp_settime_i225(struct ptp_clock_info *ptp,
return 0;
}
+static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
+{
+ u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
+ static const u32 mask[IGC_N_SDP] = {
+ IGC_CTRL_SDP0_DIR,
+ IGC_CTRL_SDP1_DIR,
+ IGC_CTRL_EXT_SDP2_DIR,
+ IGC_CTRL_EXT_SDP3_DIR,
+ };
+
+ if (input)
+ *ptr &= ~mask[pin];
+ else
+ *ptr |= mask[pin];
+}
+
+static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq)
+{
+ static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = {
+ IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3,
+ };
+ static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = {
+ IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3,
+ };
+ static const u32 igc_ts_sdp_en[IGC_N_SDP] = {
+ IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN,
+ };
+ static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = {
+ IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0,
+ IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0,
+ };
+ static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = {
+ IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1,
+ IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1,
+ };
+ static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = {
+ IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0,
+ IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0,
+ };
+ static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = {
+ IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1,
+ IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1,
+ };
+ static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = {
+ IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1,
+ IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1,
+ };
+ struct igc_hw *hw = &igc->hw;
+ u32 ctrl, ctrl_ext, tssdp = 0;
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ tssdp = rd32(IGC_TSSDP);
+
+ igc_pin_direction(pin, 0, &ctrl, &ctrl_ext);
+
+ /* Make sure this pin is not enabled as an input. */
+ if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin])
+ tssdp &= ~IGC_AUX0_TS_SDP_EN;
+
+ if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin])
+ tssdp &= ~IGC_AUX1_TS_SDP_EN;
+
+ tssdp &= ~igc_ts_sdp_sel_clr[pin];
+ if (freq) {
+ if (chan == 1)
+ tssdp |= igc_ts_sdp_sel_fc1[pin];
+ else
+ tssdp |= igc_ts_sdp_sel_fc0[pin];
+ } else {
+ if (chan == 1)
+ tssdp |= igc_ts_sdp_sel_tt1[pin];
+ else
+ tssdp |= igc_ts_sdp_sel_tt0[pin];
+ }
+ tssdp |= igc_ts_sdp_en[pin];
+
+ wr32(IGC_TSSDP, tssdp);
+ wr32(IGC_CTRL, ctrl);
+ wr32(IGC_CTRL_EXT, ctrl_ext);
+}
+
+static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin)
+{
+ static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = {
+ IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3,
+ };
+ static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = {
+ IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3,
+ };
+ static const u32 igc_ts_sdp_en[IGC_N_SDP] = {
+ IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN,
+ };
+ struct igc_hw *hw = &igc->hw;
+ u32 ctrl, ctrl_ext, tssdp = 0;
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ tssdp = rd32(IGC_TSSDP);
+
+ igc_pin_direction(pin, 1, &ctrl, &ctrl_ext);
+
+ /* Make sure this pin is not enabled as an output. */
+ tssdp &= ~igc_ts_sdp_en[pin];
+
+ if (chan == 1) {
+ tssdp &= ~IGC_AUX1_SEL_SDP3;
+ tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN;
+ } else {
+ tssdp &= ~IGC_AUX0_SEL_SDP3;
+ tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN;
+ }
+
+ wr32(IGC_TSSDP, tssdp);
+ wr32(IGC_CTRL, ctrl);
+ wr32(IGC_CTRL_EXT, ctrl_ext);
+}
+
static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct igc_adapter *igc =
+ container_of(ptp, struct igc_adapter, ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+ struct timespec64 ts;
+ int use_freq = 0, pin = -1;
+ u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests failing to enable both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ if (rq->extts.index == 1) {
+ tsauxc_mask = IGC_TSAUXC_EN_TS1;
+ tsim_mask = IGC_TSICR_AUTT1;
+ } else {
+ tsauxc_mask = IGC_TSAUXC_EN_TS0;
+ tsim_mask = IGC_TSICR_AUTT0;
+ }
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+ tsauxc = rd32(IGC_TSAUXC);
+ tsim = rd32(IGC_TSIM);
+ if (on) {
+ igc_pin_extts(igc, rq->extts.index, pin);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ } else {
+ tsauxc &= ~tsauxc_mask;
+ tsim &= ~tsim_mask;
+ }
+ wr32(IGC_TSAUXC, tsauxc);
+ wr32(IGC_TSIM, tsim);
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+ ns = ns >> 1;
+ if (on && (ns <= 70000000LL || ns == 125000000LL ||
+ ns == 250000000LL || ns == 500000000LL)) {
+ if (ns < 8LL)
+ return -EINVAL;
+ use_freq = 1;
+ }
+ ts = ns_to_timespec64(ns);
+ if (rq->perout.index == 1) {
+ if (use_freq) {
+ tsauxc_mask = IGC_TSAUXC_EN_CLK1;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = IGC_TSAUXC_EN_TT1;
+ tsim_mask = IGC_TSICR_TT1;
+ }
+ trgttiml = IGC_TRGTTIML1;
+ trgttimh = IGC_TRGTTIMH1;
+ freqout = IGC_FREQOUT1;
+ } else {
+ if (use_freq) {
+ tsauxc_mask = IGC_TSAUXC_EN_CLK0;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = IGC_TSAUXC_EN_TT0;
+ tsim_mask = IGC_TSICR_TT0;
+ }
+ trgttiml = IGC_TRGTTIML0;
+ trgttimh = IGC_TRGTTIMH0;
+ freqout = IGC_FREQOUT0;
+ }
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+ tsauxc = rd32(IGC_TSAUXC);
+ tsim = rd32(IGC_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
+ tsim &= ~IGC_TSICR_TT1;
+ } else {
+ tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
+ tsim &= ~IGC_TSICR_TT0;
+ }
+ if (on) {
+ int i = rq->perout.index;
+
+ igc_pin_perout(igc, i, pin, use_freq);
+ igc->perout[i].start.tv_sec = rq->perout.start.sec;
+ igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
+ igc->perout[i].period.tv_sec = ts.tv_sec;
+ igc->perout[i].period.tv_nsec = ts.tv_nsec;
+ wr32(trgttimh, rq->perout.start.sec);
+ /* For now, always select timer 0 as source. */
+ wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
+ if (use_freq)
+ wr32(freqout, ns);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ }
+ wr32(IGC_TSAUXC, tsauxc);
+ wr32(IGC_TSIM, tsim);
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+ tsim = rd32(IGC_TSIM);
+ if (on)
+ tsim |= IGC_TSICR_SYS_WRAP;
+ else
+ tsim &= ~IGC_TSICR_SYS_WRAP;
+ igc->pps_sys_wrap_on = on;
+ wr32(IGC_TSIM, tsim);
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+ return 0;
+
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
+static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
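On the user-space side, these pins are driven through the PTP character device. A sketch, assuming the i225 clock is exposed as /dev/ptp0; struct and ioctl names come from <linux/ptp_clock.h>, and error handling is omitted:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_pin_desc pin = {
			.index = 0, .func = PTP_PF_PEROUT, .chan = 0,
		};
		struct ptp_perout_request req = {
			.index = 0,
			.period = { .sec = 1, .nsec = 0 },	/* 1 PPS on SDP0 */
		};
		int fd = open("/dev/ptp0", O_RDWR);

		ioctl(fd, PTP_PIN_SETFUNC, &pin);	/* vetted by igc_ptp_verify_pin() */
		ioctl(fd, PTP_PEROUT_REQUEST, &req);	/* lands in igc_ptp_feature_enable_i225() */
		return 0;
	}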
+
/**
* igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp
* @adapter: board private structure
@@ -153,20 +430,20 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
/**
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
- * @q_vector: Pointer to interrupt specific structure
- * @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @adapter: Pointer to adapter the packet buffer belongs to
+ * @buf: Pointer to packet buffer
*
* This function retrieves the timestamp saved in the beginning of packet
* buffer. While two timestamps are available, one in timer0 reference and the
* other in timer1 reference, this function considers only the timestamp in
* timer0 reference.
+ *
+ * Returns timestamp value.
*/
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
- struct sk_buff *skb)
+ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf)
{
- struct igc_adapter *adapter = q_vector->adapter;
- u64 regval;
+ ktime_t timestamp;
+ u32 secs, nsecs;
int adjust;
/* Timestamps are saved in little endian at the beginning of the packet
@@ -178,9 +455,10 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
* SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
* part of the timestamp.
*/
- regval = le32_to_cpu(va[2]);
- regval |= (u64)le32_to_cpu(va[3]) << 32;
- igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ nsecs = le32_to_cpu(buf[2]);
+ secs = le32_to_cpu(buf[3]);
+
+ timestamp = ktime_set(secs, nsecs);
/* Adjust timestamp for the RX latency based on link speed */
switch (adapter->link_speed) {
@@ -201,8 +479,8 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
break;
}
- skb_hwtstamps(skb)->hwtstamp =
- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
+ return ktime_sub_ns(timestamp, adjust);
}
static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter)
@@ -485,9 +763,17 @@ void igc_ptp_init(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
+ int i;
switch (hw->mac.type) {
case igc_i225:
+ for (i = 0; i < IGC_N_SDP; i++) {
+ struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
@@ -496,6 +782,12 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
+ adapter->ptp_caps.pps = 1;
+ adapter->ptp_caps.pin_config = adapter->sdp_config;
+ adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS;
+ adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
+ adapter->ptp_caps.n_pins = IGC_N_SDP;
+ adapter->ptp_caps.verify = igc_ptp_verify_pin;
break;
default:
adapter->ptp_clock = NULL;
@@ -597,7 +889,9 @@ void igc_ptp_reset(struct igc_adapter *adapter)
case igc_i225:
wr32(IGC_TSAUXC, 0x0);
wr32(IGC_TSSDP, 0x0);
- wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS);
+ wr32(IGC_TSIM,
+ IGC_TSICR_INTERRUPTS |
+ (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0));
wr32(IGC_IMS, IGC_IMS_TS);
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 3e5cb7aef9da..cc174853554b 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -192,6 +192,16 @@
#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
+#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
+#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
+#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
+#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */
+#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */
+#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
+#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
+#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
new file mode 100644
index 000000000000..11133c4619bb
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation. */
+
+#include "igc.h"
+#include "igc_xdp.h"
+
+int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = adapter->netdev;
+ bool if_running = netif_running(dev);
+ struct bpf_prog *old_prog;
+
+ if (dev->mtu > ETH_DATA_LEN) {
+ /* For now, the driver doesn't support XDP functionality with
+ * jumbo frames, so we return an error.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (if_running)
+ igc_close(dev);
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (if_running)
+ igc_open(dev);
+
+ return 0;
+}
+
+int igc_xdp_register_rxq_info(struct igc_ring *ring)
+{
+ struct net_device *dev = ring->netdev;
+ int err;
+
+ err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0);
+ if (err) {
+ netdev_err(dev, "Failed to register xdp rxq info\n");
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err) {
+ netdev_err(dev, "Failed to register xdp rxq mem model\n");
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ return err;
+ }
+
+ return 0;
+}
+
+void igc_xdp_unregister_rxq_info(struct igc_ring *ring)
+{
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h
new file mode 100644
index 000000000000..cfecb515b718
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, Intel Corporation. */
+
+#ifndef _IGC_XDP_H_
+#define _IGC_XDP_H_
+
+int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+
+int igc_xdp_register_rxq_info(struct igc_ring *ring);
+void igc_xdp_unregister_rxq_info(struct igc_ring *ring);
+
+#endif /* _IGC_XDP_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 8d3798a32f0e..e324e42fab2d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1351,7 +1351,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
}
/**
- * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
* @input: unique input dword
* @common: compressed common input dword
@@ -1542,6 +1542,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (input_mask->formatted.vm_pool & 0x7F) {
case 0x0:
fdirm |= IXGBE_FDIRM_POOL;
+ break;
case 0x7F:
break;
default:
@@ -1557,6 +1558,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
hw_dbg(hw, " Error on src/dst port mask\n");
return IXGBE_ERR_CONFIG;
}
+ break;
case IXGBE_ATR_L4TYPE_MASK:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 62ddb452f862..03ccbe6b66d2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -93,6 +93,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
default:
break;
}
+ break;
default:
break;
}
@@ -2707,7 +2708,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enable_rx_buff - Enables the receive data path
+ * ixgbe_enable_rx_buff_generic - Enables the receive data path
* @hw: pointer to hardware structure
*
* Enables the receive data path
@@ -3029,14 +3030,14 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
* This function should only be involved in the IOV mode.
* In IOV mode, Default pool is next pool after the number of
* VFs advertized and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
- *
- * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @vmdq: VMDq pool index
**/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
@@ -3896,7 +3897,7 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
}
/**
- * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Returns the thermal sensor data structure
@@ -4054,8 +4055,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_oem_prod_version Etrack ID from EEPROM
- *
+ * ixgbe_get_oem_prod_version - Etrack ID from EEPROM
* @hw: pointer to hardware structure
* @nvm_ver: pointer to output structure
*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index c00332d2e02a..72e6ebffea33 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -361,7 +361,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
#ifdef IXGBE_FCOE
- /* Reprogam FCoE hardware offloads when the traffic class
+ /* Reprogram FCoE hardware offloads when the traffic class
* FCoE is using changes. This happens if the APP info
* changes or the up2tc mapping is updated.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index a280aa34ca1d..4ceaca0f6ce3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1368,45 +1368,33 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- char *p = (char *)data;
unsigned int i;
+ u8 *p = data;
switch (stringset) {
case ETH_SS_TEST:
- for (i = 0; i < IXGBE_TEST_LEN; i++) {
- memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_TEST_LEN; i++)
+ ethtool_sprintf(&p, ixgbe_gstrings_test[i]);
break;
case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ ixgbe_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
+ ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
+ ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
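ethtool_sprintf() wraps exactly the pattern removed here: format into the current ETH_GSTRING_LEN slot, then advance the cursor. A sketch of its effect (the real helper is variadic and lives in net/ethtool/ioctl.c):

	static void ethtool_sprintf_sketch(u8 **data, const char *fmt,
					   unsigned int i)
	{
		snprintf((char *)*data, ETH_GSTRING_LEN, fmt, i);
		*data += ETH_GSTRING_LEN;
	}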
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index df389a11d3af..0218f6c9b925 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -132,6 +132,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
else
*tx = (tc + 4) << 4; /* 96, 112 */
}
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cffb95f8f632..c5ec17d19c59 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -225,7 +225,7 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_check_from_parent - Determine whether PCIe info should come from parent
+ * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
* @hw: hw specific details
*
* This function is used by probe to determine whether a device's PCI-Express
@@ -6158,7 +6158,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_eee_capable - helper function to determine EEE support on X550
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
* @adapter: board private structure
*/
static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
@@ -10201,7 +10201,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
- int drops = 0;
+ int nxmit = 0;
int i;
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
@@ -10225,16 +10225,15 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
int err;
err = ixgbe_xmit_xdp_ring(adapter, xdpf);
- if (err != IXGBE_XDP_TX) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err != IXGBE_XDP_TX)
+ break;
+ nxmit++;
}
if (unlikely(flags & XDP_XMIT_FLUSH))
ixgbe_xdp_ring_update_tail(ring);
- return n - drops;
+ return nxmit;
}
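This hunk adopts the updated ndo_xdp_xmit contract: return the number of frames actually queued and stop freeing failed frames, since the caller now releases everything past the returned count. A sketch of the pattern, with xmit_one() standing in for ixgbe_xmit_xdp_ring():

	static int xdp_xmit_batch(struct xdp_frame **frames, int n)
	{
		int i, nxmit = 0;

		for (i = 0; i < n; i++) {
			if (xmit_one(frames[i]) != IXGBE_XDP_TX)
				break;
			nxmit++;
		}
		return nxmit;	/* caller frees frames[nxmit..n-1] */
	}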
static const struct net_device_ops ixgbe_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index fc389eecdd2b..24aa97f993ca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -380,6 +380,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case X557_PHY_ID2:
phy_type = ixgbe_phy_x550em_ext_t;
break;
+ case BCM54616S_E_PHY_ID:
+ phy_type = ixgbe_phy_ext_1g_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -461,12 +464,13 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
- * the SWFW lock
+ * ixgbe_read_phy_reg_mdi - read PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register without the SWFW lock
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 *phy_data)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 22a874eee2e8..23ddfd79fc8b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -999,6 +999,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
+ break;
case HWTSTAMP_TX_ON:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2be1c4c72435..2647937f7f4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1407,6 +1407,7 @@ struct ixgbe_nvm_version {
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
+#define BCM54616S_E_PHY_ID 0x03625D10
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
@@ -3383,10 +3384,6 @@ struct ixgbe_hw_stats {
/* forward declaration */
struct ixgbe_hw;
-/* iterator type for walking multicast address lists */
-typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
-
/* Function pointer table */
struct ixgbe_eeprom_operations {
s32 (*init_params)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 4b93ba149ec5..d5cfb51ff648 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -701,7 +701,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
- * ixgbe_release_nvm_semaphore - Release hardware semaphore
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
* @hw: pointer to hardware structure
*
* This function clears hardware semaphore bits.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 5e339afa682a..9724ffb16518 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1248,7 +1248,7 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_fw_recovery_mode - Check FW NVM recovery mode
+ * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
* @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 3771857cf887..91ad5b902673 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -104,6 +104,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ rcu_read_unlock();
+ return result;
+ }
+
switch (act) {
case XDP_PASS:
break;
@@ -115,10 +122,6 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
}
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
- break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 449d7d5b280d..ba2ed8a43d2d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2633,6 +2633,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index bfe6dfcec4ab..5fc347abab3c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -121,9 +121,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
+ * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
+ * @hw: pointer to private hardware struct
+ *
* Hyper-V variant; the VF/PF communication is through the PCI
* config space.
- * @hw: pointer to private hardware struct
*/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
@@ -513,9 +515,11 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
- * Hyper-V variant - just a stub.
+ * ixgbevf_hv_update_mc_addr_list_vf - stub
* @hw: unused
* @netdev: unused
+ *
+ * Hyper-V variant - just a stub.
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
@@ -564,9 +568,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
}
/**
- * Hyper-V variant - just a stub.
+ * ixgbevf_hv_update_xcast_mode - stub
* @hw: unused
* @xcast_mode: unused
+ *
+ * Hyper-V variant - just a stub.
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -608,7 +614,7 @@ mbx_err:
}
/**
- * Hyper-V variant - just a stub.
+ * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
* @hw: unused
* @vlan: unused
* @vind: unused
@@ -726,11 +732,13 @@ out:
}
/**
- * Hyper-V variant; there is no mailbox communication.
+ * ixgbevf_hv_check_mac_link_vf - check link
* @hw: pointer to private hardware struct
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
* @autoneg_wait_to_complete: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index d1e9e306653b..1d8209df4162 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -16,9 +16,6 @@
struct ixgbe_hw;
-/* iterator type for walking multicast address lists */
-typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
struct ixgbe_mac_operations {
s32 (*init_hw)(struct ixgbe_hw *);
s32 (*reset_hw)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 925161959b9b..6f987a7ffcb3 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -41,7 +41,10 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/iopoll.h>
#include <linux/in.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
@@ -54,21 +57,246 @@
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/pgtable.h>
-
-#include <asm/bootinfo.h>
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <asm/mach-rc32434/rb.h>
-#include <asm/mach-rc32434/rc32434.h>
-#include <asm/mach-rc32434/eth.h>
-#include <asm/mach-rc32434/dma_v.h>
+#include <linux/clk.h>
#define DRV_NAME "korina"
#define DRV_VERSION "0.20"
#define DRV_RELDATE "15Sep2017"
+struct eth_regs {
+ u32 ethintfc;
+ u32 ethfifott;
+ u32 etharc;
+ u32 ethhash0;
+ u32 ethhash1;
+ u32 ethu0[4]; /* Reserved. */
+ u32 ethpfs;
+ u32 ethmcp;
+ u32 eth_u1[10]; /* Reserved. */
+ u32 ethspare;
+ u32 eth_u2[42]; /* Reserved. */
+ u32 ethsal0;
+ u32 ethsah0;
+ u32 ethsal1;
+ u32 ethsah1;
+ u32 ethsal2;
+ u32 ethsah2;
+ u32 ethsal3;
+ u32 ethsah3;
+ u32 ethrbc;
+ u32 ethrpc;
+ u32 ethrupc;
+ u32 ethrfc;
+ u32 ethtbc;
+ u32 ethgpf;
+ u32 eth_u9[50]; /* Reserved. */
+ u32 ethmac1;
+ u32 ethmac2;
+ u32 ethipgt;
+ u32 ethipgr;
+ u32 ethclrt;
+ u32 ethmaxf;
+ u32 eth_u10; /* Reserved. */
+ u32 ethmtest;
+ u32 miimcfg;
+ u32 miimcmd;
+ u32 miimaddr;
+ u32 miimwtd;
+ u32 miimrdd;
+ u32 miimind;
+ u32 eth_u11; /* Reserved. */
+ u32 eth_u12; /* Reserved. */
+ u32 ethcfsa0;
+ u32 ethcfsa1;
+ u32 ethcfsa2;
+};
+
+/* Ethernet interrupt registers */
+#define ETH_INT_FC_EN BIT(0)
+#define ETH_INT_FC_ITS BIT(1)
+#define ETH_INT_FC_RIP BIT(2)
+#define ETH_INT_FC_JAM BIT(3)
+#define ETH_INT_FC_OVR BIT(4)
+#define ETH_INT_FC_UND BIT(5)
+#define ETH_INT_FC_IOC 0x000000c0
+
+/* Ethernet FIFO registers */
+#define ETH_FIFO_TT_TTH_BIT 0
+#define ETH_FIFO_TT_TTH 0x0000007f
+
+/* Ethernet ARC/multicast registers */
+#define ETH_ARC_PRO BIT(0)
+#define ETH_ARC_AM BIT(1)
+#define ETH_ARC_AFM BIT(2)
+#define ETH_ARC_AB BIT(3)
+
+/* Ethernet SAL registers */
+#define ETH_SAL_BYTE_5 0x000000ff
+#define ETH_SAL_BYTE_4 0x0000ff00
+#define ETH_SAL_BYTE_3 0x00ff0000
+#define ETH_SAL_BYTE_2 0xff000000
+
+/* Ethernet SAH registers */
+#define ETH_SAH_BYTE1 0x000000ff
+#define ETH_SAH_BYTE0 0x0000ff00
+
+/* Ethernet GPF register */
+#define ETH_GPF_PTV 0x0000ffff
+
+/* Ethernet PFG register */
+#define ETH_PFS_PFD BIT(0)
+
+/* Ethernet CFSA[0-3] registers */
+#define ETH_CFSA0_CFSA4 0x000000ff
+#define ETH_CFSA0_CFSA5 0x0000ff00
+#define ETH_CFSA1_CFSA2 0x000000ff
+#define ETH_CFSA1_CFSA3 0x0000ff00
+#define ETH_CFSA1_CFSA0 0x000000ff
+#define ETH_CFSA1_CFSA1 0x0000ff00
+
+/* Ethernet MAC1 registers */
+#define ETH_MAC1_RE BIT(0)
+#define ETH_MAC1_PAF BIT(1)
+#define ETH_MAC1_RFC BIT(2)
+#define ETH_MAC1_TFC BIT(3)
+#define ETH_MAC1_LB BIT(4)
+#define ETH_MAC1_MR BIT(31)
+
+/* Ethernet MAC2 registers */
+#define ETH_MAC2_FD BIT(0)
+#define ETH_MAC2_FLC BIT(1)
+#define ETH_MAC2_HFE BIT(2)
+#define ETH_MAC2_DC BIT(3)
+#define ETH_MAC2_CEN BIT(4)
+#define ETH_MAC2_PE BIT(5)
+#define ETH_MAC2_VPE BIT(6)
+#define ETH_MAC2_APE BIT(7)
+#define ETH_MAC2_PPE BIT(8)
+#define ETH_MAC2_LPE BIT(9)
+#define ETH_MAC2_NB BIT(12)
+#define ETH_MAC2_BP BIT(13)
+#define ETH_MAC2_ED BIT(14)
+
+/* Ethernet IPGT register */
+#define ETH_IPGT 0x0000007f
+
+/* Ethernet IPGR registers */
+#define ETH_IPGR_IPGR2 0x0000007f
+#define ETH_IPGR_IPGR1 0x00007f00
+
+/* Ethernet CLRT registers */
+#define ETH_CLRT_MAX_RET 0x0000000f
+#define ETH_CLRT_COL_WIN 0x00003f00
+
+/* Ethernet MAXF register */
+#define ETH_MAXF 0x0000ffff
+
+/* Ethernet test registers */
+#define ETH_TEST_REG BIT(2)
+#define ETH_MCP_DIV 0x000000ff
+
+/* MII registers */
+#define ETH_MII_CFG_RSVD 0x0000000c
+#define ETH_MII_CMD_RD BIT(0)
+#define ETH_MII_CMD_SCN BIT(1)
+#define ETH_MII_REG_ADDR 0x0000001f
+#define ETH_MII_PHY_ADDR 0x00001f00
+#define ETH_MII_WTD_DATA 0x0000ffff
+#define ETH_MII_RDD_DATA 0x0000ffff
+#define ETH_MII_IND_BSY BIT(0)
+#define ETH_MII_IND_SCN BIT(1)
+#define ETH_MII_IND_NV BIT(2)
+
+/* Values for the DEVCS field of the Ethernet DMA Rx and Tx descriptors. */
+#define ETH_RX_FD BIT(0)
+#define ETH_RX_LD BIT(1)
+#define ETH_RX_ROK BIT(2)
+#define ETH_RX_FM BIT(3)
+#define ETH_RX_MP BIT(4)
+#define ETH_RX_BP BIT(5)
+#define ETH_RX_VLT BIT(6)
+#define ETH_RX_CF BIT(7)
+#define ETH_RX_OVR BIT(8)
+#define ETH_RX_CRC BIT(9)
+#define ETH_RX_CV BIT(10)
+#define ETH_RX_DB BIT(11)
+#define ETH_RX_LE BIT(12)
+#define ETH_RX_LOR BIT(13)
+#define ETH_RX_CES BIT(14)
+#define ETH_RX_LEN_BIT 16
+#define ETH_RX_LEN 0xffff0000
+
+#define ETH_TX_FD BIT(0)
+#define ETH_TX_LD BIT(1)
+#define ETH_TX_OEN BIT(2)
+#define ETH_TX_PEN BIT(3)
+#define ETH_TX_CEN BIT(4)
+#define ETH_TX_HEN BIT(5)
+#define ETH_TX_TOK BIT(6)
+#define ETH_TX_MP BIT(7)
+#define ETH_TX_BP BIT(8)
+#define ETH_TX_UND BIT(9)
+#define ETH_TX_OF BIT(10)
+#define ETH_TX_ED BIT(11)
+#define ETH_TX_EC BIT(12)
+#define ETH_TX_LC BIT(13)
+#define ETH_TX_TD BIT(14)
+#define ETH_TX_CRC BIT(15)
+#define ETH_TX_LE BIT(16)
+#define ETH_TX_CC 0x001E0000
+
+/* DMA descriptor (in physical memory). */
+struct dma_desc {
+ u32 control; /* Control. Use the DMA_DESC_* bits below. */
+ u32 ca; /* Current Address. */
+ u32 devcs; /* Device control and status. */
+ u32 link; /* Next descriptor in chain. */
+};
+
+#define DMA_DESC_COUNT_BIT 0
+#define DMA_DESC_COUNT_MSK 0x0003ffff
+#define DMA_DESC_DS_BIT 20
+#define DMA_DESC_DS_MSK 0x00300000
+
+#define DMA_DESC_DEV_CMD_BIT 22
+#define DMA_DESC_DEV_CMD_MSK 0x01c00000
+
+/* DMA descriptors interrupts */
+#define DMA_DESC_COF BIT(25) /* Chain on finished */
+#define DMA_DESC_COD BIT(26) /* Chain on done */
+#define DMA_DESC_IOF BIT(27) /* Interrupt on finished */
+#define DMA_DESC_IOD BIT(28) /* Interrupt on done */
+#define DMA_DESC_TERM BIT(29) /* Terminated */
+#define DMA_DESC_DONE BIT(30) /* Done */
+#define DMA_DESC_FINI BIT(31) /* Finished */
+
+/* DMA register (within Internal Register Map). */
+struct dma_reg {
+ u32 dmac; /* Control. */
+ u32 dmas; /* Status. */
+ u32 dmasm; /* Mask. */
+ u32 dmadptr; /* Descriptor pointer. */
+ u32 dmandptr; /* Next descriptor pointer. */
+};
+
+/* DMA channels specific registers */
+#define DMA_CHAN_RUN_BIT BIT(0)
+#define DMA_CHAN_DONE_BIT BIT(1)
+#define DMA_CHAN_MODE_BIT BIT(2)
+#define DMA_CHAN_MODE_MSK 0x0000000c
+#define DMA_CHAN_MODE_AUTO 0
+#define DMA_CHAN_MODE_BURST 1
+#define DMA_CHAN_MODE_XFRT 2
+#define DMA_CHAN_MODE_RSVD 3
+#define DMA_CHAN_ACT_BIT BIT(4)
+
+/* DMA status registers */
+#define DMA_STAT_FINI BIT(0)
+#define DMA_STAT_DONE BIT(1)
+#define DMA_STAT_CHAIN BIT(2)
+#define DMA_STAT_ERR BIT(3)
+#define DMA_STAT_HALT BIT(4)
+
#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \
@@ -95,24 +323,30 @@
enum chain_status {
desc_filled,
- desc_empty
+ desc_is_empty
};
+#define DMA_COUNT(count) ((count) & DMA_DESC_COUNT_MSK)
#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
/* Information that needs to be kept for each board. */
struct korina_private {
- struct eth_regs *eth_regs;
- struct dma_reg *rx_dma_regs;
- struct dma_reg *tx_dma_regs;
+ struct eth_regs __iomem *eth_regs;
+ struct dma_reg __iomem *rx_dma_regs;
+ struct dma_reg __iomem *tx_dma_regs;
struct dma_desc *td_ring; /* transmit descriptor ring */
struct dma_desc *rd_ring; /* receive descriptor ring */
+ dma_addr_t td_dma;
+ dma_addr_t rd_dma;
struct sk_buff *tx_skb[KORINA_NUM_TDS];
struct sk_buff *rx_skb[KORINA_NUM_RDS];
+ dma_addr_t rx_skb_dma[KORINA_NUM_RDS];
+ dma_addr_t tx_skb_dma[KORINA_NUM_TDS];
+
int rx_next_done;
int rx_chain_head;
int rx_chain_tail;
@@ -137,15 +371,18 @@ struct korina_private {
struct mii_if_info mii_if;
struct work_struct restart_task;
struct net_device *dev;
- int phy_addr;
+ struct device *dmadev;
+ int mii_clock_freq;
};
-extern unsigned int idt_cpu_freq;
+static dma_addr_t korina_tx_dma(struct korina_private *lp, int idx)
+{
+ return lp->td_dma + (idx * sizeof(struct dma_desc));
+}
-static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
+static dma_addr_t korina_rx_dma(struct korina_private *lp, int idx)
{
- writel(0, &ch->dmandptr);
- writel(dma_addr, &ch->dmadptr);
+ return lp->rd_dma + (idx * sizeof(struct dma_desc));
}
static inline void korina_abort_dma(struct net_device *dev,
@@ -164,11 +401,6 @@ static inline void korina_abort_dma(struct net_device *dev,
writel(0, &ch->dmandptr);
}
-static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
-{
- writel(dma_addr, &ch->dmandptr);
-}
-
static void korina_abort_tx(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
@@ -183,30 +415,21 @@ static void korina_abort_rx(struct net_device *dev)
korina_abort_dma(dev, lp->rx_dma_regs);
}
-static void korina_start_rx(struct korina_private *lp,
- struct dma_desc *rd)
-{
- korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
-}
-
-static void korina_chain_rx(struct korina_private *lp,
- struct dma_desc *rd)
-{
- korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
-}
-
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
- unsigned long flags;
- u32 length;
u32 chain_prev, chain_next;
+ unsigned long flags;
struct dma_desc *td;
+ dma_addr_t ca;
+ u32 length;
+ int idx;
spin_lock_irqsave(&lp->lock, flags);
- td = &lp->td_ring[lp->tx_chain_tail];
+ idx = lp->tx_chain_tail;
+ td = &lp->td_ring[idx];
/* stop queue when full, drop pkts if queue already full */
if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
@@ -214,38 +437,37 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
if (lp->tx_count == (KORINA_NUM_TDS - 2))
netif_stop_queue(dev);
- else {
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return NETDEV_TX_OK;
- }
+ else
+ goto drop_packet;
}
lp->tx_count++;
- lp->tx_skb[lp->tx_chain_tail] = skb;
+ lp->tx_skb[idx] = skb;
length = skb->len;
- dma_cache_wback((u32)skb->data, skb->len);
/* Setup the transmit descriptor. */
- dma_cache_inv((u32) td, sizeof(*td));
- td->ca = CPHYSADDR(skb->data);
- chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
- chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
+ ca = dma_map_single(lp->dmadev, skb->data, length, DMA_TO_DEVICE);
+ if (dma_mapping_error(lp->dmadev, ca))
+ goto drop_packet;
+
+ lp->tx_skb_dma[idx] = ca;
+ td->ca = ca;
+
+ chain_prev = (idx - 1) & KORINA_TDS_MASK;
+ chain_next = (idx + 1) & KORINA_TDS_MASK;
if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
- if (lp->tx_chain_status == desc_empty) {
+ if (lp->tx_chain_status == desc_is_empty) {
/* Update tail */
td->control = DMA_COUNT(length) |
DMA_DESC_COF | DMA_DESC_IOF;
/* Move tail */
lp->tx_chain_tail = chain_next;
/* Write to NDPTR */
- writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
- &lp->tx_dma_regs->dmandptr);
+ writel(korina_tx_dma(lp, lp->tx_chain_head),
+ &lp->tx_dma_regs->dmandptr);
/* Move head to tail */
lp->tx_chain_head = lp->tx_chain_tail;
} else {
@@ -256,18 +478,18 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
lp->td_ring[chain_prev].control &=
~DMA_DESC_COF;
/* Link to prev */
- lp->td_ring[chain_prev].link = CPHYSADDR(td);
+ lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
/* Move tail */
lp->tx_chain_tail = chain_next;
/* Write to NDPTR */
- writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
- &(lp->tx_dma_regs->dmandptr));
+ writel(korina_tx_dma(lp, lp->tx_chain_head),
+ &lp->tx_dma_regs->dmandptr);
/* Move head to tail */
lp->tx_chain_head = lp->tx_chain_tail;
- lp->tx_chain_status = desc_empty;
+ lp->tx_chain_status = desc_is_empty;
}
} else {
- if (lp->tx_chain_status == desc_empty) {
+ if (lp->tx_chain_status == desc_is_empty) {
/* Update tail */
td->control = DMA_COUNT(length) |
DMA_DESC_COF | DMA_DESC_IOF;
@@ -280,44 +502,66 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
DMA_DESC_COF | DMA_DESC_IOF;
lp->td_ring[chain_prev].control &=
~DMA_DESC_COF;
- lp->td_ring[chain_prev].link = CPHYSADDR(td);
+ lp->td_ring[chain_prev].link = korina_tx_dma(lp, idx);
lp->tx_chain_tail = chain_next;
}
}
- dma_cache_wback((u32) td, sizeof(*td));
netif_trans_update(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
+
+drop_packet:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_OK;
}
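
The core of this rework replaces CPHYSADDR()/dma_cache_* tricks with the generic streaming DMA API: map the skb, bail out on a mapping error, give the device the returned bus address, and later unmap with the same size and direction. A condensed sketch of the Tx pattern above (error paths trimmed; lp->dmadev is the platform device that owns the mappings):

```c
/* Sketch of the streaming-DMA Tx pattern used by korina_send_packet(). */
dma_addr_t ca = dma_map_single(lp->dmadev, skb->data, skb->len,
			       DMA_TO_DEVICE);
if (dma_mapping_error(lp->dmadev, ca))
	goto drop_packet;		/* never hand a bad address to HW */

lp->tx_skb_dma[idx] = ca;		/* remembered for the later unmap */
td->ca = ca;				/* device-visible bus address */

/* ...and on Tx completion (see korina_tx() below): */
dma_unmap_single(lp->dmadev, lp->tx_skb_dma[idx], skb->len,
		 DMA_TO_DEVICE);
```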
-static int mdio_read(struct net_device *dev, int mii_id, int reg)
+static int korina_mdio_wait(struct korina_private *lp)
+{
+ u32 value;
+
+ return readl_poll_timeout_atomic(&lp->eth_regs->miimind,
+ value, !(value & ETH_MII_IND_BSY),
+ 1, 1000);
+}
+
+static int korina_mdio_read(struct net_device *dev, int phy, int reg)
{
struct korina_private *lp = netdev_priv(dev);
int ret;
- mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
+ ret = korina_mdio_wait(lp);
+ if (ret < 0)
+ return ret;
- writel(0, &lp->eth_regs->miimcfg);
- writel(0, &lp->eth_regs->miimcmd);
- writel(mii_id | reg, &lp->eth_regs->miimaddr);
- writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
+ writel(phy << 8 | reg, &lp->eth_regs->miimaddr);
+ writel(1, &lp->eth_regs->miimcmd);
+
+ ret = korina_mdio_wait(lp);
+ if (ret < 0)
+ return ret;
- ret = (int)(readl(&lp->eth_regs->miimrdd));
+ if (readl(&lp->eth_regs->miimind) & ETH_MII_IND_NV)
+ return -EINVAL;
+
+ ret = readl(&lp->eth_regs->miimrdd);
+ writel(0, &lp->eth_regs->miimcmd);
return ret;
}
-static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+static void korina_mdio_write(struct net_device *dev, int phy, int reg, int val)
{
struct korina_private *lp = netdev_priv(dev);
- mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
+ if (korina_mdio_wait(lp))
+ return;
- writel(0, &lp->eth_regs->miimcfg);
- writel(1, &lp->eth_regs->miimcmd);
- writel(mii_id | reg, &lp->eth_regs->miimaddr);
- writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
+ writel(0, &lp->eth_regs->miimcmd);
+ writel(phy << 8 | reg, &lp->eth_regs->miimaddr);
writel(val, &lp->eth_regs->miimwtd);
}
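
The new MDIO helpers are built on readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) from <linux/iopoll.h>, which keeps re-reading *addr into val until cond becomes true (returning 0) or the timeout expires (returning -ETIMEDOUT); here it waits, in 1 us steps for at most 1 ms, for the MII busy bit to clear. The bare `1` written to miimcmd corresponds to ETH_MII_CMD_RD (BIT(0)) in the defines above. Stand-alone sketch of the idiom:

```c
#include <linux/iopoll.h>

/* Sketch: spin until the MII management interface is idle. */
static int mdio_wait_idle_sketch(void __iomem *miimind)
{
	u32 value;

	return readl_poll_timeout_atomic(miimind, value,
					 !(value & ETH_MII_IND_BSY),
					 1, 1000);
}
```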
@@ -353,12 +597,10 @@ static int korina_rx(struct net_device *dev, int limit)
struct korina_private *lp = netdev_priv(dev);
struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
struct sk_buff *skb, *skb_new;
- u8 *pkt_buf;
u32 devcs, pkt_len, dmas;
+ dma_addr_t ca;
int count;
- dma_cache_inv((u32)rd, sizeof(*rd));
-
for (count = 0; count < limit; count++) {
skb = lp->rx_skb[lp->rx_next_done];
skb_new = NULL;
@@ -392,20 +634,22 @@ static int korina_rx(struct net_device *dev, int limit)
goto next;
}
- pkt_len = RCVPKT_LENGTH(devcs);
-
- /* must be the (first and) last
- * descriptor then */
- pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
-
- /* invalidate the cache */
- dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
-
/* Malloc up new buffer. */
skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
-
if (!skb_new)
break;
+
+ ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->dmadev, ca)) {
+ dev_kfree_skb_any(skb_new);
+ break;
+ }
+
+ pkt_len = RCVPKT_LENGTH(devcs);
+ dma_unmap_single(lp->dmadev, lp->rx_skb_dma[lp->rx_next_done],
+ KORINA_RBSIZE, DMA_FROM_DEVICE);
+
/* Do not count the CRC */
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, dev);
@@ -420,15 +664,13 @@ static int korina_rx(struct net_device *dev, int limit)
dev->stats.multicast++;
lp->rx_skb[lp->rx_next_done] = skb_new;
+ lp->rx_skb_dma[lp->rx_next_done] = ca;
next:
rd->devcs = 0;
/* Restore descriptor's curr_addr */
- if (skb_new)
- rd->ca = CPHYSADDR(skb_new->data);
- else
- rd->ca = CPHYSADDR(skb->data);
+ rd->ca = lp->rx_skb_dma[lp->rx_next_done];
rd->control = DMA_COUNT(KORINA_RBSIZE) |
DMA_DESC_COD | DMA_DESC_IOD;
@@ -437,23 +679,21 @@ next:
~DMA_DESC_COD;
lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
- dma_cache_wback((u32)rd, sizeof(*rd));
rd = &lp->rd_ring[lp->rx_next_done];
- writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
+ writel((u32)~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
}
dmas = readl(&lp->rx_dma_regs->dmas);
if (dmas & DMA_STAT_HALT) {
- writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
- &lp->rx_dma_regs->dmas);
+ writel((u32)~(DMA_STAT_HALT | DMA_STAT_ERR),
+ &lp->rx_dma_regs->dmas);
lp->dma_halt_cnt++;
rd->devcs = 0;
- skb = lp->rx_skb[lp->rx_next_done];
- rd->ca = CPHYSADDR(skb->data);
- dma_cache_wback((u32)rd, sizeof(*rd));
- korina_chain_rx(lp, rd);
+ rd->ca = lp->rx_skb_dma[lp->rx_next_done];
+ writel(korina_rx_dma(lp, rd - lp->rd_ring),
+ &lp->rx_dma_regs->dmandptr);
}
return count;
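
Note the refill order in the Rx path: the replacement skb is allocated and mapped before the old buffer is unmapped and sent up the stack, so an allocation or mapping failure leaves the ring slot intact and the frame is simply handled on a later poll. Condensed sketch of the loop body (old_dma stands in for lp->rx_skb_dma[lp->rx_next_done]):

```c
/* Sketch of korina_rx()'s map-before-unmap refill discipline. */
skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
if (!skb_new)
	break;				/* slot keeps its old buffer */

ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE,
		    DMA_FROM_DEVICE);
if (dma_mapping_error(lp->dmadev, ca)) {
	dev_kfree_skb_any(skb_new);
	break;				/* likewise: nothing is lost */
}

dma_unmap_single(lp->dmadev, old_dma, KORINA_RBSIZE, DMA_FROM_DEVICE);
/* the old skb now belongs to the CPU and can go up the stack */
```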
@@ -576,6 +816,10 @@ static void korina_tx(struct net_device *dev)
/* We must always free the original skb */
if (lp->tx_skb[lp->tx_next_done]) {
+ dma_unmap_single(lp->dmadev,
+ lp->tx_skb_dma[lp->tx_next_done],
+ lp->tx_skb[lp->tx_next_done]->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
lp->tx_skb[lp->tx_next_done] = NULL;
}
@@ -622,9 +866,9 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
if (lp->tx_chain_status == desc_filled &&
(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
- writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
- &(lp->tx_dma_regs->dmandptr));
- lp->tx_chain_status = desc_empty;
+ writel(korina_tx_dma(lp, lp->tx_chain_head),
+ &lp->tx_dma_regs->dmandptr);
+ lp->tx_chain_status = desc_is_empty;
lp->tx_chain_head = lp->tx_chain_tail;
netif_trans_update(dev);
}
@@ -643,7 +887,7 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
{
struct korina_private *lp = netdev_priv(dev);
- mii_check_media(&lp->mii_if, 0, init_media);
+ mii_check_media(&lp->mii_if, 1, init_media);
if (lp->mii_if.full_duplex)
writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
@@ -743,6 +987,7 @@ static int korina_alloc_ring(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
struct sk_buff *skb;
+ dma_addr_t ca;
int i;
/* Initialize the transmit descriptors */
@@ -754,7 +999,7 @@ static int korina_alloc_ring(struct net_device *dev)
}
lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
lp->tx_full = lp->tx_count = 0;
- lp->tx_chain_status = desc_empty;
+ lp->tx_chain_status = desc_is_empty;
/* Initialize the receive descriptors */
for (i = 0; i < KORINA_NUM_RDS; i++) {
@@ -765,19 +1010,24 @@ static int korina_alloc_ring(struct net_device *dev)
lp->rd_ring[i].control = DMA_DESC_IOD |
DMA_COUNT(KORINA_RBSIZE);
lp->rd_ring[i].devcs = 0;
- lp->rd_ring[i].ca = CPHYSADDR(skb->data);
- lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
+ ca = dma_map_single(lp->dmadev, skb->data, KORINA_RBSIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->dmadev, ca))
+ return -ENOMEM;
+ lp->rd_ring[i].ca = ca;
+ lp->rx_skb_dma[i] = ca;
+ lp->rd_ring[i].link = korina_rx_dma(lp, i + 1);
}
/* loop back receive descriptors, so the last
* descriptor points to the first one */
- lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
+ lp->rd_ring[i - 1].link = lp->rd_dma;
lp->rd_ring[i - 1].control |= DMA_DESC_COD;
lp->rx_next_done = 0;
lp->rx_chain_head = 0;
lp->rx_chain_tail = 0;
- lp->rx_chain_status = desc_empty;
+ lp->rx_chain_status = desc_is_empty;
return 0;
}
@@ -789,16 +1039,22 @@ static void korina_free_ring(struct net_device *dev)
for (i = 0; i < KORINA_NUM_RDS; i++) {
lp->rd_ring[i].control = 0;
- if (lp->rx_skb[i])
+ if (lp->rx_skb[i]) {
+ dma_unmap_single(lp->dmadev, lp->rx_skb_dma[i],
+ KORINA_RBSIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(lp->rx_skb[i]);
- lp->rx_skb[i] = NULL;
+ lp->rx_skb[i] = NULL;
+ }
}
for (i = 0; i < KORINA_NUM_TDS; i++) {
lp->td_ring[i].control = 0;
- if (lp->tx_skb[i])
+ if (lp->tx_skb[i]) {
+ dma_unmap_single(lp->dmadev, lp->tx_skb_dma[i],
+ lp->tx_skb[i]->len, DMA_TO_DEVICE);
dev_kfree_skb_any(lp->tx_skb[i]);
- lp->tx_skb[i] = NULL;
+ lp->tx_skb[i] = NULL;
+ }
}
}
@@ -830,7 +1086,8 @@ static int korina_init(struct net_device *dev)
writel(0, &lp->rx_dma_regs->dmas);
/* Start Rx DMA */
- korina_start_rx(lp, &lp->rd_ring[0]);
+ writel(0, &lp->rx_dma_regs->dmandptr);
+ writel(korina_rx_dma(lp, 0), &lp->rx_dma_regs->dmadptr);
writel(readl(&lp->tx_dma_regs->dmasm) &
~(DMA_STAT_FINI | DMA_STAT_ERR),
@@ -867,14 +1124,17 @@ static int korina_init(struct net_device *dev)
/* Management Clock Prescaler Divisor
* Clock independent setting */
- writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
- &lp->eth_regs->ethmcp);
+ writel(((lp->mii_clock_freq) / MII_CLOCK + 1) & ~1,
+ &lp->eth_regs->ethmcp);
+ writel(0, &lp->eth_regs->miimcfg);
/* don't transmit until fifo contains 48b */
writel(48, &lp->eth_regs->ethfifott);
writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
+ korina_check_media(dev, 1);
+
napi_enable(&lp->napi);
netif_start_queue(dev);
@@ -1022,86 +1282,94 @@ static const struct net_device_ops korina_netdev_ops = {
static int korina_probe(struct platform_device *pdev)
{
- struct korina_device *bif = platform_get_drvdata(pdev);
+ u8 *mac_addr = dev_get_platdata(&pdev->dev);
struct korina_private *lp;
struct net_device *dev;
- struct resource *r;
+ struct clk *clk;
+ void __iomem *p;
int rc;
- dev = alloc_etherdev(sizeof(struct korina_private));
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct korina_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
- bif->dev = dev;
- memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
+ if (mac_addr)
+ ether_addr_copy(dev->dev_addr, mac_addr);
+ else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0)
+ eth_hw_addr_random(dev);
+
+ clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ if (clk) {
+ clk_prepare_enable(clk);
+ lp->mii_clock_freq = clk_get_rate(clk);
+ } else {
+ lp->mii_clock_freq = 200000000; /* max possible input clk */
+ }
- lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
- lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
+ lp->rx_irq = platform_get_irq_byname(pdev, "rx");
+ lp->tx_irq = platform_get_irq_byname(pdev, "tx");
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
- dev->base_addr = r->start;
- lp->eth_regs = ioremap(r->start, resource_size(r));
- if (!lp->eth_regs) {
+ p = devm_platform_ioremap_resource_byname(pdev, "emac");
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
- rc = -ENXIO;
- goto probe_err_out;
+ return PTR_ERR(p);
}
+ lp->eth_regs = p;
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap(r->start, resource_size(r));
- if (!lp->rx_dma_regs) {
+ p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
- rc = -ENXIO;
- goto probe_err_dma_rx;
+ return PTR_ERR(p);
}
+ lp->rx_dma_regs = p;
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap(r->start, resource_size(r));
- if (!lp->tx_dma_regs) {
+ p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
- rc = -ENXIO;
- goto probe_err_dma_tx;
- }
-
- lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
- if (!lp->td_ring) {
- rc = -ENXIO;
- goto probe_err_td_ring;
+ return PTR_ERR(p);
}
+ lp->tx_dma_regs = p;
- dma_cache_inv((unsigned long)(lp->td_ring),
- TD_RING_SIZE + RD_RING_SIZE);
+ lp->td_ring = dmam_alloc_coherent(&pdev->dev, TD_RING_SIZE,
+ &lp->td_dma, GFP_KERNEL);
+ if (!lp->td_ring)
+ return -ENOMEM;
- /* now convert TD_RING pointer to KSEG1 */
- lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
- lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
+ lp->rd_ring = dmam_alloc_coherent(&pdev->dev, RD_RING_SIZE,
+ &lp->rd_dma, GFP_KERNEL);
+ if (!lp->rd_ring)
+ return -ENOMEM;
spin_lock_init(&lp->lock);
/* just use the rx dma irq */
dev->irq = lp->rx_irq;
lp->dev = dev;
+ lp->dmadev = &pdev->dev;
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
- lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
lp->mii_if.dev = dev;
- lp->mii_if.mdio_read = mdio_read;
- lp->mii_if.mdio_write = mdio_write;
- lp->mii_if.phy_id = lp->phy_addr;
+ lp->mii_if.mdio_read = korina_mdio_read;
+ lp->mii_if.mdio_write = korina_mdio_write;
+ lp->mii_if.phy_id = 1;
lp->mii_if.phy_id_mask = 0x1f;
lp->mii_if.reg_num_mask = 0x1f;
+ platform_set_drvdata(pdev, dev);
+
rc = register_netdev(dev);
if (rc < 0) {
printk(KERN_ERR DRV_NAME
": cannot register net device: %d\n", rc);
- goto probe_err_register;
+ return rc;
}
timer_setup(&lp->media_check_timer, korina_poll_media, 0);
@@ -1109,40 +1377,33 @@ static int korina_probe(struct platform_device *pdev)
printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
dev->name);
-out:
return rc;
-
-probe_err_register:
- kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
-probe_err_td_ring:
- iounmap(lp->tx_dma_regs);
-probe_err_dma_tx:
- iounmap(lp->rx_dma_regs);
-probe_err_dma_rx:
- iounmap(lp->eth_regs);
-probe_err_out:
- free_netdev(dev);
- goto out;
}
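
The devm/dmam conversion is what makes the whole probe_err_* label chain above removable: managed resources are released automatically, in reverse order, when probe fails or the device goes away, so every failure path reduces to a plain return. Note also that devm_platform_ioremap_resource_byname() reports failure with an ERR_PTR, never NULL. A minimal sketch of the resulting probe shape (hypothetical driver, reusing the patch's "emac" resource name):

```c
/* Sketch: devres-style probe with no error-unwind labels. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *regs;
	dma_addr_t ring_dma;
	void *ring;

	regs = devm_platform_ioremap_resource_byname(pdev, "emac");
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* unmapped automatically */

	ring = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
				   GFP_KERNEL);
	if (!ring)
		return -ENOMEM;		/* 'regs' cleanup is implicit */

	return 0;
}
```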
static int korina_remove(struct platform_device *pdev)
{
- struct korina_device *bif = platform_get_drvdata(pdev);
- struct korina_private *lp = netdev_priv(bif->dev);
-
- iounmap(lp->eth_regs);
- iounmap(lp->rx_dma_regs);
- iounmap(lp->tx_dma_regs);
- kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
+ struct net_device *dev = platform_get_drvdata(pdev);
- unregister_netdev(bif->dev);
- free_netdev(bif->dev);
+ unregister_netdev(dev);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id korina_match[] = {
+ {
+ .compatible = "idt,3243x-emac",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, korina_match);
+#endif
+
static struct platform_driver korina_driver = {
- .driver.name = "korina",
+ .driver = {
+ .name = "korina",
+ .of_match_table = of_match_ptr(korina_match),
+ },
.probe = korina_probe,
.remove = korina_remove,
};
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 51ed8a54d380..41c2ad210bc9 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -435,7 +435,6 @@ static int xrx200_probe(struct platform_device *pdev)
struct resource *res;
struct xrx200_priv *priv;
struct net_device *net_dev;
- const u8 *mac;
int err;
/* alloc the network device */
@@ -460,10 +459,8 @@ static int xrx200_probe(struct platform_device *pdev)
}
priv->pmac_reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->pmac_reg)) {
- dev_err(dev, "failed to request and remap io ranges\n");
+ if (IS_ERR(priv->pmac_reg))
return PTR_ERR(priv->pmac_reg);
- }
priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
if (priv->chan_rx.dma.irq < 0)
@@ -479,10 +476,8 @@ static int xrx200_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
- ether_addr_copy(net_dev->dev_addr, mac);
- else
+ err = of_get_mac_address(np, net_dev->dev_addr);
+ if (err)
eth_hw_addr_random(net_dev);
/* bring up the dma engine and IP core */
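
This hunk (like the mv643xx_eth and mvneta ones below) tracks an of_get_mac_address() API change: the helper now takes the destination buffer and returns 0 or a negative errno, instead of returning a `const void *` that had to be IS_ERR()-checked and copied. The pattern the tree converges on:

```c
/* Sketch: new of_get_mac_address() contract - it fills the buffer
 * itself and returns 0 on success.
 */
if (of_get_mac_address(np, net_dev->dev_addr))
	eth_hw_addr_random(net_dev);	/* DT had no usable address */
```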
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3bfb659b5c99..d207bfcaf31d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -700,7 +700,8 @@ static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
ip_hdr(skb)->ihl << TX_IHL_SHIFT;
/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
- * it seems we don't need to pass the initial checksum. */
+ * it seems we don't need to pass the initial checksum.
+ */
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
cmd |= UDP_FRAME;
@@ -790,7 +791,8 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
WARN(1, "failed to prepare checksum!");
/* Should we set this? Can't use the value from skb_tx_csum()
- * as it's not the correct initial L4 checksum to use. */
+ * as it's not the correct initial L4 checksum to use.
+ */
desc->l4i_chk = 0;
desc->byte_cnt = hdr_len;
@@ -2700,7 +2702,6 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct platform_device *ppdev;
struct mv643xx_eth_platform_data ppd;
struct resource res;
- const char *mac_addr;
int ret;
int dev_num = 0;
@@ -2731,9 +2732,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
return -EINVAL;
}
- mac_addr = of_get_mac_address(pnp);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ppd.mac_addr, mac_addr);
+ of_get_mac_address(pnp, ppd.mac_addr);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a635cf84608a..7d5cd9bc6c99 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1087,7 +1087,7 @@ static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
return 0;
}
-static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
+static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
u32 wsize;
u8 target, attr;
@@ -2137,7 +2137,7 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
{
struct mvneta_port *pp = netdev_priv(dev);
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- int i, nxmit_byte = 0, nxmit = num_frame;
+ int i, nxmit_byte = 0, nxmit = 0;
int cpu = smp_processor_id();
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
@@ -2155,12 +2155,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
- if (ret == MVNETA_XDP_TX) {
- nxmit_byte += frames[i]->len;
- } else {
- xdp_return_frame_rx_napi(frames[i]);
- nxmit--;
- }
+ if (ret != MVNETA_XDP_TX)
+ break;
+
+ nxmit_byte += frames[i]->len;
+ nxmit++;
}
if (unlikely(flags & XDP_XMIT_FLUSH))
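
This rework (mirrored in mvpp2 further down) follows a change to the .ndo_xdp_xmit contract: on the first frame that cannot be queued the driver stops and returns the number actually sent, leaving ownership of the unsent tail with the caller instead of freeing it via xdp_return_frame_rx_napi() itself. A sketch of the caller-side view implied by the new contract (hypothetical variable names):

```c
/* Sketch: the XDP core frees whatever the driver did not send. */
sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);
if (sent < 0)
	sent = 0;
for (i = sent; i < n; i++)
	xdp_return_frame_rx_napi(frames[i]);
```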
@@ -3994,7 +3993,8 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
/* Armada 370 documentation says we can only change the port mode
* and in-band enable when the link is down, so force it down
- * while making these changes. We also do this for GMAC_CTRL2 */
+ * while making these changes. We also do this for GMAC_CTRL2
+ */
if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
(new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
(new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
@@ -4176,9 +4176,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
if (cpu == elected_cpu)
- /* Map the default receive queue queue to the
- * elected CPU
- */
+ /* Map the default receive queue to the elected CPU */
rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
/* We update the TX queue map only if we have one
@@ -4908,7 +4906,8 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
u32 lpi_ctl0;
/* The Armada 37x documents do not give limits for this other than
- * it being an 8-bit register. */
+ * it being an 8-bit register.
+ */
if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
@@ -5142,7 +5141,6 @@ static int mvneta_probe(struct platform_device *pdev)
struct net_device *dev;
struct phylink *phylink;
struct phy *comphy;
- const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
phy_interface_t phy_mode;
const char *mac_from;
@@ -5238,10 +5236,9 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_ports;
}
- dt_mac_addr = of_get_mac_address(dn);
- if (!IS_ERR(dt_mac_addr)) {
+ err = of_get_mac_address(dn, dev->dev_addr);
+ if (!err) {
mac_from = "device tree";
- ether_addr_copy(dev->dev_addr, dt_mac_addr);
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 1767c60056c5..ec706d614cac 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3744,7 +3744,7 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mvpp2_port *port = netdev_priv(dev);
- int i, nxmit_byte = 0, nxmit = num_frame;
+ int i, nxmit_byte = 0, nxmit = 0;
struct mvpp2_pcpu_stats *stats;
u16 txq_id;
u32 ret;
@@ -3762,12 +3762,11 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
for (i = 0; i < num_frame; i++) {
ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
- if (ret == MVPP2_XDP_TX) {
- nxmit_byte += frames[i]->len;
- } else {
- xdp_return_frame_rx_napi(frames[i]);
- nxmit--;
- }
+ if (ret != MVPP2_XDP_TX)
+ break;
+
+ nxmit_byte += frames[i]->len;
+ nxmit++;
}
if (likely(nxmit > 0))
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 4812cdb4609e..7cc7d72d761e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -918,9 +918,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
@@ -1335,7 +1334,7 @@ static void mvpp2_prs_vid_init(struct mvpp2 *priv)
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- int tid;
+ int tid, ihl;
/* Ethertype: PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
@@ -1427,67 +1426,43 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
MVPP2_PRS_RI_UDF3_MASK);
mvpp2_prs_hw_write(priv, &pe);
- /* Ethertype: IPv4 without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
- sizeof(struct iphdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: IPv4 with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
+ /* Ethertype: IPv4 with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD,
- MVPP2_PRS_IPV4_HEAD_MASK);
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
- /* Clear ri before updating */
- pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
/* Ethertype: IPv6 without options */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
@@ -1674,7 +1649,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
pe.index = tid;
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD |
+ MVPP2_PRS_IPV4_IHL_MIN,
MVPP2_PRS_IPV4_HEAD_MASK |
MVPP2_PRS_IPV4_IHL_MASK);
@@ -1788,9 +1764,8 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index c16e5b9947bd..5ce5907be591 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -28,7 +28,8 @@
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
-#define MVPP2_PRS_IPV4_IHL 0x5
+#define MVPP2_PRS_IPV4_IHL_MIN 0x5
+#define MVPP2_PRS_IPV4_IHL_MAX 0xf
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 68deae529bc9..fac6474ad694 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -30,10 +30,35 @@
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format (Mbps) */
-static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
+ [CGX_LINK_NONE] = 0,
+ [CGX_LINK_10M] = 10,
+ [CGX_LINK_100M] = 100,
+ [CGX_LINK_1G] = 1000,
+ [CGX_LINK_2HG] = 2500,
+ [CGX_LINK_5G] = 5000,
+ [CGX_LINK_10G] = 10000,
+ [CGX_LINK_20G] = 20000,
+ [CGX_LINK_25G] = 25000,
+ [CGX_LINK_40G] = 40000,
+ [CGX_LINK_50G] = 50000,
+ [CGX_LINK_80G] = 80000,
+ [CGX_LINK_100G] = 100000,
+};
/* Convert firmware lmac type encoding to string */
-static char *cgx_lmactype_string[LMAC_MODE_MAX];
+static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
+ [LMAC_MODE_SGMII] = "SGMII",
+ [LMAC_MODE_XAUI] = "XAUI",
+ [LMAC_MODE_RXAUI] = "RXAUI",
+ [LMAC_MODE_10G_R] = "10G_R",
+ [LMAC_MODE_40G_R] = "40G_R",
+ [LMAC_MODE_QSGMII] = "QSGMII",
+ [LMAC_MODE_25G_R] = "25G_R",
+ [LMAC_MODE_50G_R] = "50G_R",
+ [LMAC_MODE_100G_R] = "100G_R",
+ [LMAC_MODE_USXGMII] = "USXGMII",
+};
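
Turning cgx_link_usertable_init() into const designated initializers moves both tables into rodata and removes the init-ordering dependency; any index not named in the initializer is implicitly zero/NULL, which lookups must tolerate. A hedged helper sketch (hypothetical function) showing the checks that implies:

```c
/* Sketch: bounds- and NULL-checked lookup into the const table. */
static const char *cgx_lmactype_str_sketch(u8 mode)
{
	if (mode >= LMAC_MODE_MAX || !cgx_lmactype_string[mode])
		return "unknown";
	return cgx_lmactype_string[mode];
}
```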
/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
@@ -659,34 +684,6 @@ int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
return err;
}
-static inline void cgx_link_usertable_init(void)
-{
- cgx_speed_mbps[CGX_LINK_NONE] = 0;
- cgx_speed_mbps[CGX_LINK_10M] = 10;
- cgx_speed_mbps[CGX_LINK_100M] = 100;
- cgx_speed_mbps[CGX_LINK_1G] = 1000;
- cgx_speed_mbps[CGX_LINK_2HG] = 2500;
- cgx_speed_mbps[CGX_LINK_5G] = 5000;
- cgx_speed_mbps[CGX_LINK_10G] = 10000;
- cgx_speed_mbps[CGX_LINK_20G] = 20000;
- cgx_speed_mbps[CGX_LINK_25G] = 25000;
- cgx_speed_mbps[CGX_LINK_40G] = 40000;
- cgx_speed_mbps[CGX_LINK_50G] = 50000;
- cgx_speed_mbps[CGX_LINK_80G] = 80000;
- cgx_speed_mbps[CGX_LINK_100G] = 100000;
-
- cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
- cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
- cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
- cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
- cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
- cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
- cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
- cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
- cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
- cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
-}
-
static int cgx_link_usertable_index_map(int speed)
{
switch (speed) {
@@ -828,7 +825,7 @@ static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
- char *lmac_string;
+ const char *lmac_string;
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
@@ -1377,7 +1374,6 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
list_add(&cgx->cgx_list, &cgx_list);
- cgx_link_usertable_init();
cgx_populate_features(cgx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index ea456099b33c..cedb2616c509 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -74,13 +74,13 @@ struct otx2_mbox {
struct otx2_mbox_dev *dev;
};
-/* Header which preceeds all mbox messages */
+/* Header which precedes all mbox messages */
struct mbox_hdr {
u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
};
-/* Header which preceeds every msg and is also part of it */
+/* Header which precedes every msg and is also part of it */
struct mbox_msghdr {
u16 pcifunc; /* Who's sending this msg */
u16 id; /* Mbox message ID */
@@ -177,6 +177,9 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
cpt_rd_wr_reg_msg) \
+M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
+M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
+ msg_rsp) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
@@ -216,6 +219,9 @@ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
+M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
+ npc_mcam_get_stats_req, \
+ npc_mcam_get_stats_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -277,8 +283,8 @@ struct msg_req {
struct mbox_msghdr hdr;
};
-/* Generic rsponse msg used a ack or response for those mbox
- * messages which doesn't have a specific rsp msg format.
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
*/
struct msg_rsp {
struct mbox_msghdr hdr;
@@ -299,7 +305,7 @@ struct ready_msg_rsp {
/* Structure for requesting resource provisioning.
* 'modify' flag to be used when either requesting more
- * or to detach partial of a cetain resource type.
+ * or to detach part of a certain resource type.
* Rest of the fields specify how many of what type to
* be attached.
* To request LFs from two blocks of same type this mailbox
@@ -489,7 +495,7 @@ struct cgx_set_link_mode_rsp {
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
-#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precison time protocol */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
#define RVU_MAC_CGX BIT_ULL(3)
#define RVU_MAC_RPM BIT_ULL(4)
@@ -605,6 +611,7 @@ enum nix_af_status {
NIX_AF_INVAL_SSO_PF_FUNC = -420,
NIX_AF_ERR_TX_VTAG_NOSPC = -421,
NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423,
};
/* For NIX RX vtag action */
@@ -1141,6 +1148,7 @@ struct npc_install_flow_req {
u64 features;
u16 entry;
u16 channel;
+ u16 chan_mask;
u8 intf;
u8 set_cntr; /* If a counter is available, set it for this entry? */
u8 default_rule;
@@ -1193,6 +1201,17 @@ struct npc_mcam_read_base_rule_rsp {
struct mcam_entry entry;
};
+struct npc_mcam_get_stats_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* mcam entry */
+};
+
+struct npc_mcam_get_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* counter stats */
+ u8 stat_ena; /* enabled */
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
@@ -1239,4 +1258,62 @@ struct cpt_lf_alloc_req_msg {
int blkaddr;
};
+/* Mailbox message request and response format for CPT stats. */
+struct cpt_sts_req {
+ struct mbox_msghdr hdr;
+ u8 blkaddr;
+};
+
+struct cpt_sts_rsp {
+ struct mbox_msghdr hdr;
+ u64 inst_req_pc;
+ u64 inst_lat_pc;
+ u64 rd_req_pc;
+ u64 rd_lat_pc;
+ u64 rd_uc_pc;
+ u64 active_cycles_pc;
+ u64 ctx_mis_pc;
+ u64 ctx_hit_pc;
+ u64 ctx_aop_pc;
+ u64 ctx_aop_lat_pc;
+ u64 ctx_ifetch_pc;
+ u64 ctx_ifetch_lat_pc;
+ u64 ctx_ffetch_pc;
+ u64 ctx_ffetch_lat_pc;
+ u64 ctx_wback_pc;
+ u64 ctx_wback_lat_pc;
+ u64 ctx_psh_pc;
+ u64 ctx_psh_lat_pc;
+ u64 ctx_err;
+ u64 ctx_enc_id;
+ u64 ctx_flush_timer;
+ u64 rxc_time;
+ u64 rxc_time_cfg;
+ u64 rxc_active_sts;
+ u64 rxc_zombie_sts;
+ u64 busy_sts_ae;
+ u64 free_sts_ae;
+ u64 busy_sts_se;
+ u64 free_sts_se;
+ u64 busy_sts_ie;
+ u64 free_sts_ie;
+ u64 exe_err_info;
+ u64 cptclk_cnt;
+ u64 diag;
+ u64 rxc_dfrg;
+ u64 x2p_link_cfg0;
+ u64 x2p_link_cfg1;
+};
+
+/* Mailbox message request format to configure reassembly timeout. */
+struct cpt_rxc_time_cfg_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ u32 step;
+ u16 zombie_thres;
+ u16 zombie_limit;
+ u16 active_thres;
+ u16 active_limit;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3c640f6aba92..1e012e787260 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -167,6 +167,8 @@ enum key_fields {
NPC_IPPROTO_SCTP,
NPC_IPPROTO_AH,
NPC_IPPROTO_ESP,
+ NPC_IPPROTO_ICMP,
+ NPC_IPPROTO_ICMP6,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
@@ -420,6 +422,11 @@ struct nix_tx_action {
#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+/* NPC MCAM reserved entry index per nixlf */
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_PROMISC_ENTRY 2
+
struct npc_mcam_kex {
/* MKEX Profile Header */
u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 76f399229ddb..c2cc4806d13c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -548,6 +548,12 @@ static inline int is_afvf(u16 pcifunc)
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+/* check if PF_FUNC is AF */
+static inline bool is_pffunc_af(u16 pcifunc)
+{
+ return !pcifunc;
+}
+
static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
{
return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
@@ -640,7 +646,8 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, bool allmulti);
+ int nixlf, u64 chan, u8 chan_cnt,
+ bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
@@ -665,9 +672,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
const char *npc_get_field_name(u8 hdr);
-bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
- u16 pcifunc, u8 intf, struct mcam_entry *entry,
- int *entry_index);
int npc_get_bank(struct npc_mcam *mcam, int index);
void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
@@ -680,6 +684,11 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
+ int type);
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
+ int index);
+
/* CPT APIs */
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 0945c3a3b180..89253f7bdadb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */
+#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
@@ -9,6 +10,28 @@
/* CPT PF device id */
#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
+
+/* Length of initial context fetch in 128 byte words */
+#define CPT_CTX_ILEN 2
+
+#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
+({ \
+ u64 free_sts = 0, busy_sts = 0; \
+ typeof(rsp) _rsp = rsp; \
+ u32 e, i; \
+ \
+ for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
+ if (reg & 0x1) \
+ busy_sts |= 1ULL << i; \
+ \
+ if (reg & 0x2) \
+ free_sts |= 1ULL << i; \
+ } \
+ (_rsp)->busy_sts_##etype = busy_sts; \
+ (_rsp)->free_sts_##etype = free_sts; \
+})
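
cpt_get_eng_sts() is written as a statement-expression macro so one body can fill the busy_sts_*/free_sts_* pairs for all three engine types via the ##etype token paste; note that it deliberately reuses the `reg` local from the calling function's scope. Expanded for the AE range it is roughly equivalent to:

```c
/* Sketch: cpt_get_eng_sts(e_min, e_max, rsp, ae) after expansion.
 * CPT_AF_EXEX_STS(e) bit 0 means "busy", bit 1 means "free"; engine
 * e within the range maps to bit i of the per-type bitmap.
 */
u64 free_sts = 0, busy_sts = 0;
u32 e, i;

for (e = e_min, i = 0; e < e_max; e++, i++) {
	reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
	if (reg & 0x1)
		busy_sts |= 1ULL << i;
	if (reg & 0x2)
		free_sts |= 1ULL << i;
}
rsp->busy_sts_ae = busy_sts;
rsp->free_sts_ae = free_sts;
```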
static int get_cpt_pf_num(struct rvu *rvu)
{
@@ -21,7 +44,8 @@ static int get_cpt_pf_num(struct rvu *rvu)
if (!pdev)
continue;
- if (pdev->device == PCI_DEVID_OTX2_CPT_PF) {
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
+ pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
cpt_pf_num = i;
put_device(&pdev->dev);
break;
@@ -55,6 +79,17 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
return true;
}
+static int validate_and_get_cpt_blkaddr(int req_blkaddr)
+{
+ int blkaddr;
+
+ blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ return blkaddr;
+}
+
int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
struct cpt_lf_alloc_req_msg *req,
struct msg_rsp *rsp)
@@ -65,9 +100,9 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
int num_lfs, slot;
u64 val;
- blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0;
- if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
- return -ENODEV;
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
if (req->eng_grpmsk == 0x0)
return CPT_AF_ERR_GRP_INVALID;
@@ -103,6 +138,9 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
+ if (!is_rvu_otx2(rvu))
+ val |= (CPT_CTX_ILEN << 17);
+
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
/* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
@@ -162,7 +200,9 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
struct rvu_block *block;
struct rvu_pfvf *pfvf;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
/* Registers that can be accessed from PF/VF */
if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
@@ -192,6 +232,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
case CPT_AF_CONSTANTS1:
+ case CPT_AF_CTX_FLUSH_TIMER:
return true;
}
@@ -217,9 +258,9 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
{
int blkaddr;
- blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0;
- if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
- return -ENODEV;
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
/* This message is accepted only if sent from CPT PF/VF */
if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
@@ -241,6 +282,141 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
return 0;
}
+static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ if (is_rvu_otx2(rvu))
+ return;
+
+ rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
+ rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
+ rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
+ rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_AOP_LATENCY_PC);
+ rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
+ rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_IFETCH_LATENCY_PC);
+ rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_WBACK_PC);
+ rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_WBACK_LATENCY_PC);
+ rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_PSH_PC);
+ rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_PSH_LATENCY_PC);
+ rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
+ rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
+ rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+
+ rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
+ rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+}
+
+static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ /* Get AE status */
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ cpt_get_eng_sts(e_min, e_max, rsp, ae);
+ /* Get SE status */
+ e_min = 0;
+ e_max = max_ses;
+ cpt_get_eng_sts(e_min, e_max, rsp, se);
+ /* Get IE status */
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ cpt_get_eng_sts(e_min, e_max, rsp, ie);
+}
+
+int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
+ struct cpt_sts_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ get_ctx_pc(rvu, rsp, blkaddr);
+
+ /* Get CPT engines status */
+ get_eng_sts(rvu, rsp, blkaddr);
+
+ /* Read CPT instruction PC registers */
+ rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_ACTIVE_CYCLES_PC);
+ rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);
+
+ return 0;
+}
+
+#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
+#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
+#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
+#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0)
+#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48)
+#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
+
+static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
+ int blkaddr)
+{
+ u64 dfrg_reg;
+
+ dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
+}
+
+int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
+ struct cpt_rxc_time_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ cpt_rxc_time_cfg(rvu, req, blkaddr);
+
+ return 0;
+}
+
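
FIELD_PREP(mask, val) shifts val into the bit positions named by its GENMASK_ULL() mask, so cpt_rxc_time_cfg() can assemble CPT_AF_RXC_DFRG without hand-written shift constants. A worked example with the masks defined above:

```c
/* Sketch: FIELD_PREP packs the value into the mask's bit range.
 * RXC_ZOMBIE_THRES is bits 59:48, RXC_ACTIVE_LIMIT is bits 11:0.
 */
u64 dfrg = FIELD_PREP(RXC_ZOMBIE_THRES, 0x123) |
	   FIELD_PREP(RXC_ACTIVE_LIMIT, 0x040);
/* dfrg == (0x123ULL << 48) | 0x040 */
```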
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index de3968d2e5ce..9bf8eaabf9ab 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2017,7 +2017,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
break;
case NPC_OUTER_VID:
- seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci));
break;
@@ -2160,7 +2160,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
seq_printf(s, "\tmcam entry: %d\n", iter->entry);
rvu_dbg_npc_mcam_show_flows(s, iter);
- if (iter->intf == NIX_INTF_RX) {
+ if (is_npc_intf_rx(iter->intf)) {
target = iter->rx_action.pf_func;
pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
seq_printf(s, "\tForward to: PF%d ", pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 3d068b7d46bd..0a8bd667cb11 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -273,7 +273,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
- pfvf->rx_chan_base, false);
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt, false);
break;
}
@@ -3088,7 +3089,8 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
- pfvf->rx_chan_base, allmulti);
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt, allmulti);
return 0;
}
@@ -3635,9 +3637,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
- rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
-
- npc_mcam_disable_flows(rvu, pcifunc);
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0bd49c7080a6..0bc4529691ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -22,10 +22,6 @@
#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
-#define NIXLF_UCAST_ENTRY 0
-#define NIXLF_BCAST_ENTRY 1
-#define NIXLF_PROMISC_ENTRY 2
-
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
#define NPC_HW_TSTAMP_OFFSET 8
#define NPC_KEX_CHAN_MASK 0xFFFULL
@@ -96,6 +92,10 @@ int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
if (is_npc_intf_tx(intf))
return 0;
+ /* skip the channel check for AF-installed rules */
+ if (is_pffunc_af(pcifunc))
+ return 0;
+
if (is_afvf(pcifunc)) {
end = rvu_get_num_lbk_chans();
if (end < 0)
@@ -196,8 +196,8 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
}
-static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
- u16 pcifunc, int nixlf, int type)
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
{
int pf = rvu_get_pf(pcifunc);
int index;
@@ -230,8 +230,8 @@ int npc_get_bank(struct npc_mcam *mcam, int index)
return bank;
}
-static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index)
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
u64 cfg;
@@ -647,13 +647,17 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, bool allmulti)
+ int nixlf, u64 chan, u8 chan_cnt,
+ bool allmulti)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, ucast_idx, index, kwi;
- struct mcam_entry entry = { {0} };
- struct nix_rx_action action = { };
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action;
+ u64 relaxed_mask;
/* Only PF or AF VF can add a promiscuous entry */
if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
@@ -663,24 +667,15 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
+ *(u64 *)&action = 0x00;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- if (allmulti) {
- kwi = NPC_KEXOF_DMAC / sizeof(u64);
- entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
- entry.kw_mask[kwi] = BIT_ULL(40);
- }
-
- ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
-
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
*/
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
@@ -691,9 +686,36 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- pfvf->nix_rx_intf, &entry, true);
+ if (allmulti) {
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+ }
+
+ req.chan_mask = 0xFFFU;
+ if (chan_cnt > 1) {
+ if (!is_power_of_2(chan_cnt)) {
+ dev_err(rvu->dev,
+ "%s: channel count more than 1, must be power of 2\n", __func__);
+ return;
+ }
+ relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
+ ilog2(chan_cnt));
+ req.chan_mask &= relaxed_mask;
+ }
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
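The chan_mask relaxation above is what lets one promisc MCAM entry cover a whole channel range: for a power-of-2 chan_cnt, clearing the low ilog2(chan_cnt) bits of the 12-bit channel match makes all chan_cnt consecutive channels hit the same entry, assuming the base channel is aligned to the count. A standalone user-space sketch of the mask math, not part of the patch:

#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t chan_mask = 0xFFF;	/* full 12-bit exact-match mask */
	uint32_t chan_cnt = 4;		/* must be a power of 2 */

	/* GENMASK_ULL(63, ilog2(chan_cnt)) in the driver */
	chan_mask &= ~0ULL << ilog2_u32(chan_cnt);
	chan_mask &= 0xFFF;
	printf("chan_cnt=%u -> chan_mask=0x%03llx\n", chan_cnt,
	       (unsigned long long)chan_mask);	/* 0xffc: base..base+3 all match */
	return 0;
}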
@@ -728,12 +750,14 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan)
{
+ struct rvu_pfvf *pfvf;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct rvu_hwinfo *hw = rvu->hw;
- struct nix_rx_action action;
- struct rvu_pfvf *pfvf;
int blkaddr, index;
+ u32 req_index = 0;
+ u8 op;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -755,32 +779,29 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Match ingress channel */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xfffull;
-
- /* Match broadcast MAC address.
- * DMAC is extracted at 0th bit of PARSE_KEX::KW1
- */
- entry.kw[1] = 0xffffffffffffull;
- entry.kw_mask[1] = 0xffffffffffffull;
-
- *(u64 *)&action = 0x00;
if (!hw->cap.nix_rx_multicast) {
/* Early silicon doesn't support pkt replication,
* so install entry with UCAST action, so that PF
* receives all broadcast packets.
*/
- action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
+ op = NIX_RX_ACTIONOP_UCAST;
} else {
- action.index = pfvf->bcast_mce_idx;
- action.op = NIX_RX_ACTIONOP_MCAST;
+ op = NIX_RX_ACTIONOP_MCAST;
+ req_index = pfvf->bcast_mce_idx;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- pfvf->nix_rx_intf, &entry, true);
+ eth_broadcast_addr((u8 *)&req.packet.dmac);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.index = req_index;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
@@ -967,7 +988,7 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct rvu_npc_mcam_rule *rule;
+ struct rvu_npc_mcam_rule *rule, *tmp;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -977,15 +998,18 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
mutex_lock(&mcam->lock);
/* Disable MCAM entries directing traffic to this 'pcifunc' */
- list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
if (is_npc_intf_rx(rule->intf) &&
rule->rx_action.pf_func == pcifunc) {
npc_enable_mcam_entry(rvu, mcam, blkaddr,
rule->entry, false);
rule->enable = false;
/* Indicate that default rule is disabled */
- if (rule->default_rule)
+ if (rule->default_rule) {
pfvf->def_ucast_rule = NULL;
+ list_del(&rule->list);
+ kfree(rule);
+ }
}
}
@@ -1674,6 +1698,9 @@ void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
+ /* AF-installed entries are accepted without ownership checks */
+ if (is_pffunc_af(pcifunc))
+ return 0;
/* Verify if entry is valid and if it is indeed
* allocated to the requesting PFFUNC.
*/
@@ -2268,6 +2295,10 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
goto exit;
}
+ /* For AF installed rules, the nix_intf should be set to target NIX */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ nix_intf = req->intf;
+
npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
&req->entry_data, req->enable_entry);
@@ -2730,30 +2761,6 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
- u16 pcifunc, u8 intf, struct mcam_entry *entry,
- int *index)
-{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- struct npc_mcam *mcam = &rvu->hw->mcam;
- bool enable;
- u8 nix_intf;
-
- if (is_npc_intf_tx(intf))
- nix_intf = pfvf->nix_tx_intf;
- else
- nix_intf = pfvf->nix_rx_intf;
-
- *index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- /* dont force enable unicast entry */
- enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index);
- npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf,
- entry, enable);
-
- return enable;
-}
-
int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
struct msg_req *req,
struct npc_mcam_read_base_rule_rsp *rsp)
@@ -2799,3 +2806,42 @@ read_entry:
out:
return rc;
}
+
+int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
+ struct npc_mcam_get_stats_req *req,
+ struct npc_mcam_get_stats_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, cntr;
+ int blkaddr;
+ u64 regval;
+ u32 bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ index = req->entry & (mcam->banksize - 1);
+ bank = npc_get_bank(mcam, req->entry);
+
+ /* read MCAM entry STAT_ACT register */
+ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
+
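+ /* bit 9 of STAT_ACT indicates a counter is attached; bits 8:0 hold its index */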
+ if (!(regval & BIT_ULL(9))) {
+ rsp->stat_ena = 0;
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ cntr = regval & 0x1FF;
+
+ rsp->stat_ena = 1;
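+ /* NPC match counters are 48 bits wide */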
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 4ba9d54ce4e3..7f35b62eea13 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -29,6 +29,8 @@ static const char * const npc_flow_names[] = {
[NPC_IPPROTO_TCP] = "ip proto tcp",
[NPC_IPPROTO_UDP] = "ip proto udp",
[NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_ICMP] = "ip proto icmp",
+ [NPC_IPPROTO_ICMP6] = "ip proto icmp6",
[NPC_IPPROTO_AH] = "ip proto AH",
[NPC_IPPROTO_ESP] = "ip proto ESP",
[NPC_SPORT_TCP] = "tcp source port",
@@ -427,6 +429,7 @@ do { \
* packet header fields below.
* Example: Source IP is 4 bytes and starts at 12th byte of IP header
*/
+ NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
@@ -477,9 +480,12 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_IPPROTO_SCTP);
}
- /* for AH, check if corresponding layer type is present in the key */
- if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ /* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
*features |= BIT_ULL(NPC_IPPROTO_AH);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
/* for ESP, check if corresponding layer type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
@@ -597,7 +603,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
dev_info(rvu->dev, "Unsupported flow(s):\n");
for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
- return -EOPNOTSUPP;
+ return NIX_AF_ERR_NPC_KEY_NOT_SUPP;
}
return 0;
@@ -769,6 +775,12 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
+ 0, ~0ULL, 0, intf);
if (features & BIT_ULL(NPC_OUTER_VID))
npc_update_entry(rvu, NPC_LB, entry,
@@ -798,6 +810,7 @@ do { \
NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
ntohl(mask->ip4src), 0);
NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
@@ -903,9 +916,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct npc_install_flow_req *req, u16 target)
{
struct nix_rx_action action;
+ u64 chan_mask;
- npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
- ~0ULL, 0, NIX_INTF_RX);
+ chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
+ NIX_INTF_RX);
*(u64 *)&action = 0x00;
action.pf_func = target;
@@ -998,33 +1013,21 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
if (is_npc_intf_tx(req->intf))
goto find_rule;
- if (def_ucast_rule)
+ if (req->default_rule) {
+ entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
+ NIXLF_UCAST_ENTRY);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
+ }
+
+ /* update mcam entry with default unicast rule attributes */
+ if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) {
missing_features = (def_ucast_rule->features ^ features) &
def_ucast_rule->features;
-
- if (req->default_rule && req->append) {
- /* add to default rule */
if (missing_features)
npc_update_flow(rvu, entry, missing_features,
&def_ucast_rule->packet,
&def_ucast_rule->mask,
&dummy, req->intf);
- enable = rvu_npc_write_default_rule(rvu, blkaddr,
- nixlf, target,
- pfvf->nix_rx_intf, entry,
- &entry_index);
- installed_features = req->features | missing_features;
- } else if (req->default_rule && !req->append) {
- /* overwrite default rule */
- enable = rvu_npc_write_default_rule(rvu, blkaddr,
- nixlf, target,
- pfvf->nix_rx_intf, entry,
- &entry_index);
- } else if (msg_from_vf) {
- /* normal rule - include default rule also to it for VF */
- npc_update_flow(rvu, entry, missing_features,
- &def_ucast_rule->packet, &def_ucast_rule->mask,
- &dummy, req->intf);
installed_features = req->features | missing_features;
}
@@ -1036,12 +1039,9 @@ find_rule:
return -ENOMEM;
new = true;
}
- /* no counter for default rule */
- if (req->default_rule)
- goto update_rule;
/* allocate new counter if rule has no counter */
- if (req->set_cntr && !rule->has_cntr)
+ if (!req->default_rule && req->set_cntr && !rule->has_cntr)
rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
/* if user wants to delete an existing counter for a rule then
@@ -1051,7 +1051,14 @@ find_rule:
rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
write_req.hdr.pcifunc = owner;
- write_req.entry = req->entry;
+
+ /* AF owns the default rules so change the owner just to relax
+ * the checks in rvu_mbox_handler_npc_mcam_write_entry
+ */
+ if (req->default_rule)
+ write_req.hdr.pcifunc = 0;
+
+ write_req.entry = entry_index;
write_req.intf = req->intf;
write_req.enable_entry = (u8)enable;
/* if counter is available then clear and use it */
@@ -1069,7 +1076,7 @@ find_rule:
kfree(rule);
return err;
}
-update_rule:
+ /* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
rule->entry = entry_index;
@@ -1145,8 +1152,13 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
else
target = req->hdr.pcifunc;
- if (npc_check_unsupported_flows(rvu, req->features, req->intf))
- return -EOPNOTSUPP;
+ /* ignore chan_mask in case pf func is not AF, revisit later */
+ if (!is_pffunc_af(req->hdr.pcifunc))
+ req->chan_mask = 0xFFF;
+
+ err = npc_check_unsupported_flows(rvu, req->features, req->intf);
+ if (err)
+ return err;
if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
return -EINVAL;
@@ -1278,6 +1290,7 @@ static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
write_req.hdr.pcifunc = rule->owner;
write_req.entry = rule->entry;
+ write_req.intf = pfvf->nix_rx_intf;
mutex_unlock(&mcam->lock);
err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 3e401fd8ac63..ac71c0f2f960 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -494,6 +494,27 @@
#define CPT_AF_RAS_INT_W1S (0x47028)
#define CPT_AF_RAS_INT_ENA_W1S (0x47030)
#define CPT_AF_RAS_INT_ENA_W1C (0x47038)
+#define CPT_AF_CTX_FLUSH_TIMER (0x48000ull)
+#define CPT_AF_CTX_ERR (0x48008ull)
+#define CPT_AF_CTX_ENC_ID (0x48010ull)
+#define CPT_AF_CTX_MIS_PC (0x49400ull)
+#define CPT_AF_CTX_HIT_PC (0x49408ull)
+#define CPT_AF_CTX_AOP_PC (0x49410ull)
+#define CPT_AF_CTX_AOP_LATENCY_PC (0x49418ull)
+#define CPT_AF_CTX_IFETCH_PC (0x49420ull)
+#define CPT_AF_CTX_IFETCH_LATENCY_PC (0x49428ull)
+#define CPT_AF_CTX_FFETCH_PC (0x49430ull)
+#define CPT_AF_CTX_FFETCH_LATENCY_PC (0x49438ull)
+#define CPT_AF_CTX_WBACK_PC (0x49440ull)
+#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
+#define CPT_AF_CTX_PSH_PC (0x49450ull)
+#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_RXC_TIME (0x50010ull)
+#define CPT_AF_RXC_TIME_CFG (0x50018ull)
+#define CPT_AF_RXC_DFRG (0x50020ull)
+#define CPT_AF_RXC_ACTIVE_STS (0x50028ull)
+#define CPT_AF_RXC_ZOMBIE_STS (0x50030ull)
+#define CPT_AF_X2PX_LINK_CFG(a) (0x51000ull | (u64)(a) << 3)
#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
#define CPT_AF_BAR2_SEL 0x9000000
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 745aa8a19499..457c94793e63 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o cn10k.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index a518c2283f18..45730d0d92f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -18,6 +18,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
+#include <net/pkt_cls.h>
#include <mbox.h>
#include <npc.h>
@@ -264,6 +265,7 @@ struct otx2_flow_config {
#define OTX2_MAX_NTUPLE_FLOWS 32
#define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS
#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
@@ -274,10 +276,20 @@ struct otx2_flow_config {
#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
+ u32 tc_flower_offset;
u32 ntuple_max_flows;
+ u32 tc_max_flows;
struct list_head flow_list;
};
+struct otx2_tc_info {
+ /* hash table to store TC offloaded flows */
+ struct rhashtable flow_table;
+ struct rhashtable_params flow_ht_params;
+ DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS);
+ unsigned long num_entries;
+};
+
struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
@@ -305,6 +317,8 @@ struct otx2_nic {
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
+#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
+#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
u64 flags;
struct otx2_qset qset;
@@ -347,6 +361,7 @@ struct otx2_nic {
struct hwtstamp_config tstamp;
struct otx2_flow_config *flow_cfg;
+ struct otx2_tc_info tc_info;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -802,4 +817,9 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+/* tc support */
+int otx2_init_tc(struct otx2_nic *nic);
+void otx2_shutdown_tc(struct otx2_nic *nic);
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index dc1778420978..0b4fa92ba821 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -57,10 +57,13 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
flow_cfg->ntuple_max_flows = rsp->count;
flow_cfg->ntuple_offset = 0;
pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
} else {
flow_cfg->vf_vlan_offset = 0;
flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
vf_vlan_max_flows;
+ flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
OTX2_MAX_NTUPLE_FLOWS;
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
@@ -69,6 +72,7 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
}
for (i = 0; i < rsp->count; i++)
@@ -93,6 +97,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
+ pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;
err = otx2_alloc_mcam_entries(pf);
if (err)
@@ -303,6 +308,35 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ if (ipv4_usr_mask->tos) {
+ pkt->tos = ipv4_usr_hdr->tos;
+ pmask->tos = ipv4_usr_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_usr_mask->proto) {
+ switch (ipv4_usr_hdr->proto) {
+ case IPPROTO_ICMP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ break;
+ case IPPROTO_TCP:
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ break;
+ case IPPROTO_UDP:
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ break;
+ case IPPROTO_SCTP:
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case IPPROTO_AH:
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ break;
+ case IPPROTO_ESP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
pkt->etype = cpu_to_be16(ETH_P_IP);
pmask->etype = cpu_to_be16(0xFFFF);
req->features |= BIT_ULL(NPC_ETYPE);
@@ -327,6 +361,11 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ if (ipv4_l4_mask->tos) {
+ pkt->tos = ipv4_l4_hdr->tos;
+ pmask->tos = ipv4_l4_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
if (ipv4_l4_mask->psrc) {
memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
sizeof(pkt->sport));
@@ -377,10 +416,14 @@ static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ if (ah_esp_mask->tos) {
+ pkt->tos = ah_esp_hdr->tos;
+ pmask->tos = ah_esp_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
/* NPC profile doesn't extract AH/ESP header fields */
- if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
- (ah_esp_mask->tos & ah_esp_mask->tos))
+ if (ah_esp_mask->spi & ah_esp_hdr->spi)
return -EOPNOTSUPP;
if (flow_type == AH_V4_FLOW)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2fd3d235d292..03004fdac0c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1765,6 +1765,24 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static netdev_features_t otx2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ /* check if n-tuple filters are ON */
+ if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) {
+ netdev_info(dev, "Disabling n-tuple filters\n");
+ features &= ~NETIF_F_NTUPLE;
+ }
+
+ /* check if tc hw offload is ON */
+ if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) {
+ netdev_info(dev, "Disabling TC hardware offload\n");
+ features &= ~NETIF_F_HW_TC;
+ }
+
+ return features;
+}
+
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1827,6 +1845,12 @@ static int otx2_set_features(struct net_device *netdev,
if ((changed & NETIF_F_NTUPLE) && !ntuple)
otx2_destroy_ntuple_flows(pf);
+ if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ pf->tc_info.num_entries) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
return 0;
}
@@ -2225,6 +2249,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
.ndo_set_rx_mode = otx2_set_rx_mode,
@@ -2235,6 +2260,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -2454,6 +2480,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_HW_VLAN_STAG_RX;
netdev->features |= netdev->hw_features;
+ /* HW supports tc offload, but it is mutually exclusive with n-tuple filters */
+ if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -2475,6 +2505,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_set_ethtool_ops(netdev);
+ err = otx2_init_tc(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
@@ -2484,6 +2518,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_mcam_flow_del:
+ otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
err_del_mcam_entries:
@@ -2651,6 +2687,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_ptp_destroy(pf);
otx2_mcam_flow_del(pf);
+ otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_base)
iounmap(pf->hw.lmt_base);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 21b811c6ee0f..f4fd72ee9a25 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -152,6 +152,7 @@
#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
new file mode 100644
index 000000000000..51157b283f6f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/bitfield.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/ipv6.h>
+
+#include "otx2_common.h"
+
+/* Egress rate limiting definitions */
+#define MAX_BURST_EXPONENT 0x0FULL
+#define MAX_BURST_MANTISSA 0xFFULL
+#define MAX_BURST_SIZE 130816ULL
+#define MAX_RATE_DIVIDER_EXPONENT 12ULL
+#define MAX_RATE_EXPONENT 0x0FULL
+#define MAX_RATE_MANTISSA 0xFFULL
+
+/* Bitfields in NIX_TLX_PIR register */
+#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
+#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
+#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
+#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
+#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+
+struct otx2_tc_flow_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+struct otx2_tc_flow {
+ struct rhash_head node;
+ unsigned long cookie;
+ u16 entry;
+ unsigned int bitpos;
+ struct rcu_head rcu;
+ struct otx2_tc_flow_stats stats;
+ spinlock_t lock; /* lock for stats */
+};
+
+static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ unsigned int tmp;
+
+ /* Burst is calculated as
+ * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
+ * Max supported burst size is 130,816 bytes.
+ */
+ burst = min_t(u32, burst, MAX_BURST_SIZE);
+ if (burst) {
+ *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
+ tmp = burst - rounddown_pow_of_two(burst);
+ if (burst < MAX_BURST_MANTISSA)
+ *burst_mantissa = tmp * 2;
+ else
+ *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
+ } else {
+ *burst_exp = MAX_BURST_EXPONENT;
+ *burst_mantissa = MAX_BURST_MANTISSA;
+ }
+}
+
+static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+ u32 *mantissa, u32 *div_exp)
+{
+ unsigned int tmp;
+
+ /* Rate calculation by hardware
+ *
+ * PIR_ADD = ((256 + mantissa) << exp) / 256
+ * rate = (2 * PIR_ADD) / (1 << div_exp)
+ * The resultant rate is in Mbps.
+ */
+
+ /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
+ * Setting this to '0' will ease the calculation of
+ * exponent and mantissa.
+ */
+ *div_exp = 0;
+
+ if (maxrate) {
+ *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
+ tmp = maxrate - rounddown_pow_of_two(maxrate);
+ if (maxrate < MAX_RATE_MANTISSA)
+ *mantissa = tmp * 2;
+ else
+ *mantissa = tmp / (1ULL << (*exp - 7));
+ } else {
+ /* Instead of disabling rate limiting, set all values to max */
+ *exp = MAX_RATE_EXPONENT;
+ *mantissa = MAX_RATE_MANTISSA;
+ }
+}
+
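The comment above gives the hardware decode, so round-tripping it is a quick sanity check on the exponent/mantissa split. A user-space sketch for the maxrate >= 256 Mbps branch with div_exp = 0 (illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t maxrate = 1000;	/* Mbps */
	uint32_t exp = ilog2_u32(maxrate) - 1;
	uint32_t tmp = maxrate - (1u << ilog2_u32(maxrate));
	uint32_t mantissa = tmp / (1u << (exp - 7));
	/* PIR_ADD = ((256 + mantissa) << exp) / 256 */
	uint64_t pir_add = ((256ULL + mantissa) << exp) / 256;

	/* rate = (2 * PIR_ADD) >> div_exp, here div_exp = 0 */
	printf("exp=%u mantissa=%u -> %llu Mbps\n", exp, mantissa,
	       (unsigned long long)(2 * pir_add));	/* prints 1000 Mbps */
	return 0;
}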
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
+ u32 burst_exp, burst_mantissa;
+ u32 exp, mantissa, div_exp;
+ int txschq, err;
+
+ /* All SQs share the same TL4, so pick the first scheduler */
+ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_TL4;
+ req->num_regs = 1;
+ req->reg[0] = NIX_AF_TL4X_PIR(txschq);
+ req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ return err;
+}
+
+static int otx2_tc_validate_flow(struct otx2_nic *nic,
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
+{
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload supports only 1 policing action");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u32 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one Egress MATCHALL ratelimiter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ if (entry->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+ /* Convert bytes per second to Mbps */
+ rate = entry->police.rate_bytes_ps * 8;
+ rate = max_t(u32, rate / 1000000, 1);
+ err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action is supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
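As a worked example of the conversion above, a policer of 1.25e9 bytes/s is 10 Gbit/s, i.e. 10000 Mbps. A standalone sketch with the arithmetic kept in 64 bits throughout (the u32 intermediate used above would truncate for rates past roughly 4.29 Gbit/s):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate_bytes_ps = 1250000000ULL;	/* 10 Gbit/s */
	uint32_t mbps;

	mbps = (uint32_t)(rate_bytes_ps * 8 / 1000000);	/* divide in u64 */
	if (mbps < 1)
		mbps = 1;	/* hardware minimum of 1 Mbps */
	printf("%llu bytes/s -> %u Mbps\n",
	       (unsigned long long)rate_bytes_ps, mbps);
	return 0;
}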
+static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = otx2_set_matchall_egress_rate(nic, 0, 0);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req)
+{
+ struct flow_action_entry *act;
+ struct net_device *target;
+ struct otx2_nic *priv;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ netdev_info(nic->netdev, "no tc actions specified");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ req->op = NIX_RX_ACTIONOP_DROP;
+ return 0;
+ case FLOW_ACTION_ACCEPT:
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ /* npc_install_flow_req doesn't support passing a target pcifunc */
+ if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ netdev_info(nic->netdev,
+ "can't redirect to other pf/vf\n");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_VLAN_POP:
+ req->vtag0_valid = true;
+ /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_tc_prepare_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *f,
+ struct npc_install_flow_req *req)
+{
+ struct flow_msg *flow_spec = &req->packet;
+ struct flow_msg *flow_mask = &req->mask;
+ struct flow_dissector *dissector;
+ struct flow_rule *rule;
+ u8 ip_proto = 0;
+
+ rule = flow_cls_offload_flow_rule(f);
+ dissector = rule->match.dissector;
+
+ if ((dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP)))) {
+ netdev_info(nic->netdev, "unsupported flow used key 0x%x",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ /* All EtherTypes can be matched, no hw limitation */
+ flow_spec->etype = match.key->n_proto;
+ flow_mask->etype = match.mask->n_proto;
+ req->features |= BIT_ULL(NPC_ETYPE);
+
+ if (match.mask->ip_proto &&
+ (match.key->ip_proto != IPPROTO_TCP &&
+ match.key->ip_proto != IPPROTO_UDP &&
+ match.key->ip_proto != IPPROTO_SCTP &&
+ match.key->ip_proto != IPPROTO_ICMP &&
+ match.key->ip_proto != IPPROTO_ICMPV6)) {
+ netdev_info(nic->netdev,
+ "ip_proto=0x%x not supported\n",
+ match.key->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ip_proto)
+ ip_proto = match.key->ip_proto;
+
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ else if (ip_proto == IPPROTO_ICMP)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ else if (ip_proto == IPPROTO_ICMPV6)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ if (!is_zero_ether_addr(match.mask->src)) {
+ netdev_err(nic->netdev, "src mac match not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
+ ether_addr_copy(flow_mask->dmac,
+ (u8 *)&match.mask->dst);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
+ match.mask->tos) {
+ netdev_err(nic->netdev, "tos not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ttl) {
+ netdev_err(nic->netdev, "ttl not supported\n");
+ return -EOPNOTSUPP;
+ }
+ flow_spec->tos = match.key->tos;
+ flow_mask->tos = match.mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ u16 vlan_tci, vlan_tci_mask;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
+ netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
+ ntohs(match.key->vlan_tpid));
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id ||
+ match.mask->vlan_dei ||
+ match.mask->vlan_priority) {
+ vlan_tci = match.key->vlan_id |
+ match.key->vlan_dei << 12 |
+ match.key->vlan_priority << 13;
+
+ vlan_tci_mask = match.mask->vlan_id |
+ match.mask->vlan_dei << 12 |
+ match.mask->vlan_priority << 13;
+
+ flow_spec->vlan_tci = htons(vlan_tci);
+ flow_mask->vlan_tci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+
+ flow_spec->ip4dst = match.key->dst;
+ flow_mask->ip4dst = match.mask->dst;
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+
+ flow_spec->ip4src = match.key->src;
+ flow_mask->ip4src = match.mask->src;
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
+ netdev_err(nic->netdev,
+ "Flow matching on IPv6 loopback addr is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ipv6_addr_any(&match.mask->dst)) {
+ memcpy(&flow_spec->ip6dst,
+ (struct in6_addr *)&match.key->dst,
+ sizeof(flow_spec->ip6dst));
+ memcpy(&flow_mask->ip6dst,
+ (struct in6_addr *)&match.mask->dst,
+ sizeof(flow_spec->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ if (!ipv6_addr_any(&match.mask->src)) {
+ memcpy(&flow_spec->ip6src,
+ (struct in6_addr *)&match.key->src,
+ sizeof(flow_spec->ip6src));
+ memcpy(&flow_mask->ip6src,
+ (struct in6_addr *)&match.mask->src,
+ sizeof(flow_spec->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+
+ flow_spec->dport = match.key->dst;
+ flow_mask->dport = match.mask->dst;
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ flow_spec->sport = match.key->src;
+ flow_mask->sport = match.mask->src;
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+
+ return otx2_tc_parse_actions(nic, &rule->action, req);
+}
+
+static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+ mutex_unlock(&nic->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_tc_del_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *flow_node;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ otx2_del_mcam_flow_entry(nic, flow_node->entry);
+
+ WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
+ &flow_node->node,
+ nic->tc_info.flow_ht_params));
+ kfree_rcu(flow_node, rcu);
+
+ clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
+ tc_info->num_entries--;
+
+ return 0;
+}
+
+static int otx2_tc_add_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *new_node, *old_node;
+ struct npc_install_flow_req *req;
+ int rc;
+
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -EOPNOTSUPP;
+
+ /* allocate memory for the new flow and its node */
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+ spin_lock_init(&new_node->lock);
+ new_node->cookie = tc_flow_cmd->cookie;
+
+ /* If a flow exists with the same cookie, delete it first;
+ * otx2_tc_del_flow() takes the mbox lock itself, so this must
+ * run before the lock is held here
+ */
+ old_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (old_node)
+ otx2_tc_del_flow(nic, tc_flow_cmd);
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ kfree(new_node);
+ return -ENOMEM;
+ }
+
+ rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
+ if (rc) {
+ otx2_mbox_reset(&nic->mbox.mbox, 0);
+ mutex_unlock(&nic->mbox.lock);
+ kfree(new_node);
+ return rc;
+ }
+
+ if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
+ netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
+ otx2_mbox_reset(&nic->mbox.mbox, 0);
+ mutex_unlock(&nic->mbox.lock);
+ kfree(new_node);
+ return -ENOMEM;
+ }
+
+ new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
+ nic->flow_cfg->tc_max_flows);
+ req->channel = nic->hw.rx_chan_base;
+ req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
+ nic->flow_cfg->tc_max_flows - 1 - new_node->bitpos];
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ new_node->entry = req->entry;
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
+ mutex_unlock(&nic->mbox.lock);
+ kfree(new_node);
+ goto out;
+ }
+ mutex_unlock(&nic->mbox.lock);
+
+ /* add new flow to flow-table */
+ rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
+ nic->tc_info.flow_ht_params);
+ if (rc) {
+ otx2_del_mcam_flow_entry(nic, req->entry);
+ kfree_rcu(new_node, rcu);
+ goto out;
+ }
+
+ set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
+ tc_info->num_entries++;
+out:
+ return rc;
+}
+
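Slot bookkeeping and MCAM placement run in opposite directions here: bitmap slots are handed out bottom-up while the MCAM index is carved from the top of the shared ntuple/TC range, so the first offloaded flow lands on the highest-numbered (lowest-priority) entry. A small sketch of the mapping, with illustrative sizes rather than values read from hardware:

#include <stdio.h>

#define TC_FLOWER_OFFSET 0	/* illustrative */
#define TC_MAX_FLOWS 32		/* illustrative */

int main(void)
{
	unsigned int bitpos;

	for (bitpos = 0; bitpos < 3; bitpos++)
		printf("bitpos %u -> entry[%u]\n", bitpos,
		       TC_FLOWER_OFFSET + TC_MAX_FLOWS - 1 - bitpos);
	/* prints entry[31], entry[30], entry[29] */
	return 0;
}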
+static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct npc_mcam_get_stats_req *req;
+ struct npc_mcam_get_stats_rsp *rsp;
+ struct otx2_tc_flow_stats *stats;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_info(nic->netdev, "tc flow not found for cookie %lx",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_node->entry;
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
+ (&nic->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&nic->mbox.lock);
+ return PTR_ERR(rsp);
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+
+ if (!rsp->stat_ena)
+ return -EINVAL;
+
+ stats = &flow_node->stats;
+
+ spin_lock(&flow_node->lock);
+ flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ stats->pkts = rsp->stat;
+ spin_unlock(&flow_node->lock);
+
+ return 0;
+}
+
+static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return otx2_tc_add_flow(nic, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return otx2_tc_del_flow(nic, cls_flower);
+ case FLOW_CLS_STATS:
+ return otx2_tc_get_flow_stats(nic, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return otx2_setup_tc_cls_flower(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_egress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_egress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_egress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static LIST_HEAD(otx2_block_cb_list);
+
+static int otx2_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct otx2_nic *nic = netdev_priv(netdev);
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->block_shared)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = otx2_setup_tc_block_ingress_cb;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = otx2_setup_tc_block_egress_cb;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
+ nic, nic, ingress);
+}
+
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return otx2_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct rhashtable_params tc_flow_ht_params = {
+ .head_offset = offsetof(struct otx2_tc_flow, node),
+ .key_offset = offsetof(struct otx2_tc_flow, cookie),
+ .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+int otx2_init_tc(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ tc->flow_ht_params = tc_flow_ht_params;
+ return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+}
+
+void otx2_shutdown_tc(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ rhashtable_destroy(&tc->flow_table);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 25dd903a3e92..2768c78528a5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
netif_carrier_on(port->dev);
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
- } else {
+ } else if (netif_running(port->dev) &&
+ netif_carrier_ok(port->dev)) {
netif_carrier_off(port->dev);
if (delayed_work_pending(caching_dw))
cancel_delayed_work(caching_dw);
@@ -456,20 +457,17 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
struct device_node *base_mac_np;
struct device_node *np;
- const char *base_mac;
+ int ret;
np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
- base_mac = of_get_mac_address(base_mac_np);
- of_node_put(base_mac_np);
- if (!IS_ERR(base_mac))
- ether_addr_copy(sw->base_mac, base_mac);
-
- if (!is_valid_ether_addr(sw->base_mac)) {
+ ret = of_get_mac_address(base_mac_np, sw->base_mac);
+ if (ret) {
eth_random_addr(sw->base_mac);
dev_info(prestera_dev(sw), "using random base mac address\n");
}
+ of_node_put(base_mac_np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index be5677623455..298110119272 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -756,6 +756,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id prestera_pci_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 49e052273f30..cb564890a3dc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -798,7 +798,7 @@ static void prestera_fdb_event_work(struct work_struct *work)
switch (swdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &swdev_work->fdb_info;
- if (!fdb_info->added_by_user)
+ if (!fdb_info->added_by_user || fdb_info->is_local)
break;
err = prestera_port_fdb_set(port, fdb_info, true);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3712e1786091..e967867828d8 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1392,7 +1392,6 @@ static int pxa168_eth_probe(struct platform_device *pdev)
struct resource *res;
struct clk *clk;
struct device_node *np;
- const unsigned char *mac_addr = NULL;
int err;
printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
@@ -1435,12 +1434,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
- if (pdev->dev.of_node)
- mac_addr = of_get_mac_address(pdev->dev.of_node);
-
- if (!IS_ERR_OR_NULL(mac_addr)) {
- ether_addr_copy(dev->dev_addr, mac_addr);
- } else {
+ err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
+ if (err) {
/* try reading the mac address, if set by the bootloader */
pxa168_eth_get_mac_address(dev, dev->dev_addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 8a9c0f490bfb..d4bb27ba1419 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1617,7 +1617,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
xm_write16(hw, port, XM_TX_THR, 512);
/*
- * Enable the reception of all error frames. This is is
+ * Enable the reception of all error frames. This is
* a necessary evil due to the design of the XMAC. The
* XMAC's receive FIFO is only 8K in size, however jumbo
* frames can be up to 9000 bytes in length. When bad
@@ -2959,8 +2959,9 @@ static void genesis_set_multicast(struct net_device *dev)
static void yukon_add_filter(u8 filter[8], const u8 *addr)
{
- u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
- filter[bit/8] |= 1 << (bit%8);
+ u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
+
+ filter[bit / 8] |= 1 << (bit % 8);
}
static void yukon_set_multicast(struct net_device *dev)
@@ -3849,7 +3850,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* Only used for Genesis XMAC */
if (is_genesis(hw))
- timer_setup(&skge->link_timer, xm_link_timer, 0);
+ timer_setup(&skge->link_timer, xm_link_timer, 0);
else {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index dbec8e187a68..222c32367b2c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -55,7 +55,8 @@
#define RX_DEF_PENDING RX_MAX_PENDING
/* This is the worst case number of transmit list elements for a single skb:
- VLAN:GSO + CKSUM + Data + skb_frags * DMA */
+ * VLAN:GSO + CKSUM + Data + skb_frags * DMA
+ */
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
#define TX_MAX_PENDING 1024
@@ -1529,7 +1530,8 @@ static void sky2_rx_start(struct sky2_port *sky2)
sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
/* These chips have no ram buffer?
- * MAC Rx RAM Read is controlled by hardware */
+ * MAC Rx RAM Read is controlled by hardware
+ */
if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
hw->chip_rev > CHIP_REV_YU_EC_U_A0)
sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
@@ -4135,7 +4137,7 @@ static int sky2_set_coalesce(struct net_device *dev,
/*
* Hardware is limited to min of 128 and max of 2048 for ring size
* and rounded up to next power of two
- * to avoid division in modulus calclation
+ * to avoid division in modulus calculation
*/
static unsigned long roundup_ring_size(unsigned long pending)
{
@@ -4684,7 +4686,8 @@ static __exit void sky2_debug_cleanup(void)
#endif
/* Two copies of network device operations to handle special case of
- not allowing netpoll on second port */
+ * not allowing netpoll on second port
+ */
static const struct net_device_ops sky2_netdev_ops[2] = {
{
.ndo_open = sky2_open,
@@ -4725,7 +4728,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
{
struct sky2_port *sky2;
struct net_device *dev = alloc_etherdev(sizeof(*sky2));
- const void *iap;
+ int ret;
if (!dev)
return NULL;
@@ -4795,10 +4798,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 1) from device tree data
* 2) from internal registers set by bootloader
*/
- iap = of_get_mac_address(hw->pdev->dev.of_node);
- if (!IS_ERR(iap))
- ether_addr_copy(dev->dev_addr, iap);
- else
+ ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr);
+ if (ret)
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
ETH_ALEN);
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 3362b148de23..c357c193378e 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -9,7 +9,9 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
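+ # mirror NET_DSA's tristate so this driver cannot be built-in while NET_DSA=m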
select PHYLINK
+ select DIMLIB
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 3a777b4a6cd3..79d4cdbbcbf5 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -4,5 +4,5 @@
#
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 01d3ee4b5829..ed4eacef17ce 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -19,6 +19,8 @@
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
+#include <linux/jhash.h>
+#include <net/dsa.h>
#include "mtk_eth_soc.h"
@@ -85,7 +87,7 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return 0;
if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
break;
- usleep_range(10, 20);
+ cond_resched();
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
@@ -776,13 +778,18 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
-static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
struct mtk_rx_dma *dma_rxd)
{
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+ if (!(rxd->rxd2 & RX_DMA_DONE))
+ return false;
+
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+
+ return true;
}
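
The mtk_rx_get_desc() change above reorders the descriptor copy so the word carrying RX_DMA_DONE is checked first and the remaining words are only read once the hardware has finished writing them. A generic sketch of the pattern, with illustrative names:

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define EXAMPLE_DONE	BIT(31)	/* written last by the hardware */

struct example_desc {
	u32 status;
	u32 addr;
	u32 meta;
};

/* Peek at the status word and bail out early, so the other fields are
 * never read while the hardware is still filling them in. */
static bool example_read_desc(struct example_desc *dst,
			      const struct example_desc *hw)
{
	dst->status = READ_ONCE(hw->status);
	if (!(dst->status & EXAMPLE_DONE))
		return false;

	dst->addr = READ_ONCE(hw->addr);
	dst->meta = READ_ONCE(hw->meta);
	return true;
}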
/* the qdma core needs scratch memory to be setup */
@@ -857,7 +864,8 @@ static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}
-static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -889,8 +897,12 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
tx_buf->flags = 0;
if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
- dev_kfree_skb_any(tx_buf->skb);
+ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
+ if (napi)
+ napi_consume_skb(tx_buf->skb, napi);
+ else
+ dev_kfree_skb_any(tx_buf->skb);
+ }
tx_buf->skb = NULL;
}
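
mtk_tx_unmap() now takes a napi flag so completions running in NAPI context can use napi_consume_skb(), which batches frees and can recycle skbs through the per-cpu cache, while error and teardown paths keep the context-safe dev_kfree_skb_any(). The split in a nutshell (a sketch, not the driver's exact code):

#include <linux/skbuff.h>

/* A nonzero second argument tells napi_consume_skb() it is safe to use
 * the NAPI percpu skb cache; teardown paths pass in_napi = false. */
static void example_free_tx_skb(struct sk_buff *skb, bool in_napi)
{
	if (in_napi)
		napi_consume_skb(skb, 1);
	else
		dev_kfree_skb_any(skb);
}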
@@ -1068,7 +1080,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf);
+ mtk_tx_unmap(eth, tx_buf, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
@@ -1125,17 +1137,6 @@ static void mtk_wake_queue(struct mtk_eth *eth)
}
}
-static void mtk_stop_queue(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i])
- continue;
- netif_stop_queue(eth->netdev[i]);
- }
-}
-
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -1156,7 +1157,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- mtk_stop_queue(eth);
+ netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
spin_unlock(&eth->page_lock);
@@ -1182,7 +1183,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
- mtk_stop_queue(eth);
+ netif_stop_queue(dev);
spin_unlock(&eth->page_lock);
@@ -1238,17 +1239,19 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
+ struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
- int done = 0;
+ int done = 0, bytes = 0;
while (done < budget) {
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
+ u32 hash;
int mac;
ring = mtk_get_rx_ring(eth);
@@ -1259,18 +1262,16 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxd = &ring->dma[idx];
data = ring->data[idx];
- mtk_rx_get_desc(&trxd, rxd);
- if (!(trxd.rxd2 & RX_DMA_DONE))
+ if (!mtk_rx_get_desc(&trxd, rxd))
break;
/* find out which mac the packet came from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
+ (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
mac = 0;
- } else {
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
- }
+ else
+ mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+ RX_DMA_FPORT_MASK) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1298,17 +1299,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
}
+ dma_unmap_single(eth->dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- skb_free_frag(new_data);
+ skb_free_frag(data);
netdev->stats.rx_dropped++;
- goto release_desc;
+ goto skip_rx;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(eth->dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
@@ -1317,14 +1319,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
+ bytes += pktlen;
+
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY) {
+ hash = jhash_1word(hash, 0);
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ }
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- RX_DMA_VID(trxd.rxd3))
+ (trxd.rxd2 & RX_DMA_VTAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
+skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
@@ -1348,6 +1358,12 @@ rx_done:
mtk_update_rx_cpu_idx(eth);
}
+ eth->rx_packets += done;
+ eth->rx_bytes += bytes;
+ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
+ &dim_sample);
+ net_dim(&eth->rx_dim, dim_sample);
+
return done;
}
@@ -1360,7 +1376,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
struct mtk_tx_buf *tx_buf;
u32 cpu, dma;
- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
+ cpu = ring->last_free_ptr;
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
desc = mtk_qdma_phys_to_virt(ring, cpu);
@@ -1386,7 +1402,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
done[mac]++;
budget--;
}
- mtk_tx_unmap(eth, tx_buf);
+ mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -1394,6 +1410,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
cpu = next_cpu;
}
+ ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
return budget;
@@ -1423,7 +1440,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
budget--;
}
- mtk_tx_unmap(eth, tx_buf);
+ mtk_tx_unmap(eth, tx_buf, true);
desc = &ring->dma[cpu];
ring->last_free = desc;
@@ -1440,6 +1457,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct dim_sample dim_sample = {};
unsigned int done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
int total = 0, i;
@@ -1457,8 +1475,14 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
continue;
netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
total += done[i];
+ eth->tx_packets += done[i];
+ eth->tx_bytes += bytes[i];
}
+ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
+ &dim_sample);
+ net_dim(&eth->tx_dim, dim_sample);
+
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
@@ -1480,7 +1504,6 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
- u32 status, mask;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
@@ -1489,22 +1512,20 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, eth->tx_int_status_reg);
- mask = mtk_r32(eth, eth->tx_int_mask_reg);
dev_info(eth->dev,
- "done tx %d, intr 0x%08x/0x%x\n",
- tx_done, status, mask);
+ "done tx %d, intr 0x%08x/0x%x\n", tx_done,
+ mtk_r32(eth, eth->tx_int_status_reg),
+ mtk_r32(eth, eth->tx_int_mask_reg));
}
if (tx_done == budget)
return budget;
- status = mtk_r32(eth, eth->tx_int_status_reg);
- if (status & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
return budget;
- napi_complete(napi);
- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ if (napi_complete_done(napi, tx_done))
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1512,35 +1533,33 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, mask;
- int rx_done = 0;
- int remain_budget = budget;
+ int rx_done_total = 0;
mtk_handle_status_irq(eth);
-poll_again:
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
- rx_done = mtk_poll_rx(napi, remain_budget, eth);
+ do {
+ int rx_done;
- if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
- mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
- dev_info(eth->dev,
- "done rx %d, intr 0x%08x/0x%x\n",
- rx_done, status, mask);
- }
- if (rx_done == remain_budget)
- return budget;
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ rx_done_total += rx_done;
- status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
- if (status & MTK_RX_DONE_INT) {
- remain_budget -= rx_done;
- goto poll_again;
- }
- napi_complete(napi);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ if (unlikely(netif_msg_intr(eth))) {
+ dev_info(eth->dev,
+ "done rx %d, intr 0x%08x/0x%x\n", rx_done,
+ mtk_r32(eth, MTK_PDMA_INT_STATUS),
+ mtk_r32(eth, MTK_PDMA_INT_MASK));
+ }
- return rx_done + budget - remain_budget;
+ if (rx_done_total == budget)
+ return budget;
+
+ } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+
+ if (napi_complete_done(napi, rx_done_total))
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+
+ return rx_done_total;
}
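
The rewritten mtk_napi_rx() replaces the goto-based re-poll with a do/while loop and napi_complete_done(): the interrupt is only re-armed when napi_complete_done() accepts the completion, which closes the window where work arriving between the last poll and the irq-enable write could be lost. The generic shape, with hypothetical helpers standing in for the register accesses:

#include <linux/netdevice.h>

void example_ack_irq(void);		/* hypothetical */
int example_clean_ring(int budget);	/* hypothetical */
bool example_irq_pending(void);		/* hypothetical */
void example_enable_irq(void);		/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	int done_total = 0;

	do {
		int done;

		example_ack_irq();
		done = example_clean_ring(budget - done_total);
		done_total += done;

		/* budget exhausted: stay scheduled, keep irqs off */
		if (done_total == budget)
			return budget;
	} while (example_irq_pending());

	if (napi_complete_done(napi, done_total))
		example_enable_irq();

	return done_total;
}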
static int mtk_tx_alloc(struct mtk_eth *eth)
@@ -1587,6 +1606,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
@@ -1600,9 +1620,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_DRX_PTR);
+ mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
MTK_QTX_CFG(0));
} else {
@@ -1625,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i]);
+ mtk_tx_unmap(eth, &ring->buf[i], false);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -2015,25 +2033,22 @@ static int mtk_set_features(struct net_device *dev, netdev_features_t features)
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
- unsigned long t_start = jiffies;
+ unsigned int reg;
+ int ret;
+ u32 val;
- while (1) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
- } else {
- if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
- }
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ reg = MTK_QDMA_GLO_CFG;
+ else
+ reg = MTK_PDMA_GLO_CFG;
- if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
- break;
- }
+ ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
+ !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
+ 5, MTK_DMA_BUSY_TIMEOUT_US);
+ if (ret)
+ dev_err(eth->dev, "DMA init timeout\n");
- dev_err(eth->dev, "DMA init timeout\n");
- return -1;
+ return ret;
}
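
mtk_dma_busy_wait() now delegates the poll loop to readx_poll_timeout_atomic() from <linux/iopoll.h>, which encapsulates the poll interval, the deadline, and the success test, returning 0 or -ETIMEDOUT. A stripped-down sketch with an illustrative BUSY bit:

#include <linux/iopoll.h>

#define EXAMPLE_BUSY	BIT(0)	/* illustrative */

/* Poll the register at 'base' every 5 us for up to 1 ms until the
 * BUSY bit clears; the last value read is left in 'val'. */
static int example_wait_idle(void __iomem *base)
{
	u32 val;

	return readx_poll_timeout_atomic(readl, base, val,
					 !(val & EXAMPLE_BUSY),
					 5, 1000);
}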
static int mtk_dma_init(struct mtk_eth *eth)
@@ -2133,6 +2148,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
@@ -2145,6 +2161,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
@@ -2197,7 +2214,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
MTK_RX_BT_32DWORDS,
MTK_QDMA_GLO_CFG);
@@ -2233,6 +2250,9 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
+ if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ val |= MTK_GDMA_SPECIAL_TAG;
+
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
}
/* Reset and enable PSE */
@@ -2255,12 +2275,17 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- int err = mtk_start_dma(eth);
+ u32 gdm_config = MTK_GDMA_TO_PDMA;
+ int err;
+ err = mtk_start_dma(eth);
if (err)
return err;
- mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+ if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
+ gdm_config = MTK_GDMA_TO_PPE;
+
+ mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
@@ -2321,12 +2346,18 @@ static int mtk_stop(struct net_device *dev)
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
+ cancel_work_sync(&eth->rx_dim.work);
+ cancel_work_sync(&eth->tx_dim.work);
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
+ if (eth->soc->offload_version)
+ mtk_ppe_stop(&eth->ppe);
+
return 0;
}
@@ -2370,6 +2401,64 @@ err_disable_clks:
return ret;
}
+static void mtk_dim_rx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+
+ cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
+ dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+
+ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val &= MTK_PDMA_DELAY_TX_MASK;
+ val |= MTK_PDMA_DELAY_RX_EN;
+
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
+
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
+
+ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+
+ spin_unlock_bh(&eth->dim_lock);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void mtk_dim_tx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+
+ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
+ dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+
+ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val &= MTK_PDMA_DELAY_RX_MASK;
+ val |= MTK_PDMA_DELAY_TX_EN;
+
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
+
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
+
+ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+
+ spin_unlock_bh(&eth->dim_lock);
+
+ dim->state = DIM_START_MEASURE;
+}
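
mtk_dim_rx()/mtk_dim_tx() complete the standard net_dim contract: the hot path feeds event/packet/byte counters to net_dim() via dim_update_sample(), the library schedules dim.work when it wants new coalescing parameters, and the callback applies the chosen profile before resetting dim->state to DIM_START_MEASURE. A condensed sketch; apply_coalesce() is a hypothetical stand-in for the MTK_PDMA_DELAY_INT register writes:

#include <linux/dim.h>

void apply_coalesce(u16 usec, u16 pkts);	/* hypothetical */

/* Workqueue side: applies the moderation profile net_dim selected. */
static void example_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder m;

	m = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	apply_coalesce(m.usec, m.pkts);
	dim->state = DIM_START_MEASURE;
}

/* NAPI side: called at the end of each poll with running counters. */
static void example_poll_done(struct dim *dim, u16 events,
			      u64 packets, u64 bytes)
{
	struct dim_sample sample = {};

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(dim, sample);
}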
+
static int mtk_hw_init(struct mtk_eth *eth)
{
int i, val, ret;
@@ -2391,9 +2480,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
- /* enable interrupt delay for RX */
- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
-
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
@@ -2432,11 +2518,11 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- /* enable interrupt delay for RX */
- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
/* disable delay and normal interrupt */
- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
@@ -2473,14 +2559,11 @@ static int __init mtk_init(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- const char *mac_addr;
-
- mac_addr = of_get_mac_address(mac->of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(dev->dev_addr, mac_addr);
+ int ret;
- /* If the mac address is invalid, use random mac address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ ret = of_get_mac_address(mac->of_node, dev->dev_addr);
+ if (ret) {
+ /* If the mac address is invalid, use random mac address */
eth_hw_addr_random(dev);
dev_err(eth->dev, "generated random MAC address %pM\n",
dev->dev_addr);
@@ -2832,6 +2915,7 @@ static const struct net_device_ops mtk_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
+ .ndo_setup_tc = mtk_eth_setup_tc,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -2973,6 +3057,13 @@ static int mtk_probe(struct platform_device *pdev)
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
+ spin_lock_init(&eth->dim_lock);
+
+ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+
+ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
@@ -3088,6 +3179,17 @@ static int mtk_probe(struct platform_device *pdev)
goto err_free_dev;
}
+ if (eth->soc->offload_version) {
+ err = mtk_ppe_init(&eth->ppe, eth->dev,
+ eth->base + MTK_ETH_PPE_BASE, 2);
+ if (err)
+ goto err_free_dev;
+
+ err = mtk_eth_offload_init(eth);
+ if (err)
+ goto err_free_dev;
+ }
+
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
@@ -3162,6 +3264,7 @@ static const struct mtk_soc_data mt7621_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
+ .offload_version = 2,
};
static const struct mtk_soc_data mt7622_data = {
@@ -3170,6 +3273,7 @@ static const struct mtk_soc_data mt7622_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
+ .offload_version = 2,
};
static const struct mtk_soc_data mt7623_data = {
@@ -3177,6 +3281,7 @@ static const struct mtk_soc_data mt7623_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .offload_version = 2,
};
static const struct mtk_soc_data mt7629_data = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index fd3cec8f06ba..11331b44ba07 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -15,12 +15,15 @@
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/phylink.h>
+#include <linux/rhashtable.h>
+#include <linux/dim.h>
+#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
-#define MTK_DMA_SIZE 256
+#define MTK_DMA_SIZE 512
#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
@@ -40,7 +43,8 @@
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
- NETIF_F_IPV6_CSUM)
+ NETIF_F_IPV6_CSUM |\
+ NETIF_F_HW_TC)
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
@@ -82,10 +86,12 @@
/* GDM Egress Control Register */
#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
+#define MTK_GDMA_SPECIAL_TAG BIT(24)
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_TO_PDMA 0x0
+#define MTK_GDMA_TO_PPE 0x4444
#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
@@ -132,13 +138,18 @@
/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
-#define MTK_PDMA_DELAY_RX_PINT 4
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
-#define MTK_PDMA_DELAY_RX_PTIME 4
-#define MTK_PDMA_DELAY_RX_DELAY \
- (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
- (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
+#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
+
+#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
+#define MTK_PDMA_DELAY_TX_EN BIT(31)
+#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
+#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
+
+#define MTK_PDMA_DELAY_PINT_MASK 0x7f
+#define MTK_PDMA_DELAY_PTIME_MASK 0xff
/* PDMA Interrupt Status Register */
#define MTK_PDMA_INT_STATUS 0xa20
@@ -198,12 +209,12 @@
#define MTK_RX_BT_32DWORDS (3 << 11)
#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
-#define MTK_DMA_SIZE_16DWORDS (2 << 4)
+#define MTK_TX_BT_32DWORDS (3 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
#define MTK_TX_DMA_BUSY BIT(1)
#define MTK_RX_DMA_EN BIT(2)
#define MTK_TX_DMA_EN BIT(0)
-#define MTK_DMA_BUSY_TIMEOUT HZ
+#define MTK_DMA_BUSY_TIMEOUT_US 1000000
/* QDMA Reset Index Register */
#define MTK_QDMA_RST_IDX 0x1A08
@@ -220,6 +231,7 @@
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
+#define MTK_TX_DONE_DLY BIT(28)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
@@ -229,8 +241,7 @@
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
/* QDMA Interrupt grouping registers */
#define MTK_QDMA_INT_GRP1 0x1a20
@@ -296,15 +307,23 @@
#define RX_DMA_LSO BIT(30)
#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_VTAG BIT(15)
/* QDMA descriptor rxd3 */
#define RX_DMA_VID(_x) ((_x) & 0xfff)
/* QDMA descriptor rxd4 */
+#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
+#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
+#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
+#define MTK_RXD4_ALG GENMASK(31, 22)
+
+/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_FPORT_SHIFT 19
#define RX_DMA_FPORT_MASK 0x7
+#define RX_DMA_SPECIAL_TAG BIT(22)
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
@@ -623,6 +642,7 @@ struct mtk_tx_buf {
* @phys: The physical addr of tx_buf
* @next_free: Pointer to the next free descriptor
* @last_free: Pointer to the last free descriptor
+ * @last_free_ptr: Hardware pointer value of the last free descriptor
* @thresh: The threshold of minimum amount of free descriptors
* @free_count: QDMA uses a linked list. Track how many free descriptors
* are present
@@ -633,6 +653,7 @@ struct mtk_tx_ring {
dma_addr_t phys;
struct mtk_tx_dma *next_free;
struct mtk_tx_dma *last_free;
+ u32 last_free_ptr;
u16 thresh;
atomic_t free_count;
int dma_size;
@@ -802,6 +823,7 @@ struct mtk_soc_data {
u32 caps;
u32 required_clks;
bool required_pctl;
+ u8 offload_version;
netdev_features_t hw_features;
};
@@ -835,6 +857,7 @@ struct mtk_sgmii {
* @page_lock: Make sure that register operations are atomic
* @tx_irq_lock: Make sure that IRQ register operations are atomic
* @rx_irq_lock: Make sure that IRQ register operations are atomic
+ * @dim_lock: Make sure that Net DIM operations are atomic
* @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
* dummy for NAPI to work
* @netdev: The netdev instances
@@ -853,6 +876,14 @@ struct mtk_sgmii {
* @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
* @tx_napi: The TX NAPI struct
* @rx_napi: The RX NAPI struct
+ * @rx_events: Net DIM RX event counter
+ * @rx_packets: Net DIM RX packet counter
+ * @rx_bytes: Net DIM RX byte counter
+ * @rx_dim: Net DIM RX context
+ * @tx_events: Net DIM TX event counter
+ * @tx_packets: Net DIM TX packet counter
+ * @tx_bytes: Net DIM TX byte counter
+ * @tx_dim: Net DIM TX context
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
@@ -897,10 +928,25 @@ struct mtk_eth {
const struct mtk_soc_data *soc;
+ spinlock_t dim_lock;
+
+ u32 rx_events;
+ u32 rx_packets;
+ u32 rx_bytes;
+ struct dim rx_dim;
+
+ u32 tx_events;
+ u32 tx_packets;
+ u32 tx_bytes;
+ struct dim tx_dim;
+
u32 tx_int_mask_reg;
u32 tx_int_status_reg;
u32 rx_dma_l4_valid;
int ip_align;
+
+ struct mtk_ppe ppe;
+ struct rhashtable flow_table;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -945,4 +991,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
new file mode 100644
index 000000000000..3ad10c793308
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include "mtk_ppe.h"
+#include "mtk_ppe_regs.h"
+
+static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
+{
+ writel(val, ppe->base + reg);
+}
+
+static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
+{
+ return readl(ppe->base + reg);
+}
+
+static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = ppe_r32(ppe, reg);
+ val &= ~mask;
+ val |= set;
+ ppe_w32(ppe, reg, val);
+
+ return val;
+}
+
+static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
+{
+ return ppe_m32(ppe, reg, 0, val);
+}
+
+static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
+{
+ return ppe_m32(ppe, reg, val, 0);
+}
+
+static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
+ !(val & MTK_PPE_GLO_CFG_BUSY),
+ 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+ if (ret)
+ dev_err(ppe->dev, "PPE table busy\n");
+
+ return ret;
+}
+
+static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
+{
+ ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
+ ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
+}
+
+static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
+{
+ mtk_ppe_cache_clear(ppe);
+
+ ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
+ enable * MTK_PPE_CACHE_CTL_EN);
+}
+
+static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+{
+ u32 hv1, hv2, hv3;
+ u32 hash;
+
+ switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
+ case MTK_PPE_PKT_TYPE_BRIDGE:
+ hv1 = e->bridge.src_mac_lo;
+ hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
+ hv2 = e->bridge.src_mac_hi >> 16;
+ hv2 ^= e->bridge.dest_mac_lo;
+ hv3 = e->bridge.dest_mac_hi;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = e->ipv4.orig.ports;
+ hv2 = e->ipv4.orig.dest_ip;
+ hv3 = e->ipv4.orig.src_ip;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
+ hv1 ^= e->ipv6.ports;
+
+ hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
+ hv2 ^= e->ipv6.dest_ip[0];
+
+ hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
+ hv3 ^= e->ipv6.src_ip[0];
+ break;
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return MTK_PPE_HASH_MASK;
+ }
+
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash <<= 1;
+ hash &= MTK_PPE_ENTRIES - 1;
+
+ return hash;
+}
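
mtk_ppe_hash_entry() reduces each tuple to three 32-bit words and mixes them; the trailing left shift keeps every hash even, because an entry and its successor form a two-slot bucket (see mtk_foe_entry_commit() below). A userspace-style sketch of the same mix, useful for tracing a flow to its table slot by hand:

#include <stdint.h>

#define EXAMPLE_ENTRIES	(1024 << 3)	/* matches MTK_PPE_ENTRIES */

static uint32_t example_ppe_hash(uint32_t hv1, uint32_t hv2, uint32_t hv3)
{
	uint32_t hash;

	hash = (hv1 & hv2) | (~hv1 & hv3);		/* bitwise select */
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);	/* rotate left 8 */
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;		/* even slots only: 2-entry buckets */
	hash &= EXAMPLE_ENTRIES - 1;

	return hash;
}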
+
+static inline struct mtk_foe_mac_info *
+mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ return &entry->ipv6.l2;
+
+ return &entry->ipv4.l2;
+}
+
+static inline u32 *
+mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ return &entry->ipv6.ib2;
+
+ return &entry->ipv4.ib2;
+}
+
+int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
+ u8 pse_port, u8 *src_mac, u8 *dest_mac)
+{
+ struct mtk_foe_mac_info *l2;
+ u32 ports_pad, val;
+
+ memset(entry, 0, sizeof(*entry));
+
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_TTL |
+ MTK_FOE_IB1_BIND_CACHE;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
+ FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+
+ if (is_multicast_ether_addr(dest_mac))
+ val |= MTK_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
+ entry->ipv4.orig.ports = ports_pad;
+ if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ entry->ipv6.ports = ports_pad;
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ entry->ipv6.ib2 = val;
+ l2 = &entry->ipv6.l2;
+ } else {
+ entry->ipv4.ib2 = val;
+ l2 = &entry->ipv4.l2;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(dest_mac);
+ l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
+ l2->src_mac_hi = get_unaligned_be32(src_mac);
+ l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ l2->etype = ETH_P_IPV6;
+ else
+ l2->etype = ETH_P_IP;
+
+ return 0;
+}
+
+int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+ u32 val;
+
+ val = *ib2;
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ *ib2 = val;
+
+ return 0;
+}
+
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+ __be32 src_addr, __be16 src_port,
+ __be32 dest_addr, __be16 dest_port)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ struct mtk_ipv4_tuple *t;
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &entry->ipv4.new;
+ break;
+ }
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &entry->ipv4.orig;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
+ entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(src_addr);
+ t->dest_ip = be32_to_cpu(dest_addr);
+
+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
+ return 0;
+
+ t->src_port = be16_to_cpu(src_port);
+ t->dest_port = be16_to_cpu(dest_port);
+
+ return 0;
+}
+
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+ __be32 *src_addr, __be16 src_port,
+ __be32 *dest_addr, __be16 dest_port)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ u32 *src, *dest;
+ int i;
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ src = entry->dslite.tunnel_src_ip;
+ dest = entry->dslite.tunnel_dest_ip;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ entry->ipv6.src_port = be16_to_cpu(src_port);
+ entry->ipv6.dest_port = be16_to_cpu(dest_port);
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = entry->ipv6.src_ip;
+ dest = entry->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4; i++)
+ src[i] = be32_to_cpu(src_addr[i]);
+ for (i = 0; i < 4; i++)
+ dest[i] = be32_to_cpu(dest_addr[i]);
+
+ return 0;
+}
+
+int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+
+ l2->etype = BIT(port);
+
+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ else
+ l2->etype |= BIT(8);
+
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+
+ return 0;
+}
+
+int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+
+ switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+ case 0:
+ entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
+ FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ l2->vlan1 = vid;
+ return 0;
+ case 1:
+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+ l2->vlan1 = vid;
+ l2->etype |= BIT(8);
+ } else {
+ l2->vlan2 = vid;
+ entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ }
+ return 0;
+ default:
+ return -ENOSPC;
+ }
+}
+
+int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+
+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
+ (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+ l2->etype = ETH_P_PPP_SES;
+
+ entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+ l2->pppoe_id = sid;
+
+ return 0;
+}
+
+static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
+{
+ return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
+ FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 timestamp)
+{
+ struct mtk_foe_entry *hwe;
+ u32 hash;
+
+ timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+
+ hash = mtk_ppe_hash_entry(entry);
+ hwe = &ppe->foe_table[hash];
+ if (!mtk_foe_entry_usable(hwe)) {
+ hwe++;
+ hash++;
+
+ if (!mtk_foe_entry_usable(hwe))
+ return -ENOSPC;
+ }
+
+ memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
+ wmb();
+ hwe->ib1 = entry->ib1;
+
+ dma_wmb();
+
+ mtk_ppe_cache_clear(ppe);
+
+ return hash;
+}
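
Two details of mtk_foe_entry_commit() are worth calling out: collisions are handled by probing exactly one neighbouring slot (hash + 1) before giving up with -ENOSPC, and the entry is published in a strict order so the PPE can never observe a bound ib1 paired with stale payload. A sketch of the publish order (a restatement for clarity, not new driver logic):

#include <asm/barrier.h>
#include <linux/string.h>

static void example_publish(struct mtk_foe_entry *hw,
			    const struct mtk_foe_entry *sw)
{
	memcpy(&hw->data, &sw->data, sizeof(hw->data));
	wmb();			/* payload visible before the state word */
	hw->ib1 = sw->ib1;	/* flips the entry to BIND state */
	dma_wmb();		/* state word visible before cache clear */
}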
+
+int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+ int version)
+{
+ struct mtk_foe_entry *foe;
+
+ /* need to allocate a separate device, since the PPE's DMA access is
+ * not coherent.
+ */
+ ppe->base = base;
+ ppe->dev = dev;
+ ppe->version = version;
+
+ foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
+ &ppe->foe_phys, GFP_KERNEL);
+ if (!foe)
+ return -ENOMEM;
+
+ ppe->foe_table = foe;
+
+ mtk_ppe_debugfs_init(ppe);
+
+ return 0;
+}
+
+static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
+{
+ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
+ int i, k;
+
+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+
+ if (!IS_ENABLED(CONFIG_SOC_MT7621))
+ return;
+
+ /* skip all entries that cross the 1024 byte boundary */
+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
+ for (k = 0; k < ARRAY_SIZE(skip); k++)
+ ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+}
+
+int mtk_ppe_start(struct mtk_ppe *ppe)
+{
+ u32 val;
+
+ mtk_ppe_init_foe_table(ppe);
+ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
+
+ val = MTK_PPE_TB_CFG_ENTRY_80B |
+ MTK_PPE_TB_CFG_AGE_NON_L4 |
+ MTK_PPE_TB_CFG_AGE_UNBIND |
+ MTK_PPE_TB_CFG_AGE_TCP |
+ MTK_PPE_TB_CFG_AGE_UDP |
+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
+ FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
+ FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
+ MTK_PPE_KEEPALIVE_DISABLE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
+ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
+ MTK_PPE_ENTRIES_SHIFT);
+ ppe_w32(ppe, MTK_PPE_TB_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
+ MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
+
+ mtk_ppe_cache_enable(ppe, true);
+
+ val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
+ MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
+ MTK_PPE_FLOW_CFG_IP6_6RD |
+ MTK_PPE_FLOW_CFG_IP4_NAT |
+ MTK_PPE_FLOW_CFG_IP4_NAPT |
+ MTK_PPE_FLOW_CFG_IP4_DSLITE |
+ MTK_PPE_FLOW_CFG_L2_BRIDGE |
+ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
+
+ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
+ FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
+ ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
+
+ val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
+ FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
+ ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
+
+ val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
+ ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
+
+ val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
+
+ val = MTK_PPE_BIND_LIMIT1_FULL |
+ FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
+
+ val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
+ FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
+ ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
+
+ /* enable PPE */
+ val = MTK_PPE_GLO_CFG_EN |
+ MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
+ MTK_PPE_GLO_CFG_IP4_CS_DROP |
+ MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
+ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
+
+ return 0;
+}
+
+int mtk_ppe_stop(struct mtk_ppe *ppe)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++)
+ ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+
+ mtk_ppe_cache_enable(ppe, false);
+
+ /* disable offload engine */
+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+
+ /* disable aging */
+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
+ MTK_PPE_TB_CFG_AGE_UNBIND |
+ MTK_PPE_TB_CFG_AGE_TCP |
+ MTK_PPE_TB_CFG_AGE_UDP |
+ MTK_PPE_TB_CFG_AGE_TCP_FIN;
+ ppe_clear(ppe, MTK_PPE_TB_CFG, val);
+
+ return mtk_ppe_wait_busy(ppe);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
new file mode 100644
index 000000000000..242fb8f2ae65
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_PPE_H
+#define __MTK_PPE_H
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+
+#define MTK_ETH_PPE_BASE 0xc00
+
+#define MTK_PPE_ENTRIES_SHIFT 3
+#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
+#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
+#define MTK_PPE_WAIT_TIMEOUT_US 1000000
+
+#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
+
+#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
+#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
+#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
+#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
+#define MTK_FOE_IB1_BIND_CACHE BIT(22)
+#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define MTK_FOE_IB1_BIND_TTL BIT(24)
+
+#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
+#define MTK_FOE_IB1_STATE GENMASK(29, 28)
+#define MTK_FOE_IB1_UDP BIT(30)
+#define MTK_FOE_IB1_STATIC BIT(31)
+
+enum {
+ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ MTK_PPE_PKT_TYPE_BRIDGE = 2,
+ MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
+};
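
ib1 is a packed status word; all field accesses go through FIELD_PREP()/FIELD_GET() with the GENMASK definitions above, along these lines (sketch with illustrative helper names):

#include <linux/bitfield.h>

static inline int example_pkt_type(u32 ib1)
{
	return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, ib1);
}

static inline u32 example_mark_udp(u32 ib1, bool udp)
{
	ib1 &= ~MTK_FOE_IB1_UDP;
	return ib1 | FIELD_PREP(MTK_FOE_IB1_UDP, udp);
}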
+
+#define MTK_FOE_IB2_QID GENMASK(3, 0)
+#define MTK_FOE_IB2_PSE_QOS BIT(4)
+#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
+#define MTK_FOE_IB2_MULTICAST BIT(8)
+
+#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+
+#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+
+#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
+
+#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
+
+#define MTK_FOE_VLAN2_WHNAT_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+
+enum {
+ MTK_FOE_STATE_INVALID,
+ MTK_FOE_STATE_UNBIND,
+ MTK_FOE_STATE_BIND,
+ MTK_FOE_STATE_FIN
+};
+
+struct mtk_foe_mac_info {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+};
+
+struct mtk_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_lo;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+
+ u32 ib2;
+
+ u32 _rsv[5];
+
+ u32 udf_tsid;
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct mtk_foe_ipv4 {
+ struct mtk_ipv4_tuple orig;
+
+ u32 ib2;
+
+ struct mtk_ipv4_tuple new;
+
+ u16 timestamp;
+ u16 _rsv0[3];
+
+ u32 udf_tsid;
+
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_ipv4_dslite {
+ struct mtk_ipv4_tuple ip4;
+
+ u32 tunnel_src_ip[4];
+ u32 tunnel_dest_ip[4];
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 udf_tsid;
+
+ u32 ib2;
+
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ }; /* 3-tuple */
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ }; /* 5-tuple */
+ u32 ports;
+ };
+
+ u32 _rsv[3];
+
+ u32 udf;
+
+ u32 ib2;
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_ipv6_6rd {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+ u16 dest_port;
+ u16 src_port;
+
+ u32 tunnel_src_ip;
+ u32 tunnel_dest_ip;
+
+ u16 hdr_csum;
+ u8 dscp;
+ u8 ttl;
+
+ u8 flag;
+ u8 pad;
+ u8 per_flow_6rd_id;
+ u8 pad2;
+
+ u32 ib2;
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_entry {
+ u32 ib1;
+
+ union {
+ struct mtk_foe_bridge bridge;
+ struct mtk_foe_ipv4 ipv4;
+ struct mtk_foe_ipv4_dslite dslite;
+ struct mtk_foe_ipv6 ipv6;
+ struct mtk_foe_ipv6_6rd ipv6_6rd;
+ u32 data[19];
+ };
+};
+
+enum {
+ MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
+ MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
+ MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
+ MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
+ MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
+ MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
+ MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
+ MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
+ MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
+ MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
+ MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+ MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
+ MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
+ MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
+ MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
+ MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
+ MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
+ MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
+ MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
+ MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
+ MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
+ MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
+ MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
+ MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
+ MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
+ MTK_PPE_CPU_REASON_INVALID = 0x1f,
+};
+
+struct mtk_ppe {
+ struct device *dev;
+ void __iomem *base;
+ int version;
+
+ struct mtk_foe_entry *foe_table;
+ dma_addr_t foe_phys;
+
+ void *acct_table;
+};
+
+int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+ int version);
+int mtk_ppe_start(struct mtk_ppe *ppe);
+int mtk_ppe_stop(struct mtk_ppe *ppe);
+
+static inline void
+mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+{
+ ppe->foe_table[hash].ib1 = 0;
+ dma_wmb();
+}
+
+static inline int
+mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
+{
+ u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
+ return -1;
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
+}
+
+int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
+ u8 pse_port, u8 *src_mac, u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+ __be32 src_addr, __be16 src_port,
+ __be32 dest_addr, __be16 dest_port);
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+ __be32 *src_addr, __be16 src_port,
+ __be32 *dest_addr, __be16 dest_port);
+int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
+int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 timestamp);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
new file mode 100644
index 000000000000..98b1d3577bcd
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "mtk_eth_soc.h"
+
+struct mtk_flow_addr_info
+{
+ void *src, *dest;
+ u16 *src_port, *dest_port;
+ bool ipv6;
+};
+
+static const char *mtk_foe_entry_state_str(int state)
+{
+ static const char * const state_str[] = {
+ [MTK_FOE_STATE_INVALID] = "INV",
+ [MTK_FOE_STATE_UNBIND] = "UNB",
+ [MTK_FOE_STATE_BIND] = "BND",
+ [MTK_FOE_STATE_FIN] = "FIN",
+ };
+
+ if (state >= ARRAY_SIZE(state_str) || !state_str[state])
+ return "UNK";
+
+ return state_str[state];
+}
+
+static const char *mtk_foe_pkt_type_str(int type)
+{
+ static const char * const type_str[] = {
+ [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
+ [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+
+ if (type >= ARRAY_SIZE(type_str) || !type_str[type])
+ return "UNKNOWN";
+
+ return type_str[type];
+}
+
+static void
+mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
+{
+ u32 n_addr[4];
+ int i;
+
+ if (!ipv6) {
+ seq_printf(m, "%pI4h", addr);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(n_addr); i++)
+ n_addr[i] = htonl(addr[i]);
+ seq_printf(m, "%pI6", n_addr);
+}
+
+static void
+mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
+{
+ mtk_print_addr(m, ai->src, ai->ipv6);
+ if (ai->src_port)
+ seq_printf(m, ":%d", *ai->src_port);
+ seq_printf(m, "->");
+ mtk_print_addr(m, ai->dest, ai->ipv6);
+ if (ai->dest_port)
+ seq_printf(m, ":%d", *ai->dest_port);
+}
+
+static int
+mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
+{
+ struct mtk_ppe *ppe = m->private;
+ int i;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *entry = &ppe->foe_table[i];
+ struct mtk_foe_mac_info *l2;
+ struct mtk_flow_addr_info ai = {};
+ unsigned char h_source[ETH_ALEN];
+ unsigned char h_dest[ETH_ALEN];
+ int type, state;
+ u32 ib2;
+
+ state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != MTK_FOE_STATE_BIND)
+ continue;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ seq_printf(m, "%05x %s %7s", i,
+ mtk_foe_entry_state_str(state),
+ mtk_foe_pkt_type_str(type));
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ ai.src_port = &entry->ipv4.orig.src_port;
+ ai.dest_port = &entry->ipv4.orig.dest_port;
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ ai.src = &entry->ipv4.orig.src_ip;
+ ai.dest = &entry->ipv4.orig.dest_ip;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ ai.src_port = &entry->ipv6.src_port;
+ ai.dest_port = &entry->ipv6.dest_port;
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ ai.src = &entry->ipv6.src_ip;
+ ai.dest = &entry->ipv6.dest_ip;
+ ai.ipv6 = true;
+ break;
+ }
+
+ seq_printf(m, " orig=");
+ mtk_print_addr_info(m, &ai);
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ ai.src_port = &entry->ipv4.new.src_port;
+ ai.dest_port = &entry->ipv4.new.dest_port;
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ ai.src = &entry->ipv4.new.src_ip;
+ ai.dest = &entry->ipv4.new.dest_ip;
+ seq_printf(m, " new=");
+ mtk_print_addr_info(m, &ai);
+ break;
+ }
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ l2 = &entry->ipv6.l2;
+ ib2 = entry->ipv6.ib2;
+ } else {
+ l2 = &entry->ipv4.l2;
+ ib2 = entry->ipv4.ib2;
+ }
+
+ *((__be32 *)h_source) = htonl(l2->src_mac_hi);
+ *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
+ *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x"
+ " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ h_source, h_dest, ntohs(l2->etype),
+ l2->vlan1, l2->vlan2, entry->ib1, ib2);
+ }
+
+ return 0;
+}
+
+static int
+mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
+{
+ return mtk_ppe_debugfs_foe_show(m, private, false);
+}
+
+static int
+mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
+{
+ return mtk_ppe_debugfs_foe_show(m, private, true);
+}
+
+static int
+mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_ppe_debugfs_foe_show_all,
+ inode->i_private);
+}
+
+static int
+mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_ppe_debugfs_foe_show_bind,
+ inode->i_private);
+}
+
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
+{
+ static const struct file_operations fops_all = {
+ .open = mtk_ppe_debugfs_foe_open_all,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations fops_bind = {
+ .open = mtk_ppe_debugfs_foe_open_bind,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ struct dentry *root;
+
+ root = debugfs_create_dir("mtk_ppe", NULL);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
+ debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
new file mode 100644
index 000000000000..b5f68f66d42a
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/if_ether.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
+
+struct mtk_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ u16 id;
+ __be16 proto;
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+struct mtk_flow_entry {
+ struct rhash_head node;
+ unsigned long cookie;
+ u16 hash;
+};
+
+static const struct rhashtable_params mtk_flow_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, node),
+ .key_offset = offsetof(struct mtk_flow_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
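
Flow rules are tracked in an rhashtable keyed on the unsigned long flow_cls_offload cookie; the params above pin the head and key offsets inside struct mtk_flow_entry. The table itself is initialized in mtk_eth_offload_init(), which is not part of this hunk; the lifecycle looks roughly like this sketch (<linux/rhashtable.h> is already included above):

/* Sketch of the cookie-keyed lifecycle; error handling trimmed. */
static int example_flow_table_demo(struct rhashtable *ht,
				   struct mtk_flow_entry *e,
				   unsigned long cookie)
{
	int err;

	err = rhashtable_init(ht, &mtk_flow_ht_params);
	if (err)
		return err;

	e->cookie = cookie;
	err = rhashtable_insert_fast(ht, &e->node, mtk_flow_ht_params);
	if (!err &&
	    rhashtable_lookup_fast(ht, &cookie, mtk_flow_ht_params) != e)
		err = -ENOENT;

	if (!err)
		rhashtable_remove_fast(ht, &e->node, mtk_flow_ht_params);
	rhashtable_destroy(ht);
	return err;
}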
+
+static u32
+mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static int
+mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
+ bool egress)
+{
+ return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+ data->v4.src_addr, data->src_port,
+ data->v4.dst_addr, data->dst_port);
+}
+
+static void
+mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int
+mtk_flow_mangle_ports(const struct flow_action_entry *act,
+ struct mtk_flow_data *data)
+{
+ u32 val = ntohl(act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if (act->mangle.mask == ~htonl(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct mtk_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int
+mtk_flow_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(*dev);
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return -ENODEV;
+
+ *dev = dp->cpu_dp->master;
+
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static int
+mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct net_device *dev)
+{
+ int pse_port, dsa_port;
+
+ dsa_port = mtk_flow_get_dsa_port(&dev);
+ if (dsa_port >= 0)
+ mtk_foe_entry_set_dsa(foe, dsa_port);
+
+ if (dev == eth->netdev[0])
+ pse_port = 1;
+ else if (dev == eth->netdev[1])
+ pse_port = 2;
+ else
+ return -EOPNOTSUPP;
+
+ mtk_foe_entry_set_pse_port(foe, pse_port);
+
+ return 0;
+}
+
+static int
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_action_entry *act;
+ struct mtk_flow_data data = {};
+ struct mtk_foe_entry foe;
+ struct net_device *odev = NULL;
+ struct mtk_flow_entry *entry;
+ int offload_type = 0;
+ u16 addr_type = 0;
+ u32 timestamp;
+ u8 l4proto = 0;
+ int err = 0;
+ int hash;
+ int i;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+ flow_rule_match_meta(rule, &match);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ mtk_flow_offload_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 1 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.id = act->vlan.vid;
+ data.vlan.proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ switch (addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
+ data.eth.h_source,
+ data.eth.h_dest);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv4_addr(&foe, &data, false);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = mtk_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = mtk_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+ if (err)
+ return err;
+ }
+
+ if (data.vlan.num == 1) {
+ if (data.vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ }
+ if (data.pppoe.num == 1)
+ mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+
+ err = mtk_flow_set_output_device(eth, &foe, odev);
+ if (err)
+ return err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->cookie = f->cookie;
+ timestamp = mtk_eth_timestamp(eth);
+ hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
+ if (hash < 0) {
+ err = hash;
+ goto free;
+ }
+
+ entry->hash = hash;
+ err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
+ mtk_flow_ht_params);
+ if (err < 0)
+ goto clear_flow;
+
+ return 0;
+clear_flow:
+ mtk_foe_entry_clear(&eth->ppe, hash);
+free:
+ kfree(entry);
+ return err;
+}
+
+static int
+mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+ struct mtk_flow_entry *entry;
+
+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ rhashtable_remove_fast(&eth->flow_table, &entry->node,
+ mtk_flow_ht_params);
+ kfree(entry);
+
+ return 0;
+}
+
+static int
+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+ struct mtk_flow_entry *entry;
+ int timestamp;
+ u32 idle;
+
+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
+ if (timestamp < 0)
+ return -ETIMEDOUT;
+
+ idle = mtk_eth_timestamp(eth) - timestamp;
+ f->stats.lastused = jiffies - idle * HZ;
+
+ return 0;
+}
+
+static DEFINE_MUTEX(mtk_flow_offload_mutex);
+
+static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int err;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&mtk_flow_offload_mutex);
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+ err = mtk_flow_offload_replace(eth, cls);
+ break;
+ case FLOW_CLS_DESTROY:
+ err = mtk_flow_offload_destroy(eth, cls);
+ break;
+ case FLOW_CLS_STATS:
+ err = mtk_flow_offload_stats(eth, cls);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&mtk_flow_offload_mutex);
+
+ return err;
+}
+
+static int
+mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+ flow_setup_cb_t *cb;
+
+ if (!eth->ppe.foe_table)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ cb = mtk_eth_setup_tc_block_cb;
+ f->driver_block_list = &block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (type == TC_SETUP_FT)
+ return mtk_eth_setup_tc_block(dev, type_data);
+
+ return -EOPNOTSUPP;
+}
+
+int mtk_eth_offload_init(struct mtk_eth *eth)
+{
+ if (!eth->ppe.foe_table)
+ return 0;
+
+ return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
+}
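
A note on the stats path above: mtk_flow_offload_stats() reads the entry's hardware timestamp, subtracts it from the current PPE clock to get an idle time, and rebases that onto jiffies for f->stats.lastused. A minimal userspace sketch of the same arithmetic, assuming a 16-bit counter purely for illustration (the driver's actual timestamp field width comes from the PPE definitions, not shown here):

	#include <stdint.h>
	#include <stdio.h>

	/* Unsigned subtraction at the counter's own width handles wrap-around. */
	static uint32_t idle_ticks(uint16_t hw_now, uint16_t entry_ts)
	{
		return (uint16_t)(hw_now - entry_ts);
	}

	int main(void)
	{
		/* entry last hit at tick 0xfff0, clock has since wrapped to 0x0010 */
		printf("idle: %u ticks\n", idle_ticks(0x0010, 0xfff0)); /* 32 */
		return 0;
	}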
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
new file mode 100644
index 000000000000..0c45ea0900f1
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_PPE_REGS_H
+#define __MTK_PPE_REGS_H
+
+#define MTK_PPE_GLO_CFG 0x200
+#define MTK_PPE_GLO_CFG_EN BIT(0)
+#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
+#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
+#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
+#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
+#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
+#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
+#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
+#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
+#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
+#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
+#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
+#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GENMASK(13, 12)
+#define MTK_PPE_GLO_CFG_BUSY BIT(31)
+
+#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
+#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
+#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
+#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
+#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
+#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
+#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
+#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
+#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
+#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
+#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
+#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
+#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
+#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
+
+#define MTK_PPE_IP_PROTO_CHK 0x208
+#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
+#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
+
+#define MTK_PPE_TB_CFG 0x21c
+#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
+#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
+#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
+#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
+#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
+#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
+#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
+#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
+#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
+#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
+#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
+#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
+#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+
+enum {
+ MTK_PPE_SCAN_MODE_DISABLED,
+ MTK_PPE_SCAN_MODE_CHECK_AGE,
+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
+};
+
+enum {
+ MTK_PPE_KEEPALIVE_DISABLE,
+ MTK_PPE_KEEPALIVE_UNICAST_CPU,
+ MTK_PPE_KEEPALIVE_DUP_CPU = 3,
+};
+
+enum {
+ MTK_PPE_SEARCH_MISS_ACTION_DROP,
+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
+};
+
+#define MTK_PPE_TB_BASE 0x220
+
+#define MTK_PPE_TB_USED 0x224
+#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
+
+#define MTK_PPE_BIND_RATE 0x228
+#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
+#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
+
+#define MTK_PPE_BIND_LIMIT0 0x22c
+#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
+#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
+
+#define MTK_PPE_BIND_LIMIT1 0x230
+#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
+#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234
+#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
+#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
+#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
+
+#define MTK_PPE_UNBIND_AGE 0x238
+#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
+#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
+
+#define MTK_PPE_BIND_AGE0 0x23c
+#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define MTK_PPE_BIND_AGE1 0x240
+#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define MTK_PPE_HASH_SEED 0x244
+
+#define MTK_PPE_DEFAULT_CPU_PORT 0x248
+#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+
+#define MTK_PPE_MTU_DROP 0x308
+
+#define MTK_PPE_VLAN_MTU0 0x30c
+#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
+#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
+
+#define MTK_PPE_VLAN_MTU1 0x310
+#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)
+#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16)
+
+#define MTK_PPE_VPM_TPID 0x318
+
+#define MTK_PPE_CACHE_CTL 0x320
+#define MTK_PPE_CACHE_CTL_EN BIT(0)
+#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4)
+#define MTK_PPE_CACHE_CTL_REQ BIT(8)
+#define MTK_PPE_CACHE_CTL_CLEAR BIT(9)
+#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12)
+
+#define MTK_PPE_MIB_CFG 0x334
+#define MTK_PPE_MIB_CFG_EN BIT(0)
+#define MTK_PPE_MIB_CFG_RD_CLR BIT(1)
+
+#define MTK_PPE_MIB_TB_BASE 0x338
+
+#define MTK_PPE_MIB_CACHE_CTL 0x350
+#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
+#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+
+#endif
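
The definitions above follow the usual kernel convention: BIT() for single-bit flags, GENMASK() for multi-bit fields, with values packed via FIELD_PREP() from <linux/bitfield.h>. A standalone sketch of composing a TB_CFG value under that convention (the userspace macro stand-ins and the entry-count encoding of 7 are assumptions for illustration; keepalive 3 and scan mode 1 correspond to MTK_PPE_KEEPALIVE_DUP_CPU and MTK_PPE_SCAN_MODE_CHECK_AGE from the enums above):

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel's BIT()/GENMASK()/FIELD_PREP(). */
	#define BIT(n)         (1u << (n))
	#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val) \
		(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

	#define MTK_PPE_TB_CFG_ENTRY_NUM   GENMASK(2, 0)
	#define MTK_PPE_TB_CFG_KEEPALIVE   GENMASK(13, 12)
	#define MTK_PPE_TB_CFG_SCAN_MODE   GENMASK(17, 16)

	int main(void)
	{
		uint32_t val = FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, 7) |
			       FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE, 3) |
			       FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, 1);

		printf("TB_CFG = 0x%08x\n", val); /* 0x00013007 */
		return 0;
	}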
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c678344d22a2..8d751383530b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2241,43 +2241,52 @@ void mlx4_master_comm_channel(struct work_struct *work)
struct mlx4_priv *priv =
container_of(mfunc, struct mlx4_priv, mfunc);
struct mlx4_dev *dev = &priv->dev;
- __be32 *bit_vec;
+ u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ u32 nmbr_bits;
u32 comm_cmd;
- u32 vec;
- int i, j, slave;
+ int i, slave;
int toggle;
+ bool first = true;
int served = 0;
int reported = 0;
u32 slt;
- bit_vec = master->comm_arm_bit_vector;
- for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
- vec = be32_to_cpu(bit_vec[i]);
- for (j = 0; j < 32; j++) {
- if (!(vec & (1 << j)))
- continue;
- ++reported;
- slave = (i * 32) + j;
- comm_cmd = swab32(readl(
- &mfunc->comm[slave].slave_write));
- slt = swab32(readl(&mfunc->comm[slave].slave_read))
- >> 31;
- toggle = comm_cmd >> 31;
- if (toggle != slt) {
- if (master->slave_state[slave].comm_toggle
- != slt) {
- pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
- slave, slt,
- master->slave_state[slave].comm_toggle);
- master->slave_state[slave].comm_toggle =
- slt;
- }
- mlx4_master_do_cmd(dev, slave,
- comm_cmd >> 16 & 0xff,
- comm_cmd & 0xffff, toggle);
- ++served;
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++)
+ lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]);
+ nmbr_bits = dev->persist->num_vfs + 1;
+ if (++master->next_slave >= nmbr_bits)
+ master->next_slave = 0;
+ slave = master->next_slave;
+ while (true) {
+ slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave);
+ if (!first && slave >= master->next_slave)
+ break;
+ if (slave == nmbr_bits) {
+ if (!first)
+ break;
+ first = false;
+ slave = 0;
+ continue;
+ }
+ ++reported;
+ comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
}
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
}
+ slave++;
}
if (reported && reported != served)
@@ -2389,6 +2398,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
if (!priv->mfunc.master.vf_oper)
goto err_comm_oper;
+ priv->mfunc.master.next_slave = 0;
+
for (i = 0; i < dev->num_slaves; ++i) {
vf_admin = &priv->mfunc.master.vf_admin[i];
vf_oper = &priv->mfunc.master.vf_oper[i];
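
The comm-channel rework above replaces the fixed walk over the armed-slave bit vector (always word 0, bit 0 first) with a round-robin scan that resumes one past master->next_slave and wraps around once, so servicing no longer always starts from slave 0. A minimal userspace model of that fairness property (illustrative only; the driver itself uses find_next_bit() over lbit_vec):

	#include <stdio.h>

	/* Serve every armed slave, starting one past where the last pass began. */
	static void serve_armed(unsigned long vec, int nbits, int *next)
	{
		int start = (*next + 1) % nbits;
		int i;

		for (i = 0; i < nbits; i++) {
			int slave = (start + i) % nbits;

			if (vec & (1UL << slave))
				printf("serving slave %d\n", slave);
		}
		*next = start;
	}

	int main(void)
	{
		int next = 0;

		serve_armed(0x16, 5, &next); /* bits 1, 2, 4 armed: serves 1, 2, 4 */
		serve_armed(0x16, 5, &next); /* next pass starts at 2: serves 2, 4, 1 */
		return 0;
	}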
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 64bed7ac3836..6ccf340660d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -603,6 +603,7 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_slave_event_eq slave_eq;
struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
+ u32 next_slave; /* mlx4_master_comm_channel */
};
struct mlx4_mfunc {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 9d623e38d783..461a43f338e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -104,6 +104,18 @@ config MLX5_TC_CT
If unsure, set to Y
+config MLX5_TC_SAMPLE
+ bool "MLX5 TC sample offload support"
+ depends on MLX5_CLS_ACT
+ default y
+ help
+ Say Y here if you want to support offloading sample rules via the
+ tc sample action.
+ If set to N, the driver will not be able to offload tc rules with
+ the sample action.
+
+ If unsure, set to Y
+
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"
default y
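
With CONFIG_MLX5_TC_SAMPLE=y, a sample action can be attached to an offloaded flower rule, and the sampled packets are delivered to userspace over the psample netlink channel. A hypothetical invocation (device name, rate and group number are placeholders):

	tc filter add dev enp6s0f0_0 ingress protocol ip flower skip_sw \
		action sample rate 100 group 5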
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 8cb2625472c3..a1223e904190 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -27,7 +27,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o
#
# Netdev extra
@@ -37,9 +37,10 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
- en_rep.o en/rep/bond.o en/mod_hdr.o
+ en_rep.o en/rep/bond.o en/mod_hdr.o \
+ en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
- en/mapping.o lib/fs_chains.o en/tc_tun.o \
+ lib/fs_chains.o en/tc_tun.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
@@ -49,11 +50,12 @@ mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o
+ ecpf.o rdma.o esw/legacy.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
- esw/devlink_port.o
+ esw/devlink_port.o esw/vporttbl.o
+mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e8cecd50558d..9d79c5ec31e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -263,15 +263,15 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
return 0;
}
-static void dump_buf(void *buf, int size, int data_only, int offset)
+static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
__be32 *p = buf;
int i;
for (i = 0; i < size; i += 16) {
- pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
- be32_to_cpu(p[1]), be32_to_cpu(p[2]),
- be32_to_cpu(p[3]));
+ pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
+ be32_to_cpu(p[0]), be32_to_cpu(p[1]),
+ be32_to_cpu(p[2]), be32_to_cpu(p[3]));
p += 4;
offset += 16;
}
@@ -802,39 +802,41 @@ static void dump_command(struct mlx5_core_dev *dev,
int dump_len;
int i;
+ mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
if (data_only)
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
- "dump command data %s(0x%x) %s\n",
- mlx5_command_str(op), op,
+ "cmd[%d]: dump command data %s(0x%x) %s\n",
+ ent->idx, mlx5_command_str(op), op,
input ? "INPUT" : "OUTPUT");
else
- mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
- mlx5_command_str(op), op,
+ mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
+ ent->idx, mlx5_command_str(op), op,
input ? "INPUT" : "OUTPUT");
if (data_only) {
if (input) {
- dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
+ dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
offset += sizeof(ent->lay->in);
} else {
- dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
+ dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
offset += sizeof(ent->lay->out);
}
} else {
- dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
+ dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
offset += sizeof(*ent->lay);
}
for (i = 0; i < n && next; i++) {
if (data_only) {
dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
- dump_buf(next->buf, dump_len, 1, offset);
+ dump_buf(next->buf, dump_len, 1, offset, ent->idx);
offset += MLX5_CMD_DATA_BLOCK_SIZE;
} else {
- mlx5_core_dbg(dev, "command block:\n");
- dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
+ mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
+ dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
+ ent->idx);
offset += sizeof(struct mlx5_cmd_prot_block);
}
next = next->next;
@@ -842,6 +844,8 @@ static void dump_command(struct mlx5_core_dev *dev,
if (data_only)
pr_debug("\n");
+
+ mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
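
With the entry index folded into every line, interleaved dumps from commands executing in parallel can be pulled apart in the debug log. Per the format strings above, a data line now reads (values illustrative):

	cmd[3]: 010: 00000001 00000000 00000000 00000000

and each dump is additionally bracketed by "cmd[3]: start dump" / "cmd[3]: end dump" markers.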
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 9153c9bda96f..a9166cd85013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -58,9 +58,6 @@ static bool is_eth_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
- if (is_eth_rep_supported(dev))
- return false;
-
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d0f9d3cee97d..44c458443428 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -137,18 +137,18 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
* unregistering devlink instance while holding devlink_mutex.
* Hence, do not support reload.
*/
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
return -EOPNOTSUPP;
}
if (mlx5_lag_is_active(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
return -EOPNOTSUPP;
}
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
@@ -170,13 +170,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
@@ -461,6 +461,50 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
return 0;
}
+
+static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EOPNOTSUPP;
+
+ return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
+}
+
+static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ return 0;
+}
+
+static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 esw_mode;
+
+ if (!MLX5_ESWITCH_MANAGER(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
+ return -EOPNOTSUPP;
+ }
+ esw_mode = mlx5_eswitch_mode(dev);
+ if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch must either disabled or non switchdev mode");
+ return -EBUSY;
+ }
+ return 0;
+}
+
#endif
static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
@@ -495,6 +539,12 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_esw_port_metadata_get,
+ mlx5_devlink_esw_port_metadata_set,
+ mlx5_devlink_esw_port_metadata_validate),
#endif
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_enable_remote_dev_reset_get,
@@ -524,6 +574,18 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
value);
+
+ if (MLX5_ESWITCH_MANAGER(dev)) {
+ if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
+ dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ value.vbool = true;
+ } else {
+ value.vbool = false;
+ }
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ value);
+ }
#endif
}
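
The new esw_port_metadata parameter is runtime-only and, per the validate callback above, is refused while the E-Switch is in switchdev offloads mode. A hypothetical toggle from userspace (the PCI address is a placeholder):

	devlink dev param set pci/0000:06:00.0 name esw_port_metadata \
		value false cmode runtime
	devlink dev param show pci/0000:06:00.0 name esw_port_metadata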
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index eff107dad922..7318d44b774b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -10,6 +10,7 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 2eb022ad7fd0..01a1d02dcf15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1100,7 +1100,7 @@ int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
int err;
if (IS_ERR_OR_NULL(tracer))
- return -EINVAL;
+ return 0;
dev = tracer->dev;
mlx5_fw_tracer_cleanup(tracer);
@@ -1126,8 +1126,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
switch (eqe->sub_type) {
case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
- queue_work(tracer->work_queue, &tracer->ownership_change_work);
+ queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
if (likely(tracer->str_db.loaded))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bc6f77ea0a31..b636d63358d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -269,6 +269,7 @@ struct mlx5e_params {
struct mlx5e_xsk *xsk;
unsigned int sw_mtu;
int hard_mtu;
+ bool ptp_rx;
};
enum {
@@ -324,9 +325,9 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
- MLX5E_SQ_STATE_TLS,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
+ MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
};
struct mlx5e_tx_mpwqe {
@@ -499,6 +500,8 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_ktls_resync_resp;
+
struct mlx5e_icosq {
/* data path */
u16 cc;
@@ -518,6 +521,7 @@ struct mlx5e_icosq {
u32 sqn;
u16 reserved_room;
unsigned long state;
+ struct mlx5e_ktls_resync_resp *ktls_resync;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -708,11 +712,11 @@ struct mlx5e_channel {
int cpu;
};
-struct mlx5e_port_ptp;
+struct mlx5e_ptp;
struct mlx5e_channels {
struct mlx5e_channel **c;
- struct mlx5e_port_ptp *port_ptp;
+ struct mlx5e_ptp *ptp;
unsigned int num;
struct mlx5e_params params;
};
@@ -727,10 +731,11 @@ struct mlx5e_channel_stats {
struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
-struct mlx5e_port_ptp_stats {
+struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
enum {
@@ -837,6 +842,7 @@ struct mlx5e_priv {
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_tir ptp_tir;
struct mlx5e_rss_params rss_params;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
@@ -856,10 +862,11 @@ struct mlx5e_priv {
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats trap_stats;
- struct mlx5e_port_ptp_stats port_ptp_stats;
+ struct mlx5e_ptp_stats ptp_stats;
u16 max_nch;
u8 max_opened_tc;
- bool port_ptp_opened;
+ bool tx_ptp_opened;
+ bool rx_ptp_opened;
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
@@ -882,7 +889,6 @@ struct mlx5e_priv {
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
- struct devlink_port dl_port;
struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent stats_agent;
@@ -916,13 +922,12 @@ struct mlx5e_profile {
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
+ bool rx_ptp_support;
};
void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -965,9 +970,9 @@ struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types
struct mlx5e_xsk_param;
struct mlx5e_rq_param;
-int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+ struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
@@ -1013,27 +1018,20 @@ int fn##_ctx(struct mlx5e_priv *priv, void *context) \
return fn(priv); \
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
-int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_preactivate preactivate,
- void *context);
+int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params,
+ mlx5e_fp_preactivate preactivate,
+ void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
+int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-
-void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
@@ -1092,10 +1090,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
@@ -1176,10 +1174,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv);
+void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index a69c62d72d16..0dd7615e5931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -2,37 +2,70 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include "en/devlink.h"
+#include "eswitch.h"
+
+static void
+mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
+{
+ u64 parent_id;
+
+ parent_id = mlx5_query_nic_system_image_guid(dev);
+ ppid->id_len = sizeof(parent_id);
+ memcpy(ppid->id, &parent_id, sizeof(parent_id));
+}
int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
{
struct devlink *devlink = priv_to_devlink(priv->mdev);
struct devlink_port_attrs attrs = {};
+ struct netdev_phys_item_id ppid = {};
+ struct devlink_port *dl_port;
+ unsigned int dl_port_index;
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+ if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
+ mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
+ memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
+ attrs.switch_id.id_len = ppid.id_len;
+ }
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev,
+ MLX5_VPORT_UPLINK);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0);
}
- devlink_port_attrs_set(&priv->dl_port, &attrs);
+ dl_port = mlx5e_devlink_get_dl_port(priv);
+ memset(dl_port, 0, sizeof(*dl_port));
+ devlink_port_attrs_set(dl_port, &attrs);
- return devlink_port_register(devlink, &priv->dl_port, 1);
+ return devlink_port_register(devlink, dl_port, dl_port_index);
}
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
{
- devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+ devlink_port_type_eth_set(dl_port, priv->netdev);
}
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
- devlink_port_unregister(&priv->dl_port);
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+ if (dl_port->registered)
+ devlink_port_unregister(dl_port);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct devlink_port *port;
- return &priv->dl_port;
+ port = mlx5e_devlink_get_dl_port(priv);
+ if (port->registered)
+ return port;
+ return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 83123a801adc..10b50feb9883 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -12,4 +12,10 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
+static inline struct devlink_port *
+mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
+{
+ return &priv->mdev->mlx5e_res.dl_port;
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index a16297e7e2ac..1d5ce07b83f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -29,6 +29,7 @@ struct mlx5e_tc_table {
struct netdev_net_notifier netdevice_nn;
struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
};
struct mlx5e_flow_table {
@@ -49,18 +50,10 @@ struct mlx5e_promisc_table {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_vlan_table {
- struct mlx5e_flow_table ft;
- DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
- DECLARE_BITMAP(active_svlans, VLAN_N_VID);
- struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_cvlan_rule;
- struct mlx5_flow_handle *any_svlan_rule;
- struct mlx5_flow_handle *trap_rule;
- bool cvlan_filter_disabled;
-};
+/* Forward declaration and APIs to get private fields of vlan_table */
+struct mlx5e_vlan_table;
+unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan);
+struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan);
struct mlx5e_l2_table {
struct mlx5e_flow_table ft;
@@ -137,11 +130,13 @@ enum {
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
+ MLX5E_FS_TT_UDP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_FS_TT_ANY_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#ifdef CONFIG_MLX5_EN_TLS
- MLX5E_ACCEL_FS_TCP_FT_LEVEL,
+ MLX5E_ACCEL_FS_TCP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_ARFS
- MLX5E_ARFS_FT_LEVEL,
+ MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
@@ -198,31 +193,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
#endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct arfs_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *default_rule;
- struct hlist_head rules_hash[ARFS_HASH_SIZE];
-};
-
-enum arfs_type {
- ARFS_IPV4_TCP,
- ARFS_IPV6_TCP,
- ARFS_IPV4_UDP,
- ARFS_IPV6_UDP,
- ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
- struct arfs_table arfs_tables[ARFS_NUM_TYPES];
- /* Protect aRFS rules list */
- spinlock_t arfs_lock;
- struct list_head rules;
- int last_filter_id;
- struct workqueue_struct *wq;
-};
+struct mlx5e_arfs_tables;
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
@@ -241,6 +212,10 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_fs_udp;
+struct mlx5e_fs_any;
+struct mlx5e_ptp_fs;
+
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_namespace *egress_ns;
@@ -249,16 +224,19 @@ struct mlx5e_flow_steering {
#endif
struct mlx5e_tc_table tc;
struct mlx5e_promisc_table promisc;
- struct mlx5e_vlan_table vlan;
+ struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
struct mlx5e_ttc_table inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
- struct mlx5e_arfs_tables arfs;
+ struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
+ struct mlx5e_fs_udp *udp;
+ struct mlx5e_fs_any *any;
+ struct mlx5e_ptp_fs *ptp_fs;
};
struct ttc_params {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
new file mode 100644
index 000000000000..909faa6c89d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include "en/fs_tt_redirect.h"
+#include "fs_core.h"
+
+enum fs_udp_type {
+ FS_IPV4_UDP,
+ FS_IPV6_UDP,
+ FS_UDP_NUM_TYPES,
+};
+
+struct mlx5e_fs_udp {
+ struct mlx5e_flow_table tables[FS_UDP_NUM_TYPES];
+ struct mlx5_flow_handle *default_rules[FS_UDP_NUM_TYPES];
+ int ref_cnt;
+};
+
+struct mlx5e_fs_any {
+ struct mlx5e_flow_table table;
+ struct mlx5_flow_handle *default_rule;
+ int ref_cnt;
+};
+
+static char *fs_udp_type2str(enum fs_udp_type i)
+{
+ switch (i) {
+ case FS_IPV4_UDP:
+ return "UDP v4";
+ default: /* FS_IPV6_UDP */
+ return "UDP v6";
+ }
+}
+
+static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i)
+{
+ switch (i) {
+ case FS_IPV4_UDP:
+ return MLX5E_TT_IPV4_UDP;
+ default: /* FS_IPV6_UDP */
+ return MLX5E_TT_IPV6_UDP;
+ }
+}
+
+static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i)
+{
+ switch (i) {
+ case MLX5E_TT_IPV4_UDP:
+ return FS_IPV4_UDP;
+ case MLX5E_TT_IPV6_UDP:
+ return FS_IPV6_UDP;
+ default:
+ return FS_UDP_NUM_TYPES;
+ }
+}
+
+void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule)
+{
+ mlx5_del_flow_rules(rule);
+}
+
+static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type type,
+ u16 udp_dport)
+{
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ type == FS_IPV4_UDP ? 4 : 6);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, udp_dport);
+}
+
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types ttc_type,
+ u32 tir_num, u16 d_port)
+{
+ enum fs_udp_type type = tt2fs_udp(ttc_type);
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_table *ft = NULL;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_fs_udp *fs_udp;
+ int err;
+
+ if (type == FS_UDP_NUM_TYPES)
+ return ERR_PTR(-EINVAL);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ fs_udp = priv->fs.udp;
+ ft = fs_udp->tables[type].t;
+
+ fs_udp_set_dport_flow(spec, type, d_port);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n",
+ __func__, fs_udp_type2str(type), err);
+ }
+ return rule;
+}
+
+static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type)
+{
+ struct mlx5e_flow_table *fs_udp_t;
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5e_fs_udp *fs_udp;
+ int err;
+
+ fs_udp = priv->fs.udp;
+ fs_udp_t = &fs_udp->tables[type];
+
+ dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type));
+ rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev,
+ "%s: add default rule failed, fs type=%d, err %d\n",
+ __func__, type, err);
+ return err;
+ }
+
+ fs_udp->default_rules[type] = rule;
+ return 0;
+}
+
+#define MLX5E_FS_UDP_NUM_GROUPS (2)
+#define MLX5E_FS_UDP_GROUP1_SIZE (BIT(16))
+#define MLX5E_FS_UDP_GROUP2_SIZE (BIT(0))
+#define MLX5E_FS_UDP_TABLE_SIZE (MLX5E_FS_UDP_GROUP1_SIZE +\
+ MLX5E_FS_UDP_GROUP2_SIZE)
+static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);
+
+ switch (type) {
+ case FS_IPV4_UDP:
+ case FS_IPV6_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ /* Match on UDP protocol, IPv4/6 and dport */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_UDP_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Default Flow Group */
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_UDP_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+out:
+ kvfree(in);
+
+ return err;
+}
+
+static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft->num_groups = 0;
+
+ ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
+ ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n",
+ fs_udp_type2str(type), ft->t->id, ft->t->level);
+
+ err = fs_udp_create_groups(ft, type);
+ if (err)
+ goto err;
+
+ err = fs_udp_add_default_rule(priv, type);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
+{
+ if (IS_ERR_OR_NULL(fs_udp->tables[i].t))
+ return;
+
+ mlx5_del_flow_rules(fs_udp->default_rules[i]);
+ mlx5e_destroy_flow_table(&fs_udp->tables[i]);
+ fs_udp->tables[i].t = NULL;
+}
+
+static int fs_udp_disable(struct mlx5e_priv *priv)
+{
+ int err, i;
+
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
+ /* Modify ttc rules destination to point back to the indir TIRs */
+ err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i));
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int fs_udp_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest = {};
+ int err, i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
+ dest.ft = priv->fs.udp->tables[i].t;
+
+ /* Modify ttc rules destination to point to the accel_fs FTs */
+ err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ int i;
+
+ if (!fs_udp)
+ return;
+
+ if (--fs_udp->ref_cnt)
+ return;
+
+ fs_udp_disable(priv);
+
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++)
+ fs_udp_destroy_table(fs_udp, i);
+
+ kfree(fs_udp);
+ priv->fs.udp = NULL;
+}
+
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
+{
+ int i, err;
+
+ if (priv->fs.udp) {
+ priv->fs.udp->ref_cnt++;
+ return 0;
+ }
+
+ priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
+ if (!priv->fs.udp)
+ return -ENOMEM;
+
+ for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
+ err = fs_udp_create_table(priv, i);
+ if (err)
+ goto err_destroy_tables;
+ }
+
+ err = fs_udp_enable(priv);
+ if (err)
+ goto err_destroy_tables;
+
+ priv->fs.udp->ref_cnt = 1;
+
+ return 0;
+
+err_destroy_tables:
+ while (--i >= 0)
+ fs_udp_destroy_table(priv->fs.udp, i);
+
+ kfree(priv->fs.udp);
+ priv->fs.udp = NULL;
+ return err;
+}
+
+static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_type)
+{
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ether_type);
+}
+
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+ u32 tir_num, u16 ether_type)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_table *ft = NULL;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_fs_any *fs_any;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ fs_any = priv->fs.any;
+ ft = fs_any->table.t;
+
+ fs_any_set_ethertype_flow(spec, ether_type);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n",
+ __func__, err);
+ }
+ return rule;
+}
+
+static int fs_any_add_default_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *fs_any_t;
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5e_fs_any *fs_any;
+ int err;
+
+ fs_any = priv->fs.any;
+ fs_any_t = &fs_any->table;
+
+ dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY);
+ rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev,
+ "%s: add default rule failed, fs type=ANY, err %d\n",
+ __func__, err);
+ return err;
+ }
+
+ fs_any->default_rule = rule;
+ return 0;
+}
+
+#define MLX5E_FS_ANY_NUM_GROUPS (2)
+#define MLX5E_FS_ANY_GROUP1_SIZE (BIT(16))
+#define MLX5E_FS_ANY_GROUP2_SIZE (BIT(0))
+#define MLX5E_FS_ANY_TABLE_SIZE (MLX5E_FS_ANY_GROUP1_SIZE +\
+ MLX5E_FS_ANY_GROUP2_SIZE)
+
+static int fs_any_create_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_FS_ANY_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ /* Match on ethertype */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_ANY_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Default Flow Group */
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_FS_ANY_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static int fs_any_create_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft->num_groups = 0;
+
+ ft_attr.max_fte = MLX5E_FS_ANY_TABLE_SIZE;
+ ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n",
+ ft->t->id, ft->t->level);
+
+ err = fs_any_create_groups(ft);
+ if (err)
+ goto err;
+
+ err = fs_any_add_default_rule(priv);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static int fs_any_disable(struct mlx5e_priv *priv)
+{
+ int err;
+
+ /* Modify ttc rules destination to point back to the indir TIRs */
+ err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, MLX5E_TT_ANY, err);
+ return err;
+ }
+ return 0;
+}
+
+static int fs_any_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest = {};
+ int err;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.any->table.t;
+
+ /* Modify ttc rules destination to point to the accel_fs FTs */
+ err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, MLX5E_TT_ANY, err);
+ return err;
+ }
+ return 0;
+}
+
+static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
+{
+ if (IS_ERR_OR_NULL(fs_any->table.t))
+ return;
+
+ mlx5_del_flow_rules(fs_any->default_rule);
+ mlx5e_destroy_flow_table(&fs_any->table);
+ fs_any->table.t = NULL;
+}
+
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_fs_any *fs_any = priv->fs.any;
+
+ if (!fs_any)
+ return;
+
+ if (--fs_any->ref_cnt)
+ return;
+
+ fs_any_disable(priv);
+
+ fs_any_destroy_table(fs_any);
+
+ kfree(fs_any);
+ priv->fs.any = NULL;
+}
+
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
+{
+ int err;
+
+ if (priv->fs.any) {
+ priv->fs.any->ref_cnt++;
+ return 0;
+ }
+
+ priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
+ if (!priv->fs.any)
+ return -ENOMEM;
+
+ err = fs_any_create_table(priv);
+ if (err)
+ return err;
+
+ err = fs_any_enable(priv);
+ if (err)
+ goto err_destroy_table;
+
+ priv->fs.any->ref_cnt = 1;
+
+ return 0;
+
+err_destroy_table:
+ fs_any_destroy_table(priv->fs.any);
+
+ kfree(priv->fs.any);
+ priv->fs.any = NULL;
+ return err;
+}
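
Both redirect tables are shared singletons: the first caller builds the flow table(s) and re-points the TTC rules at them, later callers only take a reference, and the last put tears everything down. A standalone sketch of that ref_cnt pattern (illustrative, not the driver code):

	#include <stdio.h>
	#include <stdlib.h>

	/* First get builds the shared state; last put frees it. */
	struct shared_tables { int ref_cnt; };

	static struct shared_tables *tables;

	static int tables_get(void)
	{
		if (tables) {
			tables->ref_cnt++;
			return 0;
		}
		tables = calloc(1, sizeof(*tables));
		if (!tables)
			return -1;
		tables->ref_cnt = 1;
		return 0;
	}

	static void tables_put(void)
	{
		if (!tables || --tables->ref_cnt)
			return;
		free(tables);
		tables = NULL;
	}

	int main(void)
	{
		tables_get();                /* e.g. the UDP redirect user */
		tables_get();                /* a second user shares the tables */
		tables_put();                /* still referenced, nothing freed */
		tables_put();                /* last put frees the tables */
		printf("tables %s\n", tables ? "alive" : "freed");
		return 0;
	}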
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
new file mode 100644
index 000000000000..8385df24eb99
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5E_FS_TT_REDIRECT_H__
+#define __MLX5E_FS_TT_REDIRECT_H__
+
+#include "en.h"
+#include "en/fs.h"
+
+void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
+
+/* UDP traffic type redirect */
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types ttc_type,
+ u32 tir_num, u16 d_port);
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
+
+/* ANY traffic type redirect */
+struct mlx5_flow_handle *
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+ u32 tir_num, u16 ether_type);
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 84e501e057b4..6f4e6c34b2a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -128,7 +128,7 @@ int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg
if (err)
return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "size", eq->core.nent);
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core));
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 36381a2ed5a5..f410c1268422 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -3,10 +3,13 @@
#include "en/params.h"
#include "en/txrx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en/port.h"
+#include "en_accel/en_accel.h"
+#include "accel/ipsec.h"
+#include "fpga/ipsec.h"
-static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
return params->xdp_prog || xsk;
}
@@ -37,8 +40,8 @@ u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
return linear_rq_headroom + hw_mtu;
}
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
@@ -87,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}
-#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
- MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
-bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides)
{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
- s8 signed_log_num_strides_param;
- u8 log_num_strides;
+ if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+ return false;
- if (!mlx5e_rx_is_linear_skb(params, xsk))
+ if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
+ log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
return false;
- if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
return false;
if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
- return true;
+ return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;
+
+ return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+}
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
- signed_log_num_strides_param =
- (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ s8 log_num_strides;
+ u8 log_stride_sz;
- return signed_log_num_strides_param >= 0;
+ if (!mlx5e_rx_is_linear_skb(params, xsk))
+ return false;
+
+ log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+
+ return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
@@ -172,17 +184,505 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
return stop_room;
}
-int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
+int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
size_t sq_size = 1 << params->log_sq_size;
u16 stop_room;
- stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
+ stop_room = mlx5e_calc_sq_stop_room(mdev, params);
if (stop_room >= sq_size) {
- netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n",
- stop_room, sq_size);
+ mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
+ stop_room, sq_size);
return -EINVAL;
}
return 0;
}
+
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+{
+ struct dim_cq_moder moder;
+
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ struct dim_cq_moder moder;
+
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+
+static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
+{
+ return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ if (params->tx_dim_enabled) {
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
+ } else {
+ params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
+ }
+}
+
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ if (params->rx_dim_enabled) {
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
+ } else {
+ params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
+ }
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_tx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_rx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ params->rx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
+bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
+{
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
+
+ mlx5e_port_max_linkspeed(mdev, &link_speed);
+ pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
+ mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+
+#define MLX5E_SLOW_PCI_RATIO (2)
+
+ return link_speed && pci_bw &&
+ link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
+}
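
Both quantities are compared in the same unit (Mb/s), so the heuristic fires only when the port is more than twice as fast as the available PCI bandwidth. A standalone sketch with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

#define SLOW_PCI_RATIO 2

/* Same comparison as slow_pci_heuristic(); units are Mb/s. */
static bool is_slow_pci(unsigned link_speed, unsigned pci_bw)
{
	return link_speed && pci_bw &&
	       link_speed > SLOW_PCI_RATIO * pci_bw;
}

int main(void)
{
	/* 100 GbE behind a full x16 Gen3 slot (~126 Gb/s): not slow. */
	printf("%d\n", is_slow_pci(100000, 126000));
	/* 100 GbE behind a x4 Gen3 link (~31.5 Gb/s): slow, so the
	 * driver will not default to striding RQ. */
	printf("%d\n", is_slow_pci(100000, 31500));
	return 0;
}
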
+
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ return false;
+
+ if (mlx5_fpga_is_ipsec_device(mdev))
+ return false;
+
+ if (params->xdp_prog) {
+ /* XSK params are not considered here. If striding RQ is in use,
+ * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
+ * be called with the known XSK params.
+ */
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+ return false;
+ }
+
+ return true;
+}
+
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+ mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
+ BIT(params->log_rq_mtu_frames),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+}
+
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_CYCLIC;
+}
+
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ /* Prefer Striding RQ, unless any of the following holds:
+ * - Striding RQ configuration is not possible/supported.
+ * - Slow PCI heuristic.
+ * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
+ *
+ * No XSK params: checking the availability of striding RQ in general.
+ */
+ if (!slow_pci_heuristic(mdev) &&
+ mlx5e_striding_rq_possible(mdev, params) &&
+ (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
+ !mlx5e_rx_is_linear_skb(params, NULL)))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
+}
+
+/* Build queue parameters */
+
+void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
+{
+ *ccp = (struct mlx5e_create_cq_param) {
+ .napi = &c->napi,
+ .ch_stats = c->stats,
+ .node = cpu_to_node(c->cpu),
+ .ix = c->ix,
+ };
+}
+
+#define DEFAULT_FRAG_SIZE (2048)
+
+static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_frags_info *info)
+{
+ u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ int frag_size_max = DEFAULT_FRAG_SIZE;
+ u32 buf_size = 0;
+ int i;
+
+ if (mlx5_fpga_is_ipsec_device(mdev))
+ byte_count += MLX5E_METADATA_ETHER_LEN;
+
+ if (mlx5e_rx_is_linear_skb(params, xsk)) {
+ int frag_stride;
+
+ frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
+ frag_stride = roundup_pow_of_two(frag_stride);
+
+ info->arr[0].frag_size = byte_count;
+ info->arr[0].frag_stride = frag_stride;
+ info->num_frags = 1;
+ info->wqe_bulk = PAGE_SIZE / frag_stride;
+ goto out;
+ }
+
+ if (byte_count > PAGE_SIZE +
+ (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+ frag_size_max = PAGE_SIZE;
+
+ i = 0;
+ while (buf_size < byte_count) {
+ int frag_size = byte_count - buf_size;
+
+ if (i < MLX5E_MAX_RX_FRAGS - 1)
+ frag_size = min(frag_size, frag_size_max);
+
+ info->arr[i].frag_size = frag_size;
+ info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+
+ buf_size += frag_size;
+ i++;
+ }
+ info->num_frags = i;
+ /* number of different wqes sharing a page */
+ info->wqe_bulk = 1 + (info->num_frags % 2);
+
+out:
+ info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+ info->log_num_frags = order_base_2(info->num_frags);
+}
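
A worked example of the non-linear branch above, as a standalone sketch; MLX5E_MAX_RX_FRAGS = 4 and a 4 KB page are assumptions for illustration:

#include <stdio.h>

#define DEFAULT_FRAG_SZ 2048
#define MAX_RX_FRAGS    4     /* assumed value of MLX5E_MAX_RX_FRAGS */
#define PAGE_SZ         4096

int main(void)
{
	unsigned byte_count = 9000; /* e.g. a jumbo-MTU receive buffer */
	unsigned frag_size_max = DEFAULT_FRAG_SZ;
	unsigned buf_size = 0, i = 0;

	/* Bump the cap to a full page only when even MAX_RX_FRAGS
	 * default-sized fragments could not hold the packet. */
	if (byte_count > PAGE_SZ + (MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SZ;

	while (buf_size < byte_count) {
		unsigned frag_size = byte_count - buf_size;

		if (i < MAX_RX_FRAGS - 1 && frag_size > frag_size_max)
			frag_size = frag_size_max;
		printf("frag[%u] = %u\n", i, frag_size);
		buf_size += frag_size;
		i++;
	}
	/* Prints 2048, 2048, 2048, 2856: three capped fragments plus
	 * the remainder in the last slot, which the loop above allows
	 * to exceed frag_size_max. */
	return 0;
}
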
+
+static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
+{
+ int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
+
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ sz += sizeof(struct mlx5e_rx_wqe_ll);
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ sz += sizeof(struct mlx5e_rx_wqe_cyc);
+ }
+
+ return order_base_2(sz);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_cq_param *param)
+{
+ bool hw_stridx = false;
+ void *cqc = param->cqc;
+ u8 log_cq_size;
+
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ log_cq_size = params->log_rq_mtu_frames;
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ }
+
+ mlx5e_build_common_cq_param(mdev, param);
+ param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
+}
+
+int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ u16 q_counter,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int ndsegs = 1;
+
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
+ u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+
+ if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+ log_wqe_num_of_strides)) {
+ mlx5_core_err(mdev,
+ "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
+ log_wqe_stride_size, log_wqe_num_of_strides);
+ return -EINVAL;
+ }
+
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ break;
+ }
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
+ mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ ndsegs = param->frags_info.num_frags;
+ }
+
+ MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride,
+ mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
+ MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
+ MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
+
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
+
+ return 0;
+}
+
+void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+ u16 q_counter,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, log_wq_stride,
+ mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
+
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+}
+
+void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
+
+ mlx5e_build_common_cq_param(mdev, param);
+ param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
+}
+
+void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+}
+
+void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+
+ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+ !!MLX5_IPSEC_DEV(mdev);
+ mlx5e_build_sq_param_common(mdev, param);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+ param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
+ mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+}
+
+static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+ mlx5e_build_common_cq_param(mdev, param);
+
+ param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
+{
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ return MLX5_GET(wq, wq, log_wq_sz);
+}
+
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp)
+{
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+ order_base_2(MLX5E_UMR_WQEBBS) +
+ mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ }
+}
+
+static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
+{
+ if (mlx5_accel_is_ktls_rx(mdev))
+ return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+}
+
+static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(mdev, param);
+
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
+ mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
+}
+
+static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(mdev, param);
+ param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+ param->is_tls = mlx5_accel_is_ktls_rx(mdev);
+ if (param->is_tls)
+ param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+ mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
+}
+
+void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(mdev, param);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+ mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+}
+
+int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 q_counter,
+ struct mlx5e_channel_param *cparam)
+{
+ u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
+ int err;
+
+ err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+ if (err)
+ return err;
+
+ icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+ async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
+
+ mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
+ mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
+
+ return 0;
+}
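
The call order here matters: the RQ parameters are built first because the ICOSQ is sized to hold one UMR WQE per striding-RQ WQE (see mlx5e_build_icosq_log_wq_sz() above). A standalone sketch of that sizing rule; both constants are assumed values for illustration only:

#include <stdio.h>

#define MIN_LOG_SQ_SIZE 6  /* assumed MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE */
#define UMR_WQEBBS      4  /* assumed MLX5E_UMR_WQEBBS */

static unsigned order_base_2(unsigned v) /* ceil(log2(v)) */
{
	unsigned r = 0;

	while ((1u << r) < v)
		r++;
	return r;
}

int main(void)
{
	unsigned rq_log_wq_sz = 9; /* read from the already-built RQ param */
	unsigned icosq_log = order_base_2(UMR_WQEBBS) + rq_log_wq_sz;

	if (icosq_log < MIN_LOG_SQ_SIZE)
		icosq_log = MIN_LOG_SQ_SIZE;
	/* 2 + 9 = 11: room for one UMR WQE per striding-RQ WQE. */
	printf("icosq log_wq_sz = %u\n", icosq_log);
	return 0;
}
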
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index ea2cfb04b31a..e9593f5f0661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -30,6 +30,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
bool is_mpw;
+ bool is_tls;
u16 stop_room;
};
@@ -84,12 +85,23 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
/* Parameter calculations */
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
+bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+
+bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
@@ -112,32 +124,31 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
-void mlx5e_build_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *param);
-void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ u16 q_counter,
+ struct mlx5e_rq_param *param);
+void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+ u16 q_counter,
+ struct mlx5e_rq_param *param);
+void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
-void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_sq_param *param);
-void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_cq_param *param);
-void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_cq_param *param);
-void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_cq_param *param);
-void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_sq_param *param);
-void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_sq_param *param);
+int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 q_counter,
+ struct mlx5e_channel_param *cparam);
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params);
+int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d57b6f06382f..d907c1acd4d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -1,8 +1,26 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
+#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
+#include "en/params.h"
+#include "en/fs_tt_redirect.h"
+
+struct mlx5e_ptp_fs {
+ struct mlx5_flow_handle *l2_rule;
+ struct mlx5_flow_handle *udp_v4_rule;
+ struct mlx5_flow_handle *udp_v6_rule;
+ bool valid;
+};
+
+#define MLX5E_PTP_CHANNEL_IX 0
+
+struct mlx5e_ptp_params {
+ struct mlx5e_params params;
+ struct mlx5e_sq_param txq_sq_param;
+ struct mlx5e_rq_param rq_param;
+};
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
@@ -116,9 +134,9 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
- struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp,
- napi);
+ struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
+ struct mlx5e_rq *rq = &c->rq;
bool busy = false;
int work_done = 0;
int i;
@@ -127,9 +145,19 @@ static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
ch_stats->poll++;
- for (i = 0; i < c->num_tc; i++) {
- busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
- busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (i = 0; i < c->num_tc; i++) {
+ busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
+ busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
+ }
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
+ busy |= work_done == budget;
+ busy |= INDIRECT_CALL_2(rq->post_wqes,
+ mlx5e_post_rx_mpwqes,
+ mlx5e_post_rx_wqes,
+ rq);
}
if (busy) {
@@ -142,10 +170,14 @@ static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
ch_stats->arm++;
- for (i = 0; i < c->num_tc; i++) {
- mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
- mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (i = 0; i < c->num_tc; i++) {
+ mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
+ mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
+ }
}
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_cq_arm(&rq->cq);
out:
rcu_read_unlock();
@@ -153,7 +185,7 @@ out:
return work_done;
}
-static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
+static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
struct mlx5e_txqsq *sq, int tc,
@@ -172,20 +204,18 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
sq->netdev = c->netdev;
sq->priv = c->priv;
sq->mdev = mdev;
- sq->ch_ix = c->ix;
+ sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = &c->priv->port_ptp_stats.sq[tc];
+ sq->stats = &c->priv->ptp_stats.sq[tc];
sq->ptpsq = ptpsq;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
- sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
- mlx5_real_time_cyc2time :
- mlx5_timecounter_cyc2time;
+ sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
node = dev_to_node(mlx5_core_dma_dev(mdev));
@@ -243,7 +273,7 @@ static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
kvfree(skb_fifo->fifo);
}
-static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
+static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
int txq_ix, struct mlx5e_ptp_params *cparams,
int tc, struct mlx5e_ptpsq *ptpsq)
{
@@ -293,7 +323,7 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
mlx5e_free_txqsq(sq);
}
-static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c,
+static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
struct mlx5e_ptp_params *cparams)
{
struct mlx5e_params *params = &cparams->params;
@@ -321,7 +351,7 @@ close_txqsq:
return err;
}
-static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
+static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
int tc;
@@ -329,8 +359,8 @@ static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}
-static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
- struct mlx5e_ptp_params *cparams)
+static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
+ struct mlx5e_ptp_params *cparams)
{
struct mlx5e_params *params = &cparams->params;
struct mlx5e_create_cq_param ccp = {};
@@ -342,7 +372,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
- ccp.ix = c->ix;
+ ccp.ix = MLX5E_PTP_CHANNEL_IX;
cq_param = &cparams->txq_sq_param.cqp;
@@ -362,7 +392,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
if (err)
goto out_err_ts_cq;
- ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc];
+ ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
}
return 0;
@@ -378,7 +408,25 @@ out_err_txqsq_cq:
return err;
}
-static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
+static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ struct mlx5e_create_cq_param ccp = {};
+ struct dim_cq_moder ptp_moder = {};
+ struct mlx5e_cq_param *cq_param;
+ struct mlx5e_cq *cq = &c->rq.cq;
+
+ ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
+ ccp.ch_stats = c->stats;
+ ccp.napi = &c->napi;
+ ccp.ix = MLX5E_PTP_CHANNEL_IX;
+
+ cq_param = &cparams->rq_param.cqp;
+
+ return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+}
+
+static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
int tc;
@@ -389,22 +437,36 @@ static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}
-static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv,
+static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq;
- mlx5e_build_sq_param_common(priv, param);
+ mlx5e_build_sq_param_common(mdev, param);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
- mlx5e_build_tx_cq_param(priv, params, &param->cqp);
+ mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+}
+
+static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ u16 q_counter,
+ struct mlx5e_ptp_params *ptp_params)
+{
+ struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
+ struct mlx5e_params *params = &ptp_params->params;
+
+ params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
+ mlx5e_init_rq_type_params(mdev, params);
+ params->sw_mtu = netdev->max_mtu;
+ mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}
-static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
+static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
struct mlx5e_ptp_params *cparams,
struct mlx5e_params *orig)
{
@@ -417,52 +479,193 @@ static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
params->num_tc = orig->num_tc;
/* SQ */
- params->log_sq_size = orig->log_sq_size;
-
- mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ params->log_sq_size = orig->log_sq_size;
+ mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
}
-static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
- struct mlx5e_ptp_params *cparams)
+static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
+ struct mlx5e_rq *rq)
{
+ struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5e_priv *priv = c->priv;
int err;
- err = mlx5e_ptp_open_cqs(c, cparams);
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->priv = priv;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->stats = &c->priv->ptp_stats.rq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+ err = mlx5e_rq_set_handlers(rq, params, false);
if (err)
return err;
- err = mlx5e_ptp_open_txqsqs(c, cparams);
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+}
+
+static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ int node = dev_to_node(c->mdev->device);
+ int err;
+
+ err = mlx5e_init_ptp_rq(c, params, &c->rq);
if (err)
- goto close_cqs;
+ return err;
+
+ return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+}
+
+static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
+ struct mlx5e_ptp_params *cparams)
+{
+ int err;
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ err = mlx5e_ptp_open_tx_cqs(c, cparams);
+ if (err)
+ return err;
+
+ err = mlx5e_ptp_open_txqsqs(c, cparams);
+ if (err)
+ goto close_tx_cqs;
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ err = mlx5e_ptp_open_rx_cq(c, cparams);
+ if (err)
+ goto close_txqsq;
+
+ err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
+ if (err)
+ goto close_rx_cq;
+ }
return 0;
-close_cqs:
- mlx5e_ptp_close_cqs(c);
+close_rx_cq:
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_close_cq(&c->rq.cq);
+close_txqsq:
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state))
+ mlx5e_ptp_close_txqsqs(c);
+close_tx_cqs:
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state))
+ mlx5e_ptp_close_tx_cqs(c);
return err;
}
-static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
+static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
- mlx5e_ptp_close_txqsqs(c);
- mlx5e_ptp_close_cqs(c);
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_cq(&c->rq.cq);
+ }
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ mlx5e_ptp_close_txqsqs(c);
+ mlx5e_ptp_close_tx_cqs(c);
+ }
}
-int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
- u8 lag_port, struct mlx5e_port_ptp **cp)
+static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
+{
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
+ __set_bit(MLX5E_PTP_STATE_TX, c->state);
+
+ if (params->ptp_rx)
+ __set_bit(MLX5E_PTP_STATE_RX, c->state);
+
+ return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
+}
+
+static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+
+ if (!ptp_fs->valid)
+ return;
+
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
+ mlx5e_fs_tt_redirect_any_destroy(priv);
+
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
+ mlx5e_fs_tt_redirect_udp_destroy(priv);
+ ptp_fs->valid = false;
+}
+
+static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5_flow_handle *rule;
+ u32 tirn = priv->ptp_tir.tirn;
+ int err;
+
+ if (ptp_fs->valid)
+ return 0;
+
+ err = mlx5e_fs_tt_redirect_udp_create(priv);
+ if (err)
+ goto out_free;
+
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP,
+ tirn, PTP_EV_PORT);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto out_destroy_fs_udp;
+ }
+ ptp_fs->udp_v4_rule = rule;
+
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP,
+ tirn, PTP_EV_PORT);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto out_destroy_udp_v4_rule;
+ }
+ ptp_fs->udp_v6_rule = rule;
+
+ err = mlx5e_fs_tt_redirect_any_create(priv);
+ if (err)
+ goto out_destroy_udp_v6_rule;
+
+ rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto out_destroy_fs_any;
+ }
+ ptp_fs->l2_rule = rule;
+ ptp_fs->valid = true;
+
+ return 0;
+
+out_destroy_fs_any:
+ mlx5e_fs_tt_redirect_any_destroy(priv);
+out_destroy_udp_v6_rule:
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
+out_destroy_udp_v4_rule:
+ mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
+out_destroy_fs_udp:
+ mlx5e_fs_tt_redirect_udp_destroy(priv);
+out_free:
+ return err;
+}
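
The rules installed above cover every transport PTP event messages can arrive on: UDP destination port 319 over IPv4 and IPv6, plus raw L2 frames with ethertype 0x88F7 (ETH_P_1588). A standalone sketch of the equivalent classification; PTP general messages (UDP port 320) intentionally stay on the regular RQs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define ETH_P_1588  0x88F7 /* raw-Ethernet PTP */
#define PTP_EV_PORT 319    /* PTP event messages over UDP */

/* Mirrors what the three flow-steering rules select in hardware. */
static bool is_ptp_event(uint16_t ethertype, bool is_udp, uint16_t dport)
{
	if (ethertype == ETH_P_1588)
		return true; /* matched by the fs_tt_redirect "any" L2 rule */
	if ((ethertype == ETH_P_IP || ethertype == ETH_P_IPV6) &&
	    is_udp && dport == PTP_EV_PORT)
		return true; /* matched by the two UDP redirect rules */
	return false;
}

int main(void)
{
	printf("%d\n", is_ptp_event(ETH_P_1588, false, 0)); /* 1 */
	printf("%d\n", is_ptp_event(ETH_P_IP, true, 319));  /* 1 */
	printf("%d\n", is_ptp_event(ETH_P_IP, true, 320));  /* 0 */
	return 0;
}
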
+
+int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ u8 lag_port, struct mlx5e_ptp **cp)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_ptp_params *cparams;
- struct mlx5e_port_ptp *c;
- unsigned int irq;
+ struct mlx5e_ptp *c;
int err;
- int eqn;
- err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq);
- if (err)
- return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
@@ -472,14 +675,17 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->priv = priv;
c->mdev = priv->mdev;
c->tstamp = &priv->tstamp;
- c->ix = 0;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
c->num_tc = params->num_tc;
- c->stats = &priv->port_ptp_stats.ch;
+ c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
+ err = mlx5e_ptp_set_state(c, params);
+ if (err)
+ goto err_free;
+
netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
mlx5e_ptp_build_params(c, cparams, params);
@@ -488,6 +694,9 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_napi_del;
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ priv->rx_ptp_opened = true;
+
*cp = c;
kvfree(cparams);
@@ -496,13 +705,13 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
err_napi_del:
netif_napi_del(&c->napi);
-
+err_free:
kvfree(cparams);
kvfree(c);
return err;
}
-void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
+void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
netif_napi_del(&c->napi);
@@ -510,22 +719,94 @@ void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
kvfree(c);
}
-void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
+void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
napi_enable(&c->napi);
- for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
+ }
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ mlx5e_ptp_rx_set_fs(c->priv);
+ mlx5e_activate_rq(&c->rq);
+ }
}
-void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
+void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ mlx5e_deactivate_rq(&c->rq);
+
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+ }
napi_disable(&c->napi);
}
+
+int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
+{
+ if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
+ return -EINVAL;
+
+ *rqn = c->rq.rqn;
+ return 0;
+}
+
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs;
+
+ if (!priv->profile->rx_ptp_support)
+ return 0;
+
+ ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
+ if (!ptp_fs)
+ return -ENOMEM;
+
+ priv->fs.ptp_fs = ptp_fs;
+ return 0;
+}
+
+void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+
+ if (!priv->profile->rx_ptp_support)
+ return;
+
+ mlx5e_ptp_rx_unset_fs(priv);
+ kfree(ptp_fs);
+}
+
+int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
+{
+ struct mlx5e_ptp *c = priv->channels.ptp;
+
+ if (!priv->profile->rx_ptp_support)
+ return 0;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ if (set) {
+ if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
+ return -EINVAL;
+ }
+ return mlx5e_ptp_rx_set_fs(priv);
+ }
+ /* set == false */
+ if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
+ return -EINVAL;
+ }
+ mlx5e_ptp_rx_unset_fs(priv);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 90c98ea63b7f..ab935cce952b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -5,7 +5,6 @@
#define __MLX5_EN_PTP_H__
#include "en.h"
-#include "en/params.h"
#include "en_stats.h"
struct mlx5e_ptpsq {
@@ -17,9 +16,16 @@ struct mlx5e_ptpsq {
struct mlx5e_ptp_cq_stats *cq_stats;
};
-struct mlx5e_port_ptp {
+enum {
+ MLX5E_PTP_STATE_TX,
+ MLX5E_PTP_STATE_RX,
+ MLX5E_PTP_STATE_NUM_STATES,
+};
+
+struct mlx5e_ptp {
/* data path */
struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_rq rq;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -34,20 +40,18 @@ struct mlx5e_port_ptp {
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
- DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
- int ix;
-};
-
-struct mlx5e_ptp_params {
- struct mlx5e_params params;
- struct mlx5e_sq_param txq_sq_param;
+ DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
-int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
- u8 lag_port, struct mlx5e_port_ptp **cp);
-void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c);
-void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c);
-void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c);
+int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ u8 lag_port, struct mlx5e_ptp **cp);
+void mlx5e_ptp_close(struct mlx5e_ptp *c);
+void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
+void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
+int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
+void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
+int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 12d7ad061237..5efe3278b0f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -232,8 +232,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
- mlx5e_build_sq_param(priv, params, &param_sq);
- mlx5e_build_tx_cq_param(priv, params, &param_cq);
+ mlx5e_build_sq_param(priv->mdev, params, &param_sq);
+ mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 065126370acd..6cdc52d50a48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -17,6 +17,7 @@
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
+#include "esw/sample.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -169,6 +170,9 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
+ if (!priv->netdev || !netif_device_present(priv->netdev))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
@@ -321,6 +325,9 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
int err = 0;
+ if (!netif_device_present(indr_priv->rpriv->netdev))
+ return -EOPNOTSUPP;
+
switch (flower->command) {
case FLOW_CLS_REPLACE:
err = mlx5e_configure_flower(netdev, priv, flower, flags);
@@ -605,27 +612,50 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
return true;
}
+
+static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
+
+ if (chain) {
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ u32 zone_restore_id;
+
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+ tc_skb_ext->chain = chain;
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
+ esw = priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
+ zone_restore_id))
+ return false;
+ }
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+}
#endif /* CONFIG_NET_TC_SKB_EXT */
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb,
struct mlx5e_tc_update_priv *tc_priv)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_mapped_obj mapped_obj;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
+ u32 reg_c0, reg_c1;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
- if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
- reg_c0 = 0;
- reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
- if (!reg_c0)
+ if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
return true;
/* If reg_c0 is not equal to the default flow tag then skb->mark
@@ -633,38 +663,33 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
*/
skb->mark = 0;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
-
- err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain);
+ err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
if (err) {
netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
+ "Couldn't find mapped object for reg_c0: %d, err: %d\n",
reg_c0, err);
return false;
}
- if (chain) {
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
-
- tc_skb_ext->chain = chain;
-
- zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
- zone_restore_id))
- return false;
- }
-
- tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN)
+ return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv);
#endif /* CONFIG_NET_TC_SKB_EXT */
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
+ mlx5_esw_sample_skb(skb, &mapped_obj);
+ return false;
+ }
+#endif /* CONFIG_MLX5_TC_SAMPLE */
+ if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE &&
+ mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) {
+ netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ return false;
+ }
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index d80bbd17e5f8..0eb125316fe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -4,6 +4,8 @@
#include "health.h"
#include "params.h"
#include "txrx.h"
+#include "devlink.h"
+#include "ptp.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
@@ -229,8 +231,9 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static int
+mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
u16 wqe_counter;
int wqes_sz;
@@ -246,14 +249,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
wq_head = mlx5e_rqwq_get_head(rq);
wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
- if (err)
- return err;
-
err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
if (err)
return err;
@@ -299,61 +294,155 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
return err;
}
- err = devlink_fmsg_obj_nest_end(fmsg);
+ return 0;
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
- return 0;
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
+ if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_params *params = &priv->channels.params;
- struct mlx5e_rq *generic_rq;
+ struct mlx5e_priv *priv = rq->priv;
+ struct mlx5e_params *params;
u32 rq_stride, rq_sz;
- int i, err = 0;
-
- mutex_lock(&priv->state_lock);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ bool real_time;
+ int err;
- generic_rq = &priv->channels.c[0]->rq;
- rq_sz = mlx5e_rqwq_get_size(generic_rq);
+ params = &priv->channels.params;
+ rq_sz = mlx5e_rqwq_get_size(rq);
+ real_time = mlx5_is_real_time_rq(priv->mdev);
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
- if (err)
- goto unlock;
-
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
if (err)
- goto unlock;
+ return err;
err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
if (err)
- goto unlock;
+ return err;
- err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg);
+ err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
if (err)
- goto unlock;
+ return err;
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int
+mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch,
+ struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int
+mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
+ if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
+ if (err)
+ return err;
+
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
+ err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
+ if (err)
+ return err;
+ }
+
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ if (err)
+ return err;
+
+ err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
if (err)
goto unlock;
@@ -368,9 +457,12 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
if (err)
goto unlock;
}
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
+ err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+ if (err)
+ goto unlock;
+ }
err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- goto unlock;
unlock:
mutex_unlock(&priv->state_lock);
return err;
@@ -502,6 +594,7 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
int i, err;
@@ -534,6 +627,12 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
return err;
}
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
+ err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
+ if (err)
+ return err;
+ }
+
return devlink_fmsg_arr_pair_nest_end(fmsg);
}
@@ -615,9 +714,10 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(dl_port, &mlx5_rx_reporter_ops,
MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
@@ -633,4 +733,5 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
return;
devlink_port_health_reporter_destroy(priv->rx_reporter);
+ priv->rx_reporter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index d7275c84313e..9d361efd5ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -3,6 +3,7 @@
#include "health.h"
#include "en/ptp.h"
+#include "en/devlink.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
@@ -256,12 +257,14 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *txqsq)
{
u32 sq_stride, sq_sz;
+ bool real_time;
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
if (err)
return err;
+ real_time = mlx5_is_real_time_sq(txqsq->mdev);
sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
sq_stride = MLX5_SEND_WQE_BB;
@@ -273,6 +276,10 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
if (err)
return err;
+ err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
+ if (err)
+ return err;
+
err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
if (err)
return err;
@@ -303,6 +310,7 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5e_ptpsq *generic_ptpsq;
int err;
@@ -314,12 +322,11 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
if (err)
return err;
- generic_ptpsq = priv->channels.port_ptp ?
- &priv->channels.port_ptp->ptpsq[0] :
- NULL;
- if (!generic_ptpsq)
+ if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto out;
+ generic_ptpsq = &ptp_ch->ptpsq[0];
+
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
if (err)
return err;
@@ -345,7 +352,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
int i, tc, err = 0;
@@ -374,7 +381,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
}
- if (!ptp_ch)
+ if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
@@ -459,7 +466,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
- struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
int i, tc, err;
@@ -496,7 +503,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
}
}
- if (ptp_ch) {
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
@@ -572,9 +579,10 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_tx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(dl_port, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
@@ -591,4 +599,5 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
return;
devlink_port_health_reporter_destroy(priv->tx_reporter);
+ priv->tx_reporter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 68e54cc1cd16..5da5e5323a44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -29,6 +29,8 @@
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
+#define MLX5_CT_STATE_RELATED_BIT BIT(5)
+#define MLX5_CT_STATE_INVALID_BIT BIT(6)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -717,7 +719,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
zone_rule->nat = nat;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -759,7 +761,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
zone_rule->attr = attr;
- kfree(spec);
+ kvfree(spec);
ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
return 0;
@@ -771,7 +773,7 @@ err_rule:
err_mod_hdr:
kfree(attr);
err_attr:
- kfree(spec);
+ kvfree(spec);
return err;
}
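struct mlx5_flow_spec is large, so the hunk above switches it from kzalloc() to kvzalloc(), which falls back to vmalloc when physically contiguous memory is scarce; every free of that pointer must then be kvfree(), including the error paths. A minimal sketch of the pairing:

    struct mlx5_flow_spec *spec;

    spec = kvzalloc(sizeof(*spec), GFP_KERNEL); /* may be vmalloc-backed */
    if (!spec)
            return -ENOMEM;

    /* ... build match criteria, add the rule ... */

    kvfree(spec); /* correct for kmalloc- and vmalloc-backed memory alike */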
@@ -1229,8 +1231,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
+ bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- bool trk, est, untrk, unest, new, rpl, unrpl;
struct flow_dissector_key_ct *mask, *key;
u32 ctstate = 0, ctstate_mask = 0;
u16 ct_state_on, ct_state_off;
@@ -1258,7 +1260,9 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
TCA_FLOWER_KEY_CT_FLAGS_NEW |
- TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY |
+ TCA_FLOWER_KEY_CT_FLAGS_RELATED |
+ TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
NL_SET_ERR_MSG_MOD(extack,
"only ct_state trk, est, new and rpl are supported for offload");
return -EOPNOTSUPP;
@@ -1270,9 +1274,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
+ rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
+ inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
+ unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
+ uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
@@ -1280,6 +1288,20 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
+ ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
+ ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0;
+
+ if (rel) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +rel isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (inv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +inv isn't supported");
+ return -EOPNOTSUPP;
+ }
if (new) {
NL_SET_ERR_MSG_MOD(extack,
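These hunks make -rel and -inv expressible: a flag cleared in the filter sets only the mask bit, while a set flag (+rel, +inv) is still rejected with the errors above. A small illustration for ct_state -rel, using the bit names defined earlier in this file:

    /* ct_state -rel: flag present in the mask, absent from the key */
    rel   = ct_state_on  & TCA_FLOWER_KEY_CT_FLAGS_RELATED;  /* 0 */
    unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;  /* 1 */

    /* key bit stays clear, mask bit gets set: match "not related" */
    ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;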
@@ -1562,6 +1584,14 @@ mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
}
+/* To avoid a false lock dependency warning, give the ct_entries_ht mutex a
+ * lock class distinct from that of other rhashtables: when the last flow in
+ * a group is deleted and the group itself is then removed, del_sw_flow_group()
+ * calls rhashtable_destroy() on fg->ftes_hash, which takes that table's
+ * ht->mutex, a different mutex than the ht->mutex here.
+ */
+static struct lock_class_key ct_entries_ht_lock_key;
+
static struct mlx5_ct_ft *
mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
struct nf_flowtable *nf_ft)
@@ -1596,6 +1626,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
if (err)
goto err_init;
+ lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key);
+
err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
zone_params);
if (err)
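The same idiom applies whenever two locks of one class can legitimately nest: declare a static struct lock_class_key and reassign one instance's class right after initialization. A minimal sketch, assuming ht is a freshly initialized rhashtable:

    static struct lock_class_key my_ht_lock_key; /* one key per distinct class */

    err = rhashtable_init(&ht, &params);
    if (err)
            return err;
    /* must come after init: rhashtable_init() assigns the default class */
    lockdep_set_class(&ht.mutex, &my_ht_lock_key);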
@@ -1697,10 +1729,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_ft *ft;
u32 fte_id = 1;
- post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
+ post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!post_ct_spec || !ct_flow) {
- kfree(post_ct_spec);
+ kvfree(post_ct_spec);
kfree(ct_flow);
return ERR_PTR(-ENOMEM);
}
@@ -1810,6 +1842,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_flow->post_ct_attr->prio = 0;
ct_flow->post_ct_attr->ft = ct_priv->post_ct;
+ /* Splits were handled before CT */
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ ct_flow->post_ct_attr->esw_attr->split_count = 0;
+
ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
@@ -1835,7 +1871,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
attr->ct_attr.ct_flow = ct_flow;
dealloc_mod_hdr_actions(&pre_mod_acts);
- kfree(post_ct_spec);
+ kvfree(post_ct_spec);
return rule;
@@ -1856,7 +1892,7 @@ err_alloc_pre:
err_idr:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
- kfree(post_ct_spec);
+ kvfree(post_ct_spec);
kfree(ct_flow);
netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
return ERR_PTR(err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index c223591ffc22..d1599b7b944b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -27,6 +27,7 @@ enum {
MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
+ MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
};
struct mlx5e_tc_flow_parse_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index e1271998b937..9350ca05ce65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -83,10 +83,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
static inline int
mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
-int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
{ return -EOPNOTSUPP; }
#endif
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
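The corrected stubs follow the usual kernel pattern for compiled-out features: when the config is off, the header supplies static inline bodies returning -EOPNOTSUPP so callers build unchanged and the dead branches fold away. A minimal sketch with hypothetical names:

    #ifdef CONFIG_MY_FEATURE
    int my_feature_do(struct my_dev *dev);  /* real body lives in a .c file */
    #else
    static inline int my_feature_do(struct my_dev *dev)
    { return -EOPNOTSUPP; }                 /* stub for the disabled case */
    #endif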
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 9f16ad2c0710..593503bc4d07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021 Mellanox Technologies. */
#include <net/fib_notifier.h>
+#include <net/nexthop.h>
#include "tc_tun_encap.h"
#include "en_tc.h"
#include "tc_tun.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 37fc1d77ded7..86ab4e864fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -30,172 +30,62 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
- struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
- struct mlx5e_ch_stats *ch_stats,
+static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params,
struct mlx5e_rq *rq)
{
- void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct page_pool_params pp_params = {};
- int node = dev_to_node(mdev->device);
- u32 pool_size;
- int wq_sz;
- int err;
- int i;
-
- rqp->wq.db_numa_node = node;
-
- rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
- rq->netdev = priv->netdev;
- rq->mdev = mdev;
- rq->priv = priv;
- rq->stats = stats;
- rq->clock = &mdev->clock;
- rq->tstamp = &priv->tstamp;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-
+ struct mlx5_core_dev *mdev = t->mdev;
+ struct mlx5e_priv *priv = t->priv;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->priv = priv;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->stats = &priv->trap_stats.rq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
xdp_rxq_info_unused(&rq->xdp_rxq);
-
- rq->buff.map_dir = DMA_FROM_DEVICE;
- rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
- pool_size = 1 << params->log_rq_mtu_frames;
-
- err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
- if (err)
- return err;
-
- rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
-
- wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
-
- rq->wqe.info = rqp->frags_info;
- rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
- rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
- (wq_sz << rq->wqe.info.log_num_frags)),
- GFP_KERNEL, node);
- if (!rq->wqe.frags) {
- err = -ENOMEM;
- goto err_wq_cyc_destroy;
- }
-
- err = mlx5e_init_di_list(rq, wq_sz, node);
- if (err)
- goto err_free_frags;
-
- rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
-
mlx5e_rq_set_trap_handlers(rq, params);
-
- /* Create a page_pool and register it with rxq */
- pp_params.order = 0;
- pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
- pp_params.pool_size = pool_size;
- pp_params.nid = node;
- pp_params.dev = mdev->device;
- pp_params.dma_dir = rq->buff.map_dir;
-
- /* page_pool can be used even when there is no rq->xdp_prog,
- * given page_pool does not handle DMA mapping there is no
- * required state to clear. And page_pool gracefully handle
- * elevated refcnt.
- */
- rq->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(rq->page_pool)) {
- err = PTR_ERR(rq->page_pool);
- rq->page_pool = NULL;
- goto err_free_di_list;
- }
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_rx_wqe_cyc *wqe =
- mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
- int f;
-
- for (f = 0; f < rq->wqe.info.num_frags; f++) {
- u32 frag_size = rq->wqe.info.arr[f].frag_size |
- MLX5_HW_START_PADDING;
-
- wqe->data[f].byte_count = cpu_to_be32(frag_size);
- wqe->data[f].lkey = rq->mkey_be;
- }
- /* check if num_frags is not a pow of two */
- if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
- wqe->data[f].byte_count = 0;
- wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
- wqe->data[f].addr = 0;
- }
- }
- return 0;
-
-err_free_di_list:
- mlx5e_free_di_list(rq);
-err_free_frags:
- kvfree(rq->wqe.frags);
-err_wq_cyc_destroy:
- mlx5_wq_destroy(&rq->wq_ctrl);
-
- return err;
}
-static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
-{
- page_pool_destroy(rq->page_pool);
- mlx5e_free_di_list(rq);
- kvfree(rq->wqe.frags);
- mlx5_wq_destroy(&rq->wq_ctrl);
-}
-
-static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
- struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param,
- struct mlx5e_ch_stats *ch_stats,
- struct mlx5e_rq *rq)
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
{
+ struct mlx5e_rq_param *rq_param = &t->rq_param;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
- struct mlx5e_cq *cq = &rq->cq;
+ struct mlx5e_rq *rq = &t->rq;
+ int node;
int err;
- ccp.node = dev_to_node(mdev->device);
- ccp.ch_stats = ch_stats;
- ccp.napi = napi;
+ node = dev_to_node(mdev->device);
+
+ ccp.node = node;
+ ccp.ch_stats = t->stats;
+ ccp.napi = &t->napi;
ccp.ix = 0;
- err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+ err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
- err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+ mlx5e_init_trap_rq(t, &t->params, rq);
+ err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
if (err)
goto err_destroy_cq;
- err = mlx5e_create_rq(rq, rq_param);
- if (err)
- goto err_free_rq;
-
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
- if (err)
- goto err_destroy_rq;
-
return 0;
-err_destroy_rq:
- mlx5e_destroy_rq(rq);
- mlx5e_free_rx_descs(rq);
-err_free_rq:
- mlx5e_free_trap_rq(rq);
err_destroy_cq:
- mlx5e_close_cq(cq);
+ mlx5e_close_cq(&rq->cq);
return err;
}
static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
{
- mlx5e_destroy_rq(rq);
- mlx5e_free_rx_descs(rq);
- mlx5e_free_trap_rq(rq);
+ mlx5e_close_rq(rq);
mlx5e_close_cq(&rq->cq);
}
@@ -213,7 +103,7 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml
return -ENOMEM;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rqn);
@@ -228,24 +118,16 @@ static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct
mlx5e_destroy_tir(mdev, tir);
}
-static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
-{
- set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-}
-
-static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
-{
- clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-}
-
-static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
+static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
+ int max_mtu, u16 q_counter,
+ struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
- mlx5e_init_rq_type_params(priv->mdev, params);
- params->sw_mtu = priv->netdev->max_mtu;
- mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
+ mlx5e_init_rq_type_params(mdev, params);
+ params->sw_mtu = max_mtu;
+ mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
}
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
@@ -259,23 +141,19 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
if (!t)
return ERR_PTR(-ENOMEM);
- mlx5e_build_trap_params(priv, t);
+ mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
t->priv = priv;
t->mdev = priv->mdev;
t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
- t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
t->stats = &priv->trap_stats.ch;
netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
- err = mlx5e_open_trap_rq(priv, &t->napi,
- &priv->trap_stats.rq,
- &t->params, &t->rq_param,
- &priv->trap_stats.ch,
- &t->rq);
+ err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
goto err_napi_del;
@@ -304,15 +182,14 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
napi_enable(&trap->napi);
- mlx5e_activate_trap_rq(&trap->rq);
- napi_schedule(&trap->napi);
+ mlx5e_activate_rq(&trap->rq);
}
void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
{
struct mlx5e_trap *trap = priv->en_trap;
- mlx5e_deactivate_trap_rq(&trap->rq);
+ mlx5e_deactivate_rq(&trap->rq);
napi_disable(&trap->napi);
}
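The net effect of this refactor is that the trap queue stops duplicating the generic RQ plumbing: a small init helper fills only the software fields, and mlx5e_open_rq()/mlx5e_close_rq() plus mlx5e_activate_rq()/mlx5e_deactivate_rq() now own WQ creation, frag and page-pool setup, and the RST-to-RDY transition. The resulting bring-up order, condensed from the hunks above with error handling elided:

    err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
    mlx5e_init_trap_rq(t, &t->params, rq);      /* software fields only */
    err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
                                                /* wq, frags, page pool, RDY */
    mlx5e_activate_rq(rq);                      /* generic enable */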
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 2e3e78b0f333..2f0df5cc1a2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -500,7 +500,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_xdpsq *sq;
- int drops = 0;
+ int nxmit = 0;
int sq_num;
int i;
@@ -529,11 +529,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
xdptxd.len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- continue;
- }
+ if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+ break;
xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
xdpi.frame.xdpf = xdpf;
@@ -544,9 +541,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
- xdp_return_frame_rx_napi(xdpf);
- drops++;
+ break;
}
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH) {
@@ -555,7 +552,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
mlx5e_xmit_xdp_doorbell(sq);
}
- return n - drops;
+ return nxmit;
}
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
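This hunk adopts the revised .ndo_xdp_xmit contract: rather than freeing failed frames itself and counting drops, the driver stops at the first failure and returns how many frames it actually queued; the unsent tail remains owned by the caller. A caller-side sketch of that contract:

    int i, sent;

    sent = ops->ndo_xdp_xmit(dev, n, frames, flags);
    if (sent < 0)
            sent = 0;       /* treat a hard error as nothing sent */
    for (i = sent; i < n; i++)
            xdp_return_frame_rx_napi(frames[i]); /* caller frees the rest */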
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index f4bce1365639..a8315f166696 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -35,13 +35,59 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
}
}
-static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
+static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
+ u16 q_counter,
struct mlx5e_channel_param *cparam)
{
- mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
- mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+ mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+}
+
+static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct xsk_buff_pool *pool,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = c->mdev;
+ int rq_xdp_ix;
+ int err;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->priv = c->priv;
+ rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
+ rq->icosq = &c->icosq;
+ rq->ix = c->ix;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->xdpsq = &c->rq_xdpsq;
+ rq->xsk_pool = pool;
+ rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+ rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
+ err = mlx5e_rq_set_handlers(rq, params, xsk);
+ if (err)
+ return err;
+
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
+}
+
+static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
+ struct mlx5e_xsk_param *xsk)
+{
+ int err;
+
+ err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
+ if (err)
+ return err;
+
+ return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
@@ -61,14 +107,14 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;
- mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
+ mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
+ err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
if (unlikely(err))
goto err_close_rx_cq;
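As with the trap queue earlier in this diff, the XSK RQ now separates per-queue field initialization from the shared bring-up: mlx5e_init_xsk_rq() fills the software fields, wires in the xsk_buff_pool, and registers the XDP rxq info at the XSK group index (c->ix + num_channels * MLX5E_RQ_GROUP_XSK), after which the common mlx5e_open_rq() does everything else. The two-step shape, condensed:

    err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
    if (err)
            return err;     /* includes xdp_rxq_info_reg() failure */
    return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);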
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index cc0efac7b812..00af0b831a28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -123,11 +123,10 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
mlx5e_udp_gso_handle_tx_skb(skb);
#ifdef CONFIG_MLX5_EN_TLS
- if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- /* May send SKBs and WQEs. */
+ /* May send SKBs and WQEs. */
+ if (mlx5e_tls_skb_offloaded(skb))
if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
return false;
- }
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
@@ -186,7 +185,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+ mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 381a9c8c9da9..34119ce92031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -60,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -101,7 +101,7 @@ out:
if (err)
mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
- kfree(spec);
+ kvfree(spec);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index baa58b62e8df..aaa579bf9a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -12,6 +12,9 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
+struct mlx5e_ktls_resync_resp *
+mlx5e_ktls_rx_resync_create_resp_list(void);
+void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -33,6 +36,14 @@ static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enab
return -EOPNOTSUPP;
}
+static inline struct mlx5e_ktls_resync_resp *
+mlx5e_ktls_rx_resync_create_resp_list(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
#endif
#endif /* __MLX5E_TLS_H__ */
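Note the asymmetry in these stubs: the create stub returns ERR_PTR(-EOPNOTSUPP) rather than NULL, so a caller checking only for NULL would mistake the stub for success. Callers are expected to test with IS_ERR(); a minimal sketch:

    struct mlx5e_ktls_resync_resp *list;

    list = mlx5e_ktls_rx_resync_create_resp_list();
    if (IS_ERR(list))               /* catches -ENOMEM and the stub alike */
            return PTR_ERR(list);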
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 19d22a63313f..4e58fade7a60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -56,6 +56,7 @@ struct mlx5e_ktls_offload_context_rx {
/* resync */
struct mlx5e_ktls_rx_resync_ctx resync;
+ struct list_head list;
};
static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
@@ -72,6 +73,32 @@ static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx
refcount_inc(&priv_rx->resync.refcnt);
}
+struct mlx5e_ktls_resync_resp {
+ /* protects list changes */
+ spinlock_t lock;
+ struct list_head list;
+};
+
+void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
+{
+ kvfree(resp_list);
+}
+
+struct mlx5e_ktls_resync_resp *
+mlx5e_ktls_rx_resync_create_resp_list(void)
+{
+ struct mlx5e_ktls_resync_resp *resp_list;
+
+ resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
+ if (!resp_list)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&resp_list->list);
+ spin_lock_init(&resp_list->lock);
+
+ return resp_list;
+}
+
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
int err, inlen;
@@ -85,7 +112,7 @@ static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
@@ -119,8 +146,7 @@ out:
complete(&priv_rx->add_ctx);
}
-static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
- struct sock *sk)
+static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
INIT_WORK(&rule->work, accel_rule_handle_work);
rule->priv = priv;
@@ -359,33 +385,32 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
/* Function can be called with the refcount being either elevated or not.
* It does not affect the refcount.
*/
-static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
- struct mlx5e_channel *c)
+static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
+ struct mlx5e_channel *c)
{
struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
- struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
- int err;
+ bool trigger_poll;
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
- err = 0;
sq = &c->async_icosq;
- spin_lock_bh(&c->async_icosq_lock);
+ ktls_resync = sq->ktls_resync;
- cseg = post_static_params(sq, priv_rx);
- if (IS_ERR(cseg)) {
- priv_rx->rq_stats->tls_resync_res_skip++;
- err = PTR_ERR(cseg);
- goto unlock;
- }
- /* Do not increment priv_rx refcnt, CQE handling is empty */
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- priv_rx->rq_stats->tls_resync_res_ok++;
-unlock:
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_lock_bh(&ktls_resync->lock);
+ list_add_tail(&priv_rx->list, &ktls_resync->list);
+ trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_unlock_bh(&ktls_resync->lock);
- return err;
+ if (!trigger_poll)
+ return;
+
+ if (!napi_if_scheduled_mark_missed(&c->napi)) {
+ spin_lock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_irq(sq);
+ spin_unlock_bh(&c->async_icosq_lock);
+ }
}
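The resync handler no longer posts WQEs from this context; it queues the connection on the per-SQ list and, only if it was the one to set the pending bit, nudges NAPI. This is a standard deferred-work handoff; sketched generically, with everything except napi_if_scheduled_mark_missed() and mlx5e_trigger_irq() as illustrative names:

    /* producer: enqueue and wake the consumer exactly once */
    spin_lock_bh(&q->lock);
    list_add_tail(&item->list, &q->list);
    kick = !test_and_set_bit(PENDING_BIT, &sq->state);
    spin_unlock_bh(&q->lock);

    if (kick && !napi_if_scheduled_mark_missed(napi)) {
            /* NAPI idle: post a no-op WQE so its completion fires an IRQ */
            spin_lock_bh(&sq_lock);
            mlx5e_trigger_irq(sq);
            spin_unlock_bh(&sq_lock);
    }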
/* Function can be called with the refcount being either elevated or not.
@@ -618,7 +643,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
init_completion(&priv_rx->add_ctx);
- accel_rule_init(&priv_rx->rule, priv, sk);
+ accel_rule_init(&priv_rx->rule, priv);
resync = &priv_rx->resync;
resync_init(resync, priv);
tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
@@ -676,3 +701,65 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
*/
mlx5e_ktls_priv_rx_put(priv_rx);
}
+
+bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
+ struct mlx5e_ktls_resync_resp *ktls_resync;
+ struct mlx5_wqe_ctrl_seg *db_cseg;
+ struct mlx5e_icosq *sq;
+ LIST_HEAD(local_list);
+ int i, j;
+
+ sq = &c->async_icosq;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ ktls_resync = sq->ktls_resync;
+ db_cseg = NULL;
+ i = 0;
+
+ spin_lock(&ktls_resync->lock);
+ list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
+ list_move(&priv_rx->list, &local_list);
+ if (++i == budget)
+ break;
+ }
+ if (list_empty(&ktls_resync->list))
+ clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_unlock(&ktls_resync->lock);
+
+ spin_lock(&c->async_icosq_lock);
+ for (j = 0; j < i; j++) {
+ struct mlx5_wqe_ctrl_seg *cseg;
+
+ priv_rx = list_first_entry(&local_list,
+ struct mlx5e_ktls_offload_context_rx,
+ list);
+ cseg = post_static_params(sq, priv_rx);
+ if (IS_ERR(cseg))
+ break;
+ list_del(&priv_rx->list);
+ db_cseg = cseg;
+ }
+ if (db_cseg)
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
+ spin_unlock(&c->async_icosq_lock);
+
+ priv_rx->rq_stats->tls_resync_res_ok += j;
+
+ if (!list_empty(&local_list)) {
+ /* This happens only if ICOSQ is full.
+ * There is no need to mark busy or explicitly ask for a NAPI cycle,
+ * it will be triggered by the outstanding ICOSQ completions.
+ */
+ spin_lock(&ktls_resync->lock);
+ list_splice(&local_list, &ktls_resync->list);
+ set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_unlock(&ktls_resync->lock);
+ priv_rx->rq_stats->tls_resync_res_retry++;
+ }
+
+ return i == budget;
+}
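mlx5e_ktls_rx_handle_resync_list() above is the matching NAPI-side consumer: it splices up to budget entries onto a local list, posts static params under the ICOSQ lock, rings a single doorbell for the whole batch, and splices leftovers back (re-setting the pending bit) when the SQ runs out of room. A condensed sketch of the budgeted drain; note that the final stats update in the real function assumes at least one entry was examined:

    spin_lock(&q->lock);
    list_for_each_entry_safe(item, tmp, &q->list, list) {
            list_move(&item->list, &local);
            if (++i == budget)
                    break;
    }
    if (list_empty(&q->list))
            clear_bit(PENDING_BIT, &sq->state);
    spin_unlock(&q->lock);

    /* post items from 'local' under the SQ lock, one doorbell at the end;
     * if posting fails (SQ full), splice 'local' back and re-set the bit
     */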
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index ee04e916fa21..8f79335057dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -40,6 +40,14 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
}
return false;
}
+
+bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget);
+
+static inline bool
+mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
+{
+ return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
+}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -49,6 +57,18 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
return false;
}
+static inline bool
+mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
+{
+ return false;
+}
+
+static inline bool
+mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
+{
+ return false;
+}
+
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 2b51d3222ca1..82dc09aaa7fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -263,9 +263,6 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
int datalen;
u32 skb_seq;
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- return true;
-
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
return true;
@@ -301,12 +298,6 @@ err_out:
return false;
}
-void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state)
-{
- cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
-}
-
static int tls_update_resync_sn(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_tls_metadata *mdata)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 9923132c9440..0ca0a023fb8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -47,8 +47,18 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
-void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state);
+
+static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
+static inline void
+mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
+}
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 39475f6565c7..5cd466ec6492 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -36,6 +36,32 @@
#include <linux/ipv6.h>
#include "en.h"
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
+
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
+};
+
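The aRFS bookkeeping moves out of the header and behind a pointer: per tuple type there is one flow table plus a 256-bucket hash of rules (ARFS_HASH_SIZE = BIT(BITS_PER_BYTE)), all guarded by one spinlock, and the container is only allocated when NETIF_F_NTUPLE is actually available. A hypothetical illustration of the bucket math, since the real arfs_hash_bucket() helper is not shown in this hunk:

    /* 256 buckets: fold a port into ARFS_HASH_SHIFT (= 8) bits */
    struct hlist_head *bucket =
            &arfs_t->rules_hash[hash_32((__force u32)src_port,
                                        ARFS_HASH_SHIFT)];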
struct arfs_tuple {
__be16 etype;
u8 ip_proto;
@@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+ dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
if (err) {
@@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
{
int i;
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
- return;
-
arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs.wq);
+ destroy_workqueue(priv->fs.arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
}
}
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return;
+
+ _mlx5e_cleanup_tables(priv);
+ kvfree(priv->fs.arfs);
+}
+
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -290,7 +322,7 @@ out:
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
- spin_lock_init(&priv->fs.arfs.arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs.rules);
- priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs.wq)
+ priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
+ if (!priv->fs.arfs)
return -ENOMEM;
+ spin_lock_init(&priv->fs.arfs->arfs_lock);
+ INIT_LIST_HEAD(&priv->fs.arfs->rules);
+ priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs.arfs->wq)
+ goto err;
+
for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i);
if (err)
- goto err;
+ goto err_des;
}
return 0;
+
+err_des:
+ _mlx5e_cleanup_tables(priv);
err:
- mlx5e_arfs_destroy_tables(priv);
+ kvfree(priv->fs.arfs);
return err;
}
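The reworked create path now has two unwind points: err_des tears down whatever _mlx5e_cleanup_tables() covers (the workqueue and any tables created so far), while the bare err label only frees the container. This is the standard goto-ladder shape; a minimal sketch:

    obj = kvzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
            return -ENOMEM;

    obj->wq = create_singlethread_workqueue("my_wq");
    if (!obj->wq) {
            err = -ENOMEM;
            goto err_free;          /* nothing else to undo yet */
    }

    err = setup_tables(obj);        /* illustrative helper */
    if (err)
            goto err_destroy;       /* undo the workqueue too */

    return 0;

    err_destroy:
            destroy_workqueue(obj->wq);
    err_free:
            kvfree(obj);
            return err;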
@@ -353,13 +392,13 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
+ HLIST_HEAD(del_list);
int quota = 0;
int i;
int j;
- HLIST_HEAD(del_list);
- spin_lock_bh(&priv->fs.arfs.arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -383,16 +422,16 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
{
struct hlist_node *htmp;
struct arfs_rule *rule;
+ HLIST_HEAD(del_list);
int i;
int j;
- HLIST_HEAD(del_list);
- spin_lock_bh(&priv->fs.arfs.arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ spin_unlock_bh(&priv->fs.arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ spin_lock_bh(&priv->fs.arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ spin_unlock_bh(&priv->fs.arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+ queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index a6cf008057b5..8c166ee56d8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -38,15 +38,16 @@
int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
{
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
err = mlx5_core_create_tir(mdev, in, &tir->tirn);
if (err)
return err;
- mutex_lock(&mdev->mlx5e_res.td.list_lock);
- list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
- mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ mutex_lock(&res->td.list_lock);
+ list_add(&tir->list, &res->td.tirs_list);
+ mutex_unlock(&res->td.list_lock);
return 0;
}
@@ -54,10 +55,12 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
- mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+
+ mutex_lock(&res->td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
- mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ mutex_unlock(&res->td.list_lock);
}
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
@@ -99,7 +102,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
{
- struct mlx5e_resources *res = &mdev->mlx5e_res;
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
err = mlx5_core_alloc_pd(mdev, &res->pdn);
@@ -126,8 +129,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
- INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
- mutex_init(&mdev->mlx5e_res.td.list_lock);
+ INIT_LIST_HEAD(&res->td.tirs_list);
+ mutex_init(&res->td.list_lock);
return 0;
@@ -142,7 +145,7 @@ err_dealloc_pd:
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
- struct mlx5e_resources *res = &mdev->mlx5e_res;
+ struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, &res->mkey);
@@ -180,8 +183,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
- mutex_lock(&mdev->mlx5e_res.td.list_lock);
- list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock);
+ list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
@@ -192,7 +195,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
- mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f23c67575073..a4c8d8d00d5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1149,35 +1149,23 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
- struct mlx5e_channels new_channels = {};
- bool reset_channels = true;
- bool opened;
- int err = 0;
+ struct mlx5e_params new_params;
+ bool reset = true;
+ int err;
mutex_lock(&priv->state_lock);
- new_channels.params = priv->channels.params;
- mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
+ new_params = priv->channels.params;
+ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
trust_state);
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (!opened)
- reset_channels = false;
-
/* Skip if tx_min_inline is the same */
- if (new_channels.params.tx_min_inline_mode ==
- priv->channels.params.tx_min_inline_mode)
- reset_channels = false;
-
- if (reset_channels) {
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_update_trust_state_hw,
- &trust_state);
- } else {
- err = mlx5e_update_trust_state_hw(priv, &trust_state);
- if (!err && !opened)
- priv->channels.params = new_channels.params;
- }
+ if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
+ reset = false;
+
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_update_trust_state_hw,
+ &trust_state, reset);
mutex_unlock(&priv->state_lock);
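This is the first of many conversions in this diff from mlx5e_safe_switch_channels(), which took a whole struct mlx5e_channels, to mlx5e_safe_switch_params(), which takes just the parameter block plus an explicit reset flag. The flag absorbs the old open-coded special cases: reset=false merely commits the parameters (covering the "device closed" and "nothing hardware-visible changed" paths), while reset=true rebuilds the channels, optionally running a preactivate callback. The typical caller shape after conversion:

    struct mlx5e_params new_params;
    bool reset = true;

    mutex_lock(&priv->state_lock);
    new_params = priv->channels.params;
    /* ... adjust new_params ... */
    if (nothing_hw_visible_changed)         /* illustrative condition */
            reset = false;
    err = mlx5e_safe_switch_params(priv, &new_params,
                                   preactivate_cb, cb_ctx, reset);
    mutex_unlock(&priv->state_lock);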
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 53802e18af90..8360289813f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -34,6 +34,7 @@
#include "en/port.h"
#include "en/params.h"
#include "en/xsk/pool.h"
+#include "en/ptp.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -325,7 +326,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
@@ -364,20 +365,15 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
- new_channels.params = priv->channels.params;
- new_channels.params.log_rq_mtu_frames = log_rq_size;
- new_channels.params.log_sq_size = log_sq_size;
+ new_params = priv->channels.params;
+ new_params.log_rq_mtu_frames = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
- err = mlx5e_validate_params(priv, &new_channels.params);
+ err = mlx5e_validate_params(priv->mdev, &new_params);
if (err)
goto unlock;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- goto unlock;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
unlock:
mutex_unlock(&priv->state_lock);
@@ -422,8 +418,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
{
struct mlx5e_params *cur_params = &priv->channels.params;
unsigned int count = ch->combined_count;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
bool arfs_enabled;
+ bool opened;
int err = 0;
if (!count) {
@@ -458,28 +455,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = *cur_params;
- new_channels.params.num_channels = count;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_params old_params;
+ new_params = *cur_params;
+ new_params.num_channels = count;
- old_params = *cur_params;
- *cur_params = new_channels.params;
- err = mlx5e_num_channels_changed(priv);
- if (err)
- *cur_params = old_params;
-
- goto out;
- }
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE;
+ arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
mlx5e_arfs_disable(priv);
/* Switch to new channels, set new parameters and close old ones */
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_num_channels_changed_ctx, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
int err2 = mlx5e_arfs_enable(priv);
@@ -574,8 +561,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
bool reset_rx, reset_tx;
+ bool reset = true;
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
@@ -596,51 +584,47 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
mutex_lock(&priv->state_lock);
- new_channels.params = priv->channels.params;
+ new_params = priv->channels.params;
- rx_moder = &new_channels.params.rx_cq_moderation;
+ rx_moder = &new_params.rx_cq_moderation;
rx_moder->usec = coal->rx_coalesce_usecs;
rx_moder->pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
- tx_moder = &new_channels.params.tx_cq_moderation;
+ tx_moder = &new_params.tx_cq_moderation;
tx_moder->usec = coal->tx_coalesce_usecs;
tx_moder->pkts = coal->tx_max_coalesced_frames;
- new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+ new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
if (reset_rx) {
- u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
- mlx5e_reset_rx_moderation(&new_channels.params, mode);
+ mlx5e_reset_rx_moderation(&new_params, mode);
}
if (reset_tx) {
- u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_TX_CQE_BASED_MODER);
- mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ mlx5e_reset_tx_moderation(&new_params, mode);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- goto out;
- }
-
- if (!reset_rx && !reset_tx) {
+ /* If DIM state hasn't changed, it's possible to modify interrupt
+ * moderation parameters on the fly, even if the channels are open.
+ */
+ if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
if (!coal->use_adaptive_rx_coalesce)
mlx5e_set_priv_channels_rx_coalesce(priv, coal);
if (!coal->use_adaptive_tx_coalesce)
mlx5e_set_priv_channels_tx_coalesce(priv, coal);
- priv->channels.params = new_channels.params;
- goto out;
+ reset = false;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
-out:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -1601,6 +1585,14 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static void mlx5e_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_fec_get(priv, fec_stats);
+}
+
static int mlx5e_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam)
{
@@ -1769,6 +1761,49 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_module_eeprom_query_params query;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 *data = page_data->data;
+ int size_read;
+ int i = 0;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ memset(data, 0, page_data->length);
+
+ query.offset = page_data->offset;
+ query.i2c_address = page_data->i2c_address;
+ query.bank = page_data->bank;
+ query.page = page_data->page;
+ while (i < page_data->length) {
+ query.size = page_data->length - i;
+ size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
+
+ /* Done reading, return how many bytes were read */
+ if (!size_read)
+ return i;
+
+ if (size_read == -EINVAL)
+ return -EINVAL;
+ if (size_read < 0) {
+ netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
+ __func__, size_read);
+ return i;
+ }
+
+ i += size_read;
+ query.offset += size_read;
+ }
+
+ return i;
+}
+
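The by-page read above accumulates partial reads: each pass asks for the remaining length, advances both the output cursor and the device offset by whatever the firmware returned, and treats a zero-length read as end of data. The accounting pattern in generic form, with read_chunk() as an illustrative stand-in for the firmware query:

    int i = 0, n;

    while (i < len) {
            n = read_chunk(dev, off + i, buf + i, len - i);
            if (n == 0)
                    break;            /* device has no more data */
            if (n < 0)
                    return i ? i : n; /* report progress if any was made */
            i += n;
    }
    return i;                         /* total bytes read */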
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash)
{
@@ -1808,7 +1843,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
bool mode_changed;
u8 cq_period_mode, current_cq_period_mode;
@@ -1827,18 +1862,13 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
if (!mode_changed)
return 0;
- new_channels.params = priv->channels.params;
+ new_params = priv->channels.params;
if (is_rx_cq)
- mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode);
+ mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
else
- mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode);
+ mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
-
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1854,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
int err = 0;
if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
@@ -1863,15 +1893,16 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
+ new_params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+ if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+ new_params.ptp_rx = new_val;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ if (new_params.ptp_rx == priv->channels.params.ptp_rx)
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ else
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
if (err)
return err;
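CQE compression and PTP RX steering are now coupled: when hardware timestamping is on (rx_filter != HWTSTAMP_FILTER_NONE), toggling compression also toggles new_params.ptp_rx, and if ptp_rx actually changes, the switch runs with mlx5e_ptp_rx_manage_fs_ctx as the preactivate callback so the steering rules move with it. This is what lets the hunk below drop the old hard -EINVAL that forbade enabling compression while timestamping. The decision, restated compactly:

    if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
            new_params.ptp_rx = new_val;    /* compression follows ts state */

    /* ptp_rx unchanged -> plain switch; changed -> switch with
     * mlx5e_ptp_rx_manage_fs_ctx(&new_params.ptp_rx) as preactivate
     */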
@@ -1892,11 +1923,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
- netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
- return -EINVAL;
- }
-
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
if (err)
return err;
@@ -1910,7 +1936,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
if (enable) {
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
@@ -1922,17 +1948,12 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EINVAL;
}
- new_channels.params = priv->channels.params;
-
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
- mlx5e_set_rq_type(mdev, &new_channels.params);
+ new_params = priv->channels.params;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
+ mlx5e_set_rq_type(mdev, &new_params);
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -1961,23 +1982,16 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
- int err;
+ struct mlx5e_params new_params;
if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
return -EOPNOTSUPP;
- new_channels.params = priv->channels.params;
-
- MLX5E_SET_PFLAG(&new_channels.params, flag, enable);
+ new_params = priv->channels.params;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- return 0;
- }
+ MLX5E_SET_PFLAG(&new_params, flag, enable);
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
- return err;
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
@@ -1994,7 +2008,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
int err;
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
@@ -2010,29 +2024,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
return -EINVAL;
}
- new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
+ new_params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
* ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
* have the same log_sq_size.
*/
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_params old_params;
-
- old_params = priv->channels.params;
- priv->channels.params = new_channels.params;
- err = mlx5e_num_channels_changed(priv);
- if (err)
- priv->channels.params = old_params;
- goto out;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_num_channels_changed_ctx, NULL);
-out:
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
if (!err)
- priv->port_ptp_opened = true;
+ priv->tx_ptp_opened = true;
return err;
}
@@ -2123,12 +2125,216 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
+static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
+{
+ struct mlx5_ifc_pddr_troubleshooting_page_bits *pddr_troubleshooting_page;
+ u32 in[MLX5_ST_SZ_DW(pddr_reg)] = {};
+ u32 out[MLX5_ST_SZ_DW(pddr_reg)];
+ int err;
+
+ MLX5_SET(pddr_reg, in, local_port, 1);
+ MLX5_SET(pddr_reg, in, page_select,
+ MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE);
+
+ pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, in, page_data);
+ MLX5_SET(pddr_troubleshooting_page, pddr_troubleshooting_page,
+ group_opcode, MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR);
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PDDR, 0, 0);
+ if (err)
+ return err;
+
+ pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, out, page_data);
+ *status_opcode = MLX5_GET(pddr_troubleshooting_page, pddr_troubleshooting_page,
+ status_opcode);
+ return 0;
+}
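+/* Sketch of the query above: PDDR (the port diagnostics database
+ * register, per the mlx5 naming) exposes a troubleshooting page; asking
+ * for the MONITOR group opcode returns a single status_opcode that
+ * encodes why the link is down. The opcodes are firmware-defined; the
+ * table below maps the known values onto ethtool's extended link states.
+ */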
+
+struct mlx5e_ethtool_link_ext_state_opcode_mapping {
+ u32 status_opcode;
+ enum ethtool_link_ext_state link_ext_state;
+ u8 link_ext_substate;
+};
+
+static const struct mlx5e_ethtool_link_ext_state_opcode_mapping
+mlx5e_link_ext_state_opcode_map[] = {
+ /* States relating to autonegotiation or issues therein */
+ {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED},
+ {3, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+ {4, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED},
+ {36, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE},
+ {38, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE},
+ {39, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+
+ /* Failure during link training */
+ {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED},
+ {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+ {7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+ {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0},
+ {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+ /* Logical mismatch in physical coding sublayer or forward error correction sublayer */
+ {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+ {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK},
+ {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS},
+ {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+ {13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+ /* Signal integrity issues */
+ {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0},
+ {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+ {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE},
+
+ /* No cable connected */
+ {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+
+ /* Failure is related to cable, e.g., unsupported cable */
+ {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0},
+
+ /* Failure is related to EEPROM, e.g., failure during reading or parsing the data */
+ {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+
+ /* Failure during calibration algorithm */
+ {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0},
+
+ /* The hardware is not able to provide the power required by the cable or module */
+ {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
+
+ /* The module is overheated */
+ {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+};
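+/* A zero substate means only the top-level extended state is reported.
+ * mlx5e_get_link_ext_state() below does a linear, first-match scan of
+ * this table, so an opcode with no entry falls through to -ENODATA.
+ */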
+
+static void
+mlx5e_set_link_ext_state(struct mlx5e_ethtool_link_ext_state_opcode_mapping
+ link_ext_state_mapping,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ switch (link_ext_state_mapping.link_ext_state) {
+ case ETHTOOL_LINK_EXT_STATE_AUTONEG:
+ link_ext_state_info->autoneg =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE:
+ link_ext_state_info->link_training =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH:
+ link_ext_state_info->link_logical_mismatch =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY:
+ link_ext_state_info->bad_signal_integrity =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE:
+ link_ext_state_info->cable_issue =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ default:
+ break;
+ }
+
+ link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state;
+}
+
+static int
+mlx5e_get_link_ext_state(struct net_device *dev,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ struct mlx5e_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 status_opcode = 0;
+ int i;
+
+ /* Exit without data if the interface state is OK, since no extended data is
+ * available in such a case
+ */
+ if (netif_carrier_ok(dev))
+ return -ENODATA;
+
+ if (query_port_status_opcode(priv->mdev, &status_opcode) ||
+ !status_opcode)
+ return -ENODATA;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_link_ext_state_opcode_map); i++) {
+ link_ext_state_mapping = mlx5e_link_ext_state_opcode_map[i];
+ if (link_ext_state_mapping.status_opcode == status_opcode) {
+ mlx5e_set_link_ext_state(link_ext_state_mapping,
+ link_ext_state_info);
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
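+/* Example of how this surfaces to userspace (hypothetical device state;
+ * assuming a netlink-aware ethtool, whose exact output may vary):
+ *
+ *   $ ethtool eth2
+ *   ...
+ *   Link detected: no (No cable)
+ */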
+
+static void mlx5e_get_eth_phy_stats(struct net_device *netdev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_eth_phy_get(priv, phy_stats);
+}
+
+static void mlx5e_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_eth_mac_get(priv, mac_stats);
+}
+
+static void mlx5e_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_eth_ctrl_get(priv, ctrl_stats);
+}
+
+static void mlx5e_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_rmon_get(priv, rmon_stats, ranges);
+}
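+/* The four thin wrappers above back ethtool's standard-statistics
+ * netlink groups. Assuming a recent ethtool CLI, they can be read with
+ * something like:
+ *
+ *   $ ethtool -S eth2 --groups eth-phy eth-mac eth-ctrl rmon
+ */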
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ext_state = mlx5e_get_link_ext_state,
.get_strings = mlx5e_get_strings,
.get_sset_count = mlx5e_get_sset_count,
.get_ethtool_stats = mlx5e_get_ethtool_stats,
@@ -2157,12 +2363,18 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
+ .get_module_eeprom_by_page = mlx5e_get_module_eeprom_by_page,
.flash_device = mlx5e_flash_device,
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
+ .get_fec_stats = mlx5e_get_fec_stats,
.get_fecparam = mlx5e_get_fecparam,
.set_fecparam = mlx5e_set_fecparam,
+ .get_eth_phy_stats = mlx5e_get_eth_phy_stats,
+ .get_eth_mac_stats = mlx5e_get_eth_mac_stats,
+ .get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
+ .get_rmon_stats = mlx5e_get_rmon_stats,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 16ce7756ac43..0d571a0c76d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -36,7 +36,9 @@
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
+#include "en_rep.h"
#include "lib/mpfs.h"
+#include "en/ptp.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type);
@@ -106,6 +108,29 @@ static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
kfree(hn);
}
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_rule;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ struct mlx5_flow_handle *trap_rule;
+ bool cvlan_filter_disabled;
+};
+
+unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
+{
+ return vlan->active_svlans;
+}
+
+struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
+{
+ return vlan->ft.t;
+}
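+/* struct mlx5e_vlan_table is now private to this file and allocated
+ * when the table is created; outside users go through the two accessors
+ * above. That is also why every priv->fs.vlan.<field> access in the
+ * hunks below becomes priv->fs.vlan-><field>.
+ */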
+
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
struct net_device *ndev = priv->netdev;
@@ -117,7 +142,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -134,7 +159,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -161,7 +186,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -178,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged for both)
*/
- rule_p = &priv->fs.vlan.untagged_rule;
+ rule_p = &priv->fs.vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan.any_cvlan_rule;
+ rule_p = &priv->fs.vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan.any_svlan_rule;
+ rule_p = &priv->fs.vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+ rule_p = &priv->fs.vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -205,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
+ rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -255,33 +280,33 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan.untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
- priv->fs.vlan.untagged_rule = NULL;
+ if (priv->fs.vlan->untagged_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
+ priv->fs.vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan.any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
- priv->fs.vlan.any_cvlan_rule = NULL;
+ if (priv->fs.vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
+ priv->fs.vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan.any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
- priv->fs.vlan.any_svlan_rule = NULL;
+ if (priv->fs.vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
+ priv->fs.vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan.active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
- priv->fs.vlan.active_svlans_rule[vid] = NULL;
+ if (priv->fs.vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
+ priv->fs.vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan.active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
- priv->fs.vlan.active_cvlans_rule[vid] = NULL;
+ if (priv->fs.vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
+ priv->fs.vlan->active_cvlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -328,27 +353,27 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan.trap_rule = NULL;
+ priv->fs.vlan->trap_rule = NULL;
netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
__func__, err);
return err;
}
- priv->fs.vlan.trap_rule = rule;
+ priv->fs.vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
- priv->fs.vlan.trap_rule = NULL;
+ if (priv->fs.vlan->trap_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
+ priv->fs.vlan->trap_rule = NULL;
}
}
@@ -380,10 +405,10 @@ void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan.cvlan_filter_disabled)
+ if (!priv->fs.vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan.cvlan_filter_disabled = false;
+ priv->fs.vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -391,10 +416,10 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.cvlan_filter_disabled)
+ if (priv->fs.vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan.cvlan_filter_disabled = true;
+ priv->fs.vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -404,11 +429,11 @@ static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan.active_cvlans);
+ set_bit(vid, priv->fs.vlan->active_cvlans);
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan.active_cvlans);
+ clear_bit(vid, priv->fs.vlan->active_cvlans);
return err;
}
@@ -418,11 +443,11 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan.active_svlans);
+ set_bit(vid, priv->fs.vlan->active_svlans);
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan.active_svlans);
+ clear_bit(vid, priv->fs.vlan->active_svlans);
return err;
}
@@ -435,6 +460,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
if (be16_to_cpu(proto) == ETH_P_8021Q)
return mlx5e_vlan_rx_add_cvid(priv, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
@@ -447,11 +475,14 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan.active_cvlans);
+ clear_bit(vid, priv->fs.vlan->active_cvlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan.active_svlans);
+ clear_bit(vid, priv->fs.vlan->active_svlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
netdev_update_features(dev);
}
@@ -465,14 +496,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan.cvlan_filter_disabled)
+ if (priv->fs.vlan->cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
@@ -482,11 +513,11 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
@@ -496,7 +527,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
/* must be called after the DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan.cvlan_filter_disabled)
+ if (priv->fs.vlan->cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
}
@@ -1684,10 +1715,15 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_flow_table *ft;
int err;
+ priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+ if (!priv->fs.vlan)
+ return -ENOMEM;
+
+ ft = &priv->fs.vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
@@ -1695,12 +1731,11 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
-
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- ft->t = NULL;
- return err;
+ goto err_free_t;
}
+
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
err = -ENOMEM;
@@ -1719,7 +1754,9 @@ err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
- ft->t = NULL;
+err_free_t:
+ kvfree(priv->fs.vlan);
+ priv->fs.vlan = NULL;
return err;
}
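/* Unwind order in mlx5e_create_vlan_table() after this patch: groups,
 * then the flow table, then the kvzalloc'ed vlan table itself, with
 * priv->fs.vlan reset to NULL so later teardown cannot dereference a
 * half-built table.
 */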
@@ -1727,7 +1764,8 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
+ mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ kvfree(priv->fs.vlan);
}
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
@@ -1785,10 +1823,16 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_l2_table;
}
+ err = mlx5e_ptp_alloc_rx_fs(priv);
+ if (err)
+ goto err_destroy_vlan_table;
+
mlx5e_ethtool_init_steering(priv);
return 0;
+err_destroy_vlan_table:
+ mlx5e_destroy_vlan_table(priv);
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
@@ -1803,6 +1847,7 @@ err_destroy_arfs_tables:
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
+ mlx5e_ptp_free_rx_fs(priv);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5db63b9f3b70..bca832cdc4cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -87,51 +87,6 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
return true;
}
-void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- params->log_rq_mtu_frames = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
- mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
- BIT(params->log_rq_mtu_frames),
- BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
-}
-
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return false;
-
- if (mlx5_fpga_is_ipsec_device(mdev))
- return false;
-
- if (params->xdp_prog) {
- /* XSK params are not considered here. If striding RQ is in use,
- * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
- * be called with the known XSK params.
- */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- return false;
- }
-
- return true;
-}
-
-void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_CYCLIC;
-}
-
void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -259,18 +214,17 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
- struct mlx5e_channel *c)
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
sizeof(*rq->mpwqe.info)),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ GFP_KERNEL, node);
if (!rq->mpwqe.info)
return -ENOMEM;
- mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
return 0;
}
@@ -302,7 +256,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
MLX5_SET(mkc, mkc, translations_octword_size,
MLX5_MTT_OCTW(npages));
@@ -419,58 +373,53 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
__free_page(rq->wqe_overflow.page);
}
-static int mlx5e_alloc_rq(struct mlx5e_channel *c,
- struct mlx5e_params *params,
+static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = c->mdev;
+ int err;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->priv = c->priv;
+ rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
+ rq->icosq = &c->icosq;
+ rq->ix = c->ix;
+ rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->xdpsq = &c->rq_xdpsq;
+ rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+ err = mlx5e_rq_set_handlers(rq, params, NULL);
+ if (err)
+ return err;
+
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+}
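+/* The channel-derived RQ fields are split out into mlx5e_init_rxq_rq()
+ * so that mlx5e_alloc_rq()/mlx5e_open_rq() below need only (params,
+ * node) plus an already-initialized rq. A caller that is not a regular
+ * channel (such as the PTP RX queue this series introduces) can fill the
+ * fields itself and leave rq->icosq NULL; see the NULL checks in
+ * mlx5e_activate_rq() and mlx5e_close_rq() below.
+ */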
+
+static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct xsk_buff_pool *xsk_pool,
struct mlx5e_rq_param *rqp,
- struct mlx5e_rq *rq)
+ int node, struct mlx5e_rq *rq)
{
struct page_pool_params pp_params = { 0 };
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
- u32 rq_xdp_ix;
u32 pool_size;
int wq_sz;
int err;
int i;
- rqp->wq.db_numa_node = cpu_to_node(c->cpu);
-
- rq->wq_type = params->rq_wq_type;
- rq->pdev = c->pdev;
- rq->netdev = c->netdev;
- rq->priv = c->priv;
- rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
- rq->icosq = &c->icosq;
- rq->ix = c->ix;
- rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- rq->xdpsq = &c->rq_xdpsq;
- rq->xsk_pool = xsk_pool;
- rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ?
- mlx5_real_time_cyc2time :
- mlx5_timecounter_cyc2time;
-
- if (rq->xsk_pool)
- rq->stats = &c->priv->channel_stats[c->ix].xskrq;
- else
- rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rqp->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
- rq_xdp_ix = rq->ix;
- if (xsk)
- rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
- err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
- if (err < 0)
- goto err_rq_xdp_prog;
-
rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
@@ -480,7 +429,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- goto err_rq_xdp;
+ goto err_rq_xdp_prog;
err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
if (err)
@@ -504,7 +453,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_drop_page;
rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
- err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+ err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
goto err_rq_mkey;
break;
@@ -512,7 +461,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- goto err_rq_xdp;
+ goto err_rq_xdp_prog;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -524,23 +473,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->wqe.frags =
kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
(wq_sz << rq->wqe.info.log_num_frags)),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ GFP_KERNEL, node);
if (!rq->wqe.frags) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
+ err = mlx5e_init_di_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
- rq->mkey_be = c->mkey_be;
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
}
- err = mlx5e_rq_set_handlers(rq, params, xsk);
- if (err)
- goto err_free_by_rq_type;
-
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
@@ -550,8 +495,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
pp_params.order = 0;
pp_params.flags = 0; /* No internal DMA mapping in page_pool */
pp_params.pool_size = pool_size;
- pp_params.nid = cpu_to_node(c->cpu);
- pp_params.dev = c->pdev;
+ pp_params.nid = node;
+ pp_params.dev = rq->pdev;
pp_params.dma_dir = rq->buff.map_dir;
/* page_pool can be used even when there is no rq->xdp_prog,
@@ -565,8 +510,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
- err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_PAGE_POOL, rq->page_pool);
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
goto err_free_by_rq_type;
@@ -635,8 +581,6 @@ err_rq_frags:
}
err_rq_wq_destroy:
mlx5_wq_destroy(&rq->wq_ctrl);
-err_rq_xdp:
- xdp_rxq_info_unreg(&rq->xdp_rxq);
err_rq_xdp_prog:
if (params->xdp_prog)
bpf_prog_put(params->xdp_prog);
@@ -649,10 +593,12 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
struct bpf_prog *old_prog;
int i;
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&rq->priv->state_lock));
- if (old_prog)
- bpf_prog_put(old_prog);
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -888,13 +834,14 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
-int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+ struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_rq *rq)
{
+ struct mlx5_core_dev *mdev = rq->mdev;
int err;
- err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
+ err = mlx5e_alloc_rq(params, xsk, param, node, rq);
if (err)
return err;
@@ -906,28 +853,28 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
- __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */
+ if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev))
+ __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
- if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
- __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
+ if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
+ __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
if (params->rx_dim_enabled)
- __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+ __set_bit(MLX5E_RQ_STATE_AM, &rq->state);
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets, which will render
* skb->checksum incorrect.
*/
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
- __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog)
+ __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
/* For CQE compression on striding RQ, use stride index provided by
* HW if capability is supported.
*/
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
- MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
- __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+ MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
return 0;
@@ -942,7 +889,10 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- mlx5e_trigger_irq(rq->icosq);
+ if (rq->icosq)
+ mlx5e_trigger_irq(rq->icosq);
+ else
+ napi_schedule(rq->cq.napi);
}
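/* With no ICOSQ there is nothing to ring for the initial buffer post,
 * so an icosq-less RQ kick-starts itself by scheduling its own NAPI.
 */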
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -954,7 +904,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
- cancel_work_sync(&rq->icosq->recover_work);
+ if (rq->icosq)
+ cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1019,7 +970,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->xsk_pool = xsk_pool;
@@ -1090,7 +1041,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
int err;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
@@ -1175,7 +1126,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1183,14 +1134,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
- if (mlx5_accel_is_tls_device(c->priv->mdev))
- set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
sq->stop_room = param->stop_room;
- sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
- mlx5_real_time_cyc2time :
- mlx5_timecounter_cyc2time;
+ sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1258,7 +1205,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
@@ -1462,8 +1409,17 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (err)
goto err_free_icosq;
+ if (param->is_tls) {
+ sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
+ if (IS_ERR(sq->ktls_resync)) {
+ err = PTR_ERR(sq->ktls_resync);
+ goto err_destroy_icosq;
+ }
+ }
return 0;
+err_destroy_icosq:
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_icosq:
mlx5e_free_icosq(sq);
@@ -1485,6 +1441,8 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
+ if (sq->ktls_resync)
+ mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq_descs(sq);
mlx5e_free_icosq(sq);
@@ -1861,14 +1819,16 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
+static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_params)
{
- *ccp = (struct mlx5e_create_cq_param) {
- .napi = &c->napi,
- .ch_stats = c->stats,
- .node = cpu_to_node(c->cpu),
- .ix = c->ix,
- };
+ int err;
+
+ err = mlx5e_init_rxq_rq(c, params, &c->rq);
+ if (err)
+ return err;
+
+ return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
}
static int mlx5e_open_queues(struct mlx5e_channel *c,
@@ -1931,7 +1891,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
goto err_close_sqs;
}
- err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
+ err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
if (err)
goto err_close_xdp_sq;
@@ -2033,7 +1993,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
@@ -2112,314 +2072,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
kvfree(c);
}
-#define DEFAULT_FRAG_SIZE (2048)
-
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_frags_info *info)
-{
- u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- int frag_size_max = DEFAULT_FRAG_SIZE;
- u32 buf_size = 0;
- int i;
-
- if (mlx5_fpga_is_ipsec_device(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-
- if (mlx5e_rx_is_linear_skb(params, xsk)) {
- int frag_stride;
-
- frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
- frag_stride = roundup_pow_of_two(frag_stride);
-
- info->arr[0].frag_size = byte_count;
- info->arr[0].frag_stride = frag_stride;
- info->num_frags = 1;
- info->wqe_bulk = PAGE_SIZE / frag_stride;
- goto out;
- }
-
- if (byte_count > PAGE_SIZE +
- (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
- frag_size_max = PAGE_SIZE;
-
- i = 0;
- while (buf_size < byte_count) {
- int frag_size = byte_count - buf_size;
-
- if (i < MLX5E_MAX_RX_FRAGS - 1)
- frag_size = min(frag_size, frag_size_max);
-
- info->arr[i].frag_size = frag_size;
- info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
-
- buf_size += frag_size;
- i++;
- }
- info->num_frags = i;
- /* number of different wqes sharing a page */
- info->wqe_bulk = 1 + (info->num_frags % 2);
-
-out:
- info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
- info->log_num_frags = order_base_2(info->num_frags);
-}
-
-static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
-{
- int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
-
- switch (wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- sz += sizeof(struct mlx5e_rx_wqe_ll);
- break;
- default: /* MLX5_WQ_TYPE_CYCLIC */
- sz += sizeof(struct mlx5e_rx_wqe_cyc);
- }
-
- return order_base_2(sz);
-}
-
-static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
-{
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- return MLX5_GET(wq, wq, log_wq_sz);
-}
-
-void mlx5e_build_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *param)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqc = param->rqc;
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- int ndsegs = 1;
-
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides,
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
- MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
- MLX5_SET(wq, wq, log_wqe_stride_size,
- mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
- MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
- break;
- default: /* MLX5_WQ_TYPE_CYCLIC */
- MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
- ndsegs = param->frags_info.num_frags;
- }
-
- MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
- MLX5_SET(wq, wq, log_wq_stride,
- mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
- MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
- MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
- MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
- MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
-
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
-}
-
-static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_rq_param *param)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqc = param->rqc;
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, log_wq_stride,
- mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
-
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
-}
-
-void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
- MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
-
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
-}
-
-void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
- bool allow_swp;
-
- allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
- !!MLX5_IPSEC_DEV(priv->mdev);
- mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- MLX5_SET(sqc, sqc, allow_swp, allow_swp);
- param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
- param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
- mlx5e_build_tx_cq_param(priv, params, &param->cqp);
-}
-
-static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_cq_param *param)
-{
- void *cqc = param->cqc;
-
- MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
- if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
- MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
-}
-
-void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_cq_param *param)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- bool hw_stridx = false;
- void *cqc = param->cqc;
- u8 log_cq_size;
-
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
- hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
- break;
- default: /* MLX5_WQ_TYPE_CYCLIC */
- log_cq_size = params->log_rq_mtu_frames;
- }
-
- MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
- MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
- MLX5_SET(cqc, cqc, cqe_comp_en, 1);
- }
-
- mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
-}
-
-void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_cq_param *param)
-{
- void *cqc = param->cqc;
-
- MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
-
- mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
-}
-
-void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_cq_param *param)
-{
- void *cqc = param->cqc;
-
- MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
-
- mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- mlx5e_build_sq_param_common(priv, param);
-
- MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
- MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
-}
-
-static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- u8 log_wq_size,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- mlx5e_build_sq_param_common(priv, param);
-
- /* async_icosq is used by XSK only if xdp_prog is active */
- if (params->xdp_prog)
- param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
- MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
- mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
-}
-
-void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
-{
- void *sqc = param->sqc;
- void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
- mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- mlx5e_build_tx_cq_param(priv, params, &param->cqp);
-}
-
-static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
- struct mlx5e_rq_param *rqp)
-{
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
- order_base_2(MLX5E_UMR_WQEBBS) +
- mlx5e_get_rq_log_wq_sz(rqp->rqc));
- default: /* MLX5_WQ_TYPE_CYCLIC */
- return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- }
-}
-
-static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev)
-{
- if (netdev->hw_features & NETIF_F_HW_TLS_RX)
- return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-
- return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-}
-
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam)
-{
- u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
-
- mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
-
- icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
- async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev);
-
- mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
- mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
- mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
-}
-
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
@@ -2434,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (!chs->c || !cparam)
goto err_free;
- mlx5e_build_channel_param(priv, &chs->params, cparam);
+ err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+ if (err)
+ goto err_free;
+
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
@@ -2446,9 +2101,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
- err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port,
- &chs->port_ptp);
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
+ err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
if (err)
goto err_close_channels;
}
@@ -2462,8 +2116,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
return 0;
err_close_ptp:
- if (chs->port_ptp)
- mlx5e_port_ptp_close(chs->port_ptp);
+ if (chs->ptp)
+ mlx5e_ptp_close(chs->ptp);
err_close_channels:
for (i--; i >= 0; i--)
@@ -2483,8 +2137,8 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
- if (chs->port_ptp)
- mlx5e_ptp_activate_channel(chs->port_ptp);
+ if (chs->ptp)
+ mlx5e_ptp_activate_channel(chs->ptp);
}
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
@@ -2511,8 +2165,8 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
int i;
- if (chs->port_ptp)
- mlx5e_ptp_deactivate_channel(chs->port_ptp);
+ if (chs->ptp)
+ mlx5e_ptp_deactivate_channel(chs->ptp);
for (i = 0; i < chs->num; i++)
mlx5e_deactivate_channel(chs->c[i]);
@@ -2522,11 +2176,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
- if (chs->port_ptp) {
- mlx5e_port_ptp_close(chs->port_ptp);
- chs->port_ptp = NULL;
+ if (chs->ptp) {
+ mlx5e_ptp_close(chs->ptp);
+ chs->ptp = NULL;
}
-
for (i = 0; i < chs->num; i++)
mlx5e_close_channel(chs->c[i]);
@@ -2582,12 +2235,12 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
return err;
}
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
int err;
int ix;
- for (ix = 0; ix < priv->max_nch; ix++) {
+ for (ix = 0; ix < n; ix++) {
err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
if (unlikely(err))
goto err_destroy_rqts;
@@ -2603,11 +2256,11 @@ err_destroy_rqts:
return err;
}
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
int i;
- for (i = 0; i < priv->max_nch; i++)
+ for (i = 0; i < n; i++)
mlx5e_destroy_rqt(priv, &tirs[i].rqt);
}
@@ -2690,7 +2343,8 @@ static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
- struct mlx5e_redirect_rqt_param rrp)
+ struct mlx5e_redirect_rqt_param rrp,
+ struct mlx5e_redirect_rqt_param *ptp_rrp)
{
u32 rqtn;
int ix;
@@ -2716,11 +2370,17 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
rqtn = priv->direct_tir[ix].rqt.rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}
+ if (ptp_rrp) {
+ rqtn = priv->ptp_tir.rqt.rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp);
+ }
}
static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
+ bool rx_ptp_support = priv->profile->rx_ptp_support;
+ struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL;
struct mlx5e_redirect_rqt_param rrp = {
.is_rss = true,
{
@@ -2730,12 +2390,22 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
}
},
};
+ struct mlx5e_redirect_rqt_param ptp_rrp;
+
+ if (rx_ptp_support) {
+ u32 ptp_rqn;
- mlx5e_redirect_rqts(priv, rrp);
+ ptp_rrp.is_rss = false;
+ ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ?
+ priv->drop_rq.rqn : ptp_rqn;
+ ptp_rrp_p = &ptp_rrp;
+ }
+ mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p);
}
static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
+ bool rx_ptp_support = priv->profile->rx_ptp_support;
struct mlx5e_redirect_rqt_param drop_rrp = {
.is_rss = false,
{
@@ -2743,7 +2413,7 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
},
};
- mlx5e_redirect_rqts(priv, drop_rrp);
+ mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL);
}
static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
@@ -3032,6 +2702,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
+ if (priv->channels.params.ptp_rx)
+ num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -3117,11 +2789,14 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
}
}
- if (!priv->channels.port_ptp)
+ if (!priv->channels.ptp)
+ return;
+
+ if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
return;
for (tc = 0; tc < num_tc; tc++) {
- struct mlx5e_port_ptp *c = priv->channels.port_ptp;
+ struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
@@ -3172,6 +2847,29 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_deactivate_channels(&priv->channels);
}
+static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params,
+ mlx5e_fp_preactivate preactivate,
+ void *context)
+{
+ struct mlx5e_params old_params;
+
+ old_params = priv->channels.params;
+ priv->channels.params = *new_params;
+
+ if (preactivate) {
+ int err;
+
+ err = preactivate(priv, context);
+ if (err) {
+ priv->channels.params = old_params;
+ return err;
+ }
+ }
+
+ return 0;
+}
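+/* mlx5e_safe_switch_params() below composes the two paths: with reset
+ * requested and the netdev open, it builds a fresh channel set and swaps
+ * it in; otherwise it falls back to this params-only switch, restoring
+ * the old parameters if the preactivate callback fails.
+ */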
+
static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
@@ -3214,35 +2912,32 @@ out:
return err;
}
-int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_preactivate preactivate,
- void *context)
+int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ mlx5e_fp_preactivate preactivate,
+ void *context, bool reset)
{
+ struct mlx5e_channels new_chs = {};
int err;
- err = mlx5e_open_channels(priv, new_chs);
+ reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (!reset)
+ return mlx5e_switch_priv_params(priv, params, preactivate, context);
+
+ new_chs.params = *params;
+ err = mlx5e_open_channels(priv, &new_chs);
if (err)
return err;
-
- err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+ err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
if (err)
- goto err_close;
-
- return 0;
-
-err_close:
- mlx5e_close_channels(new_chs);
+ mlx5e_close_channels(&new_chs);
return err;
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
{
- struct mlx5e_channels new_channels = {};
-
- new_channels.params = priv->channels.params;
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
}
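/* Reopening is now just "switch to the current parameters with reset
 * forced": the channels are rebuilt even though no parameter changed,
 * which suits callers that need to bounce the data path without
 * changing configuration.
 */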
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -3395,7 +3090,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(priv, &rq_param);
+ mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
@@ -3443,10 +3138,10 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
if (MLX5_GET(tisc, tisc, tls_en))
- MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
@@ -3516,7 +3211,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
u32 rqtn, u32 *tirc)
{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
MLX5_SET(tirc, tirc, tunneled_offload_en,
@@ -3608,7 +3303,7 @@ err_destroy_inner_tirs:
return err;
}
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
struct mlx5e_tir *tir;
void *tirc;
@@ -3622,7 +3317,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
if (!in)
return -ENOMEM;
- for (ix = 0; ix < priv->max_nch; ix++) {
+ for (ix = 0; ix < n; ix++) {
memset(in, 0, inlen);
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
@@ -3660,11 +3355,11 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
{
int i;
- for (i = 0; i < priv->max_nch; i++)
+ for (i = 0; i < n; i++)
mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}
@@ -3699,7 +3394,7 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
struct tc_mqprio_qopt *mqprio)
{
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
u8 tc = mqprio->num_tc;
int err = 0;
@@ -3718,23 +3413,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
goto out;
}
- new_channels.params = priv->channels.params;
- new_channels.params.num_tc = tc ? tc : 1;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_params old_params;
+ new_params = priv->channels.params;
+ new_params.num_tc = tc ? tc : 1;
- old_params = priv->channels.params;
- priv->channels.params = new_channels.params;
- err = mlx5e_num_channels_changed(priv);
- if (err)
- priv->channels.params = old_params;
-
- goto out;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_num_channels_changed_ctx, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
out:
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
@@ -3791,8 +3474,16 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ bool tc_unbind = false;
int err;
+ if (type == TC_SETUP_BLOCK &&
+ ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
+ tc_unbind = true;
+
+ if (!netif_device_present(dev) && !tc_unbind)
+ return -ENODEV;
+
switch (type) {
case TC_SETUP_BLOCK: {
struct flow_block_offload *f = type_data;
@@ -3837,15 +3528,22 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->tx_dropped += sq_stats->dropped;
}
}
- if (priv->port_ptp_opened) {
+ if (priv->tx_ptp_opened) {
for (i = 0; i < priv->max_opened_tc; i++) {
- struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i];
+ struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
s->tx_dropped += sq_stats->dropped;
}
}
+ if (priv->rx_ptp_opened) {
+ struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->multicast += rq_stats->mcast_packets;
+ }
}
void
@@ -3854,6 +3552,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ if (!netif_device_present(dev))
+ return;
+
/* In switchdev mode, monitor counters don't monitor
 * rx/tx stats of 802_3. The update stats mechanism
* should keep the 802_3 layout counters updated
@@ -3895,11 +3596,19 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
}
+static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
+{
+ if (mlx5e_is_uplink_rep(priv))
+ return; /* no rx mode for uplink rep */
+
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+}
+
static void mlx5e_set_rx_mode(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -3914,7 +3623,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
ether_addr_copy(netdev->dev_addr, saddr->sa_data);
netif_addr_unlock_bh(netdev);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
return 0;
}
@@ -3933,10 +3642,10 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
struct mlx5e_params *cur_params;
+ struct mlx5e_params new_params;
+ bool reset = true;
int err = 0;
- bool reset;
mutex_lock(&priv->state_lock);
@@ -3954,30 +3663,17 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ new_params = *cur_params;
+ new_params.lro_en = enable;
- new_channels.params = *cur_params;
- new_channels.params.lro_en = enable;
-
- if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
- mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
reset = false;
}
- if (!reset) {
- struct mlx5e_params old_params;
-
- old_params = *cur_params;
- *cur_params = new_channels.params;
- err = mlx5e_modify_tirs_lro(priv);
- if (err)
- *cur_params = old_params;
- goto out;
- }
-
- err = mlx5e_safe_switch_channels(priv, &new_channels,
- mlx5e_modify_tirs_lro_ctx, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_modify_tirs_lro_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
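
The reset decision above can be restated as a predicate: a full channel switch is only avoidable when the striding-RQ linear-SKB verdict is unchanged, in which case flipping the LRO flag through the TIR-modify preactivate is enough. A hypothetical helper capturing that logic (names from this diff):

	/* Hypothetical restatement of the decision above, not code from
	 * this patch.
	 */
	static bool lro_toggle_needs_reset(struct mlx5_core_dev *mdev,
					   struct mlx5e_params *cur,
					   struct mlx5e_params *new)
	{
		if (cur->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
			return true;
		/* Same linear-SKB verdict => RQ layout is unaffected. */
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, cur, NULL) !=
		       mlx5e_rx_mpwqe_is_linear_skb(mdev, new, NULL);
	}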
@@ -4136,7 +3832,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+ if (!priv->fs.vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -4205,26 +3902,23 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
struct mlx5e_params *params;
+ bool reset = true;
int err = 0;
- bool reset;
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- reset = !params->lro_en;
- reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- new_channels.params = *params;
- new_channels.params.sw_mtu = new_mtu;
- err = mlx5e_validate_params(priv, &new_channels.params);
+ new_params = *params;
+ new_params.sw_mtu = new_mtu;
+ err = mlx5e_validate_params(priv->mdev, &new_params);
if (err)
goto out;
if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
+ !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
new_mtu, mlx5e_xdp_max_mtu(params, NULL));
err = -EINVAL;
@@ -4233,47 +3927,34 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (priv->xsk.refcnt &&
!mlx5e_xsk_validate_mtu(netdev, &priv->channels,
- &new_channels.params, priv->mdev)) {
+ &new_params, priv->mdev)) {
err = -EINVAL;
goto out;
}
+ if (params->lro_en)
+ reset = false;
+
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
- &new_channels.params,
- NULL);
+ bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
+ bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
+ &new_params, NULL);
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
- u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
- /* If XSK is active, XSK RQs are linear. */
- is_linear |= priv->xsk.refcnt;
-
- /* Always reset in linear mode - hw_mtu is used in data path. */
- reset = reset && (is_linear || (ppw_old != ppw_new));
- }
-
- if (!reset) {
- unsigned int old_mtu = params->sw_mtu;
-
- params->sw_mtu = new_mtu;
- if (preactivate) {
- err = preactivate(priv, NULL);
- if (err) {
- params->sw_mtu = old_mtu;
- goto out;
- }
- }
- netdev->mtu = params->sw_mtu;
- goto out;
+ /* Always reset in linear mode - hw_mtu is used in data path.
+ * Check that the mode was non-linear and didn't change.
+ * If XSK is active, XSK RQs are linear.
+ */
+ if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
+ ppw_old == ppw_new)
+ reset = false;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
- if (err)
- goto out;
-
- netdev->mtu = new_channels.params.sw_mtu;
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
+ netdev->mtu = params->sw_mtu;
mutex_unlock(&priv->state_lock);
return err;
}
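
The MTU path now defaults to reset = true and opts out explicitly, instead of accumulating reset with &&. Note also that the out label unconditionally mirrors params->sw_mtu into netdev->mtu, which is correct on both success (params already hold the new value after the switch) and failure (they still hold the old one). A hypothetical predicate restating the opt-out conditions (names from this diff):

	/* Hypothetical restatement, not code from this patch. */
	static bool mtu_change_needs_reset(struct mlx5e_priv *priv,
					   struct mlx5e_params *cur,
					   struct mlx5e_params *new)
	{
		if (cur->lro_en)
			return false;

		if (cur->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			bool lin_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, cur, NULL);
			bool lin_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, new, NULL);

			/* Non-linear before and after, no XSK, same
			 * packets-per-WQE budget: layout unaffected.
			 */
			if (!lin_old && !lin_new && !priv->xsk.refcnt &&
			    mlx5e_mpwqe_log_pkts_per_wqe(cur, NULL) ==
			    mlx5e_mpwqe_log_pkts_per_wqe(new, NULL))
				return false;
		}
		return true;
	}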
@@ -4283,9 +3964,18 @@ static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
+int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
+{
+ bool set = *(bool *)ctx;
+
+ return mlx5e_ptp_rx_manage_fs(priv, set);
+}
+
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
+ struct mlx5e_params new_params;
struct hwtstamp_config config;
+ bool rx_cqe_compress_def;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -4305,11 +3995,13 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
+ new_params = priv->channels.params;
+ rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- /* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
+ new_params.ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -4326,15 +4018,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- /* Disable CQE compression */
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
- netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
- if (err) {
- netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
- mutex_unlock(&priv->state_lock);
- return err;
- }
+ new_params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -4342,6 +4026,16 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
return -ERANGE;
}
+ if (new_params.ptp_rx == priv->channels.params.ptp_rx)
+ goto out;
+
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
+ if (err) {
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
+out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
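
mlx5e_ptp_rx_manage_fs_ctx() exists only to unbox a bool passed through the void *ctx of the preactivate callback; here the ctx is the address of new_params.ptp_rx itself. A standalone illustration of that boxing idiom (all names below are invented for the demo):

	#include <stdbool.h>
	#include <stdio.h>

	typedef int (*preactivate_fn)(void *ctx);

	/* Stand-in for mlx5e_ptp_rx_manage_fs(): act on the plain bool. */
	static int manage_fs(bool set)
	{
		printf("flow steering %s\n", set ? "on" : "off");
		return 0;
	}

	/* Stand-in for the _ctx wrapper: unbox the bool from void *. */
	static int manage_fs_ctx(void *ctx)
	{
		return manage_fs(*(bool *)ctx);
	}

	int main(void)
	{
		bool ptp_rx = true;
		preactivate_fn fn = manage_fs_ctx;

		return fn(&ptp_rx); /* caller passes &ptp_rx as ctx */
	}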
@@ -4452,6 +4146,9 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (mlx5e_is_uplink_rep(priv))
+ return -EOPNOTSUPP;
+
return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
mlx5_ifla_link2vport(link_state));
}
@@ -4463,6 +4160,9 @@ int mlx5e_get_vf_config(struct net_device *dev,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ if (!netif_device_present(dev))
+ return -EOPNOTSUPP;
+
err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
if (err)
return err;
@@ -4479,6 +4179,32 @@ int mlx5e_get_vf_stats(struct net_device *dev,
return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
vf_stats);
}
+
+static bool
+mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (!netif_device_present(dev))
+ return false;
+
+ if (!mlx5e_is_uplink_rep(priv))
+ return false;
+
+ return mlx5e_rep_has_offload_stats(dev, attr_id);
+}
+
+static int
+mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (!mlx5e_is_uplink_rep(priv))
+ return -EOPNOTSUPP;
+
+ return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
+}
#endif
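
For the NIC netdev these two ndo handlers simply gate on device presence and uplink-rep status before delegating to the rep implementations, whose prototypes are exported in en_rep.h further down in this diff. A hypothetical condensation of the gating (not code from this patch):

	static bool has_offload_stats_sketch(const struct net_device *dev,
					     int attr_id)
	{
		return netif_device_present(dev) &&
		       mlx5e_is_uplink_rep(netdev_priv(dev)) &&
		       mlx5e_rep_has_offload_stats(dev, attr_id);
	}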
static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
@@ -4622,7 +4348,7 @@ static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
struct net_device *netdev = priv->netdev;
- struct mlx5e_channels new_channels = {};
+ struct mlx5e_params new_params;
if (priv->channels.params.lro_en) {
netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
@@ -4635,16 +4361,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- new_channels.params = priv->channels.params;
- new_channels.params.xdp_prog = prog;
+ new_params = priv->channels.params;
+ new_params.xdp_prog = prog;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
- if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
+ if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_channels.params.sw_mtu,
- mlx5e_xdp_max_mtu(&new_channels.params, NULL));
+ new_params.sw_mtu,
+ mlx5e_xdp_max_mtu(&new_params, NULL));
return -EINVAL;
}
@@ -4664,9 +4390,10 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params new_params;
struct bpf_prog *old_prog;
- bool reset, was_opened;
int err = 0;
+ bool reset;
int i;
mutex_lock(&priv->state_lock);
@@ -4677,46 +4404,29 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
goto unlock;
}
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
- if (was_opened && !reset)
- /* num_channels is invariant here, so we can take the
- * batched reference right upfront.
- */
- bpf_prog_add(prog, priv->channels.num);
-
- if (was_opened && reset) {
- struct mlx5e_channels new_channels = {};
-
- new_channels.params = priv->channels.params;
- new_channels.params.xdp_prog = prog;
- mlx5e_set_rq_type(priv->mdev, &new_channels.params);
- old_prog = priv->channels.params.xdp_prog;
+ new_params = priv->channels.params;
+ new_params.xdp_prog = prog;
+ if (reset)
+ mlx5e_set_rq_type(priv->mdev, &new_params);
+ old_prog = priv->channels.params.xdp_prog;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
- if (err)
- goto unlock;
- } else {
- /* exchange programs, extra prog reference we got from caller
- * as long as we don't fail from this point onwards.
- */
- old_prog = xchg(&priv->channels.params.xdp_prog, prog);
- }
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
+ if (err)
+ goto unlock;
if (old_prog)
bpf_prog_put(old_prog);
- if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
-
- if (!was_opened || reset)
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
/* exchanging programs w/o reset, we update ref counts on behalf
 * of the channels' RQs here.
*/
+ bpf_prog_add(prog, priv->channels.num);
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -4837,6 +4547,8 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_has_offload_stats = mlx5e_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
.ndo_get_devlink_port = mlx5e_get_devlink_port,
};
@@ -4850,93 +4562,6 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
indirection_rqt[i] = i % num_channels;
}
-static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
-{
- u32 link_speed = 0;
- u32 pci_bw = 0;
-
- mlx5e_port_max_linkspeed(mdev, &link_speed);
- pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
- mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
-
-#define MLX5E_SLOW_PCI_RATIO (2)
-
- return link_speed && pci_bw &&
- link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
-}
-
-static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder;
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder;
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
-{
- return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
- DIM_CQ_PERIOD_MODE_START_FROM_CQE :
- DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->tx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
- } else {
- params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->rx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
- } else {
- params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_tx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_rx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -4949,25 +4574,6 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
-void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- /* Prefer Striding RQ, unless any of the following holds:
- * - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
- * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
- *
- * No XSK params: checking the availability of striding RQ in general.
- */
- if (!slow_pci_heuristic(mdev) &&
- mlx5e_striding_rq_possible(mdev, params) &&
- (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
- !mlx5e_rx_is_linear_skb(params, NULL)))
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
- mlx5e_set_rq_type(mdev, params);
- mlx5e_init_rq_type_params(mdev, params);
-}
-
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels)
{
@@ -5283,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct devlink_port *dl_port;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5298,19 +4905,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
- err = mlx5e_devlink_port_register(priv);
- if (err)
- mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
-
- mlx5e_health_create_reporters(priv);
+ dl_port = mlx5e_devlink_get_dl_port(priv);
+ if (dl_port->registered)
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- mlx5e_health_destroy_reporters(priv);
- mlx5e_devlink_port_unregister(priv);
+ struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+ if (dl_port->registered)
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
}
@@ -5318,6 +4925,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_nch = priv->max_nch;
int err;
mlx5e_create_q_counters(priv);
@@ -5332,7 +4940,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;
@@ -5340,22 +4948,30 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch);
if (unlikely(err))
goto err_destroy_direct_tirs;
- err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch);
if (unlikely(err))
goto err_destroy_xsk_rqts;
+ err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1);
+ if (err)
+ goto err_destroy_xsk_tirs;
+
+ err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1);
+ if (err)
+ goto err_destroy_ptp_rqt;
+
err = mlx5e_create_flow_steering(priv);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
- goto err_destroy_xsk_tirs;
+ goto err_destroy_ptp_direct_tir;
}
err = mlx5e_tc_nic_init(priv);
@@ -5376,16 +4992,20 @@ err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
+err_destroy_ptp_direct_tir:
+ mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
+err_destroy_ptp_rqt:
+ mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
err_destroy_xsk_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
err_destroy_xsk_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -5397,14 +5017,18 @@ err_destroy_q_counters:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
+ u16 max_nch = priv->max_nch;
+
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
+ mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
+ mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
+ mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -5450,7 +5074,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
return;
mlx5e_dcbnl_init_app(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
if (netif_running(netdev))
@@ -5473,7 +5097,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
netif_device_detach(priv->netdev);
rtnl_unlock();
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_nic_set_rx_mode(priv);
mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
@@ -5512,6 +5136,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
+ .rx_ptp_support = true,
};
/* mlx5e generic netdev management API (move to en_common.c) */
@@ -5746,6 +5371,11 @@ rollback:
return err;
}
+void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
+{
+ mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
+}
+
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
@@ -5828,10 +5458,17 @@ static int mlx5e_probe(struct auxiliary_device *adev,
priv->profile = profile;
priv->ppriv = NULL;
+
+ err = mlx5e_devlink_port_register(priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
- goto err_destroy_netdev;
+ goto err_devlink_cleanup;
}
err = mlx5e_resume(adev);
@@ -5849,12 +5486,15 @@ static int mlx5e_probe(struct auxiliary_device *adev,
mlx5e_devlink_port_type_eth_set(priv);
mlx5e_dcbnl_init_app(priv);
+ mlx5_uplink_netdev_set(mdev, netdev);
return 0;
err_resume:
mlx5e_suspend(adev, state);
err_profile_cleanup:
profile->cleanup(priv);
+err_devlink_cleanup:
+ mlx5e_devlink_port_unregister(priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
return err;
@@ -5869,6 +5509,7 @@ static void mlx5e_remove(struct auxiliary_device *adev)
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
+ mlx5e_devlink_port_unregister(priv);
mlx5e_destroy_netdev(priv);
}
@@ -5894,18 +5535,18 @@ int mlx5e_init(void)
mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
- ret = mlx5e_rep_init();
+ ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
return ret;
- ret = auxiliary_driver_register(&mlx5e_driver);
+ ret = mlx5e_rep_init();
if (ret)
- mlx5e_rep_cleanup();
+ auxiliary_driver_unregister(&mlx5e_driver);
return ret;
}
void mlx5e_cleanup(void)
{
- auxiliary_driver_unregister(&mlx5e_driver);
mlx5e_rep_cleanup();
+ auxiliary_driver_unregister(&mlx5e_driver);
}
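
Registration order is inverted so the auxiliary driver comes up before the rep layer, and both the error unwind in mlx5e_init() and mlx5e_cleanup() tear down strictly in reverse. A standalone sketch of that idiom (demo functions, not the mlx5 API):

	#include <stdio.h>

	static int driver_register(void)    { puts("driver up");   return 0; }
	static void driver_unregister(void) { puts("driver down"); }
	static int rep_init(void)           { puts("rep up");      return 0; }
	static void rep_cleanup(void)       { puts("rep down");    }

	static int init(void)
	{
		int ret = driver_register();

		if (ret)
			return ret;
		ret = rep_init();
		if (ret)
			driver_unregister(); /* unwind the earlier step */
		return ret;
	}

	static void cleanup(void)
	{
		rep_cleanup();        /* reverse order of init() */
		driver_unregister();
	}

	int main(void)
	{
		if (!init())
			cleanup();
		return 0;
	}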
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 8d39bfee84a9..34eb1118670f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -40,17 +40,19 @@
#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
+#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
+#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
- max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -69,16 +71,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
fw_rev_sub(mdev), mdev->board_id);
}
-static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- mlx5e_rep_get_drvinfo(dev, drvinfo);
- strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
- sizeof(drvinfo->bus_info));
-}
-
static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
@@ -285,46 +277,6 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev,
- struct ethtool_pause_stats *stats)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- mlx5e_stats_pause_get(priv, stats);
-}
-
-static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- mlx5e_ethtool_get_pauseparam(priv, pauseparam);
-}
-
-static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
-}
-
-static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *link_ksettings)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
-}
-
-static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *link_ksettings)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
-}
-
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -344,34 +296,6 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
-static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USE_ADAPTIVE,
- .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = mlx5e_rep_get_strings,
- .get_sset_count = mlx5e_rep_get_sset_count,
- .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
- .get_ringparam = mlx5e_rep_get_ringparam,
- .set_ringparam = mlx5e_rep_set_ringparam,
- .get_channels = mlx5e_rep_get_channels,
- .set_channels = mlx5e_rep_set_channels,
- .get_coalesce = mlx5e_rep_get_coalesce,
- .set_coalesce = mlx5e_rep_set_coalesce,
- .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
- .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
- .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
- .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
- .get_rxfh = mlx5e_get_rxfh,
- .set_rxfh = mlx5e_set_rxfh,
- .get_rxnfc = mlx5e_get_rxnfc,
- .set_rxnfc = mlx5e_set_rxnfc,
- .get_pause_stats = mlx5e_uplink_rep_get_pause_stats,
- .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
- .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
-};
-
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
@@ -411,8 +335,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
}
/* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- rep->vport,
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -522,7 +445,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
return (rep->vport == MLX5_VPORT_UPLINK);
}
-static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
+bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -542,8 +465,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
return 0;
}
-static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
- void *sp)
+int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -568,34 +491,6 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
-static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
-{
- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
-}
-
-static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
-{
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
- return 0;
-}
-
-static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
- __be16 vlan_proto)
-{
- netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
-
- if (vlan != 0)
- return -EOPNOTSUPP;
-
- /* allow setting 0-vid for compatibility with libvirt */
- return 0;
-}
-
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -641,29 +536,10 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_change_carrier = mlx5e_rep_change_carrier,
};
-static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
- .ndo_open = mlx5e_open,
- .ndo_stop = mlx5e_close,
- .ndo_start_xmit = mlx5e_xmit,
- .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
- .ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
- .ndo_get_stats64 = mlx5e_get_stats,
- .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
- .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
- .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_features_check = mlx5e_features_check,
- .ndo_set_vf_mac = mlx5e_set_vf_mac,
- .ndo_set_vf_rate = mlx5e_set_vf_rate,
- .ndo_get_vf_config = mlx5e_get_vf_config,
- .ndo_get_vf_stats = mlx5e_get_vf_stats,
- .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
- .ndo_set_features = mlx5e_set_features,
-};
-
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
- return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
+ return netdev->netdev_ops == &mlx5e_netdev_ops &&
+ mlx5e_is_uplink_rep(netdev_priv(netdev));
}
bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
@@ -713,26 +589,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
}
static void mlx5e_build_rep_netdev(struct net_device *netdev,
- struct mlx5_core_dev *mdev,
- struct mlx5_eswitch_rep *rep)
+ struct mlx5_core_dev *mdev)
{
SET_NETDEV_DEV(netdev, mdev->device);
- if (rep->vport == MLX5_VPORT_UPLINK) {
- netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
- /* we want a persistent mac for the uplink rep */
- mlx5_query_mac_address(mdev, netdev->dev_addr);
- netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
- mlx5e_dcbnl_build_rep_netdev(netdev);
- } else {
- netdev->netdev_ops = &mlx5e_netdev_ops_rep;
- eth_hw_addr_random(netdev);
- netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
- }
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ eth_hw_addr_random(netdev);
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
- netdev->features |= NETIF_F_NETNS_LOCAL;
-
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
@@ -744,12 +609,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
- if (rep->vport == MLX5_VPORT_UPLINK)
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- else
- netdev->features |= NETIF_F_VLAN_CHALLENGED;
-
netdev->features |= netdev->hw_features;
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -890,6 +752,7 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_nch = priv->max_nch;
int err;
mlx5e_init_l2_addr(priv);
@@ -904,7 +767,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;
@@ -912,7 +775,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;
@@ -937,11 +800,11 @@ err_destroy_root_ft:
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -951,13 +814,15 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
+ u16 max_nch = priv->max_nch;
+
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
}
@@ -1116,6 +981,14 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
mlx5e_rep_neigh_init(rpriv);
+
+ netdev->wanted_features |= NETIF_F_HW_TC;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
@@ -1123,6 +996,12 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
+ rtnl_lock();
+ if (netif_running(priv->netdev))
+ mlx5e_close(priv->netdev);
+ netif_device_detach(priv->netdev);
+ rtnl_unlock();
+
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
@@ -1183,6 +1062,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
+ .rx_ptp_support = false,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1199,33 +1079,65 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ /* XSK is needed so we can replace profile with NIC netdev */
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
+ .rx_ptp_support = false,
};
/* e-Switch vport representors */
static int
-mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct devlink_port *dl_port;
+ int err;
+
+ rpriv->netdev = priv->netdev;
+
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ if (err)
+ return err;
+
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_eth_set(dl_port, rpriv->netdev);
+
+ return 0;
+}
+
+static void
+mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
+{
+ struct net_device *netdev = rpriv->netdev;
+ struct devlink_port *dl_port;
+ struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
+
+ priv = netdev_priv(netdev);
+ dev = priv->mdev;
+
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_clear(dl_port);
+ mlx5e_netdev_attach_nic_profile(priv);
+}
+
+static int
+mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
const struct mlx5e_profile *profile;
- struct mlx5e_rep_priv *rpriv;
struct devlink_port *dl_port;
struct net_device *netdev;
struct mlx5e_priv *priv;
unsigned int txqs, rxqs;
int nch, err;
- rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
- if (!rpriv)
- return -ENOMEM;
-
- /* rpriv->rep to be looked up when profile->init() is called */
- rpriv->rep = rep;
-
- profile = (rep->vport == MLX5_VPORT_UPLINK) ?
- &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
-
+ profile = &mlx5e_rep_profile;
nch = mlx5e_get_max_num_channels(dev);
txqs = nch * profile->max_tc;
rxqs = nch * profile->rq_groups;
@@ -1234,21 +1146,11 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
rep->vport);
- kfree(rpriv);
return -EINVAL;
}
- mlx5e_build_rep_netdev(netdev, dev, rep);
-
+ mlx5e_build_rep_netdev(netdev, dev);
rpriv->netdev = netdev;
- rep->rep_data[REP_ETH].priv = rpriv;
- INIT_LIST_HEAD(&rpriv->vport_sqs_list);
-
- if (rep->vport == MLX5_VPORT_UPLINK) {
- err = mlx5e_create_mdev_resources(dev);
- if (err)
- goto err_destroy_netdev;
- }
priv = netdev_priv(netdev);
priv->profile = profile;
@@ -1256,7 +1158,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = profile->init(dev, netdev);
if (err) {
netdev_warn(netdev, "rep profile init failed, %d\n", err);
- goto err_destroy_mdev_resources;
+ goto err_destroy_netdev;
}
err = mlx5e_attach_netdev(netdev_priv(netdev));
@@ -1286,13 +1188,34 @@ err_detach_netdev:
err_cleanup_profile:
priv->profile->cleanup(priv);
-err_destroy_mdev_resources:
- if (rep->vport == MLX5_VPORT_UPLINK)
- mlx5e_destroy_mdev_resources(dev);
-
err_destroy_netdev:
mlx5e_destroy_netdev(netdev_priv(netdev));
- kfree(rpriv);
+ return err;
+}
+
+static int
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
+ int err;
+
+ rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ if (!rpriv)
+ return -ENOMEM;
+
+ /* rpriv->rep to be looked up when profile->init() is called */
+ rpriv->rep = rep;
+ rep->rep_data[REP_ETH].priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ err = mlx5e_vport_uplink_rep_load(dev, rep);
+ else
+ err = mlx5e_vport_vf_rep_load(dev, rep);
+
+ if (err)
+ kfree(rpriv);
+
return err;
}
@@ -1306,15 +1229,19 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ mlx5e_vport_uplink_rep_unload(rpriv);
+ goto free_ppriv;
+ }
+
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
if (dl_port)
devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
- if (rep->vport == MLX5_VPORT_UPLINK)
- mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
+free_ppriv:
kfree(ppriv); /* mlx5e_rep_priv */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index d1696404cca9..22585015c7a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -89,6 +89,7 @@ struct mlx5_rep_uplink_priv {
struct mapping_ctx *tunnel_enc_opts_mapping;
struct mlx5_tc_ct_priv *ct_priv;
+ struct mlx5_esw_psample *esw_psample;
/* support eswitch vports bonding */
struct mlx5e_rep_bond *bond;
@@ -220,6 +221,10 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
const struct net_device *lag_dev);
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup);
+bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id);
+int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp);
+
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
@@ -240,6 +245,11 @@ static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
+static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
+ int attr_id) { return false; }
+static inline int mlx5e_rep_get_offload_stats(int attr_id,
+ const struct net_device *dev,
+ void *sp) { return -EOPNOTSUPP; }
#endif
static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 249d8905e644..f90894eea9e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,6 +52,7 @@
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
+#include "en/devlink.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -669,6 +670,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
get_cqe_opcode(cqe));
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->priv->wq, &sq->recover_work);
break;
@@ -1822,6 +1824,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
struct mlx5e_priv *priv = netdev_priv(rq->netdev);
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
+ struct devlink_port *dl_port;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 trap_id;
@@ -1844,7 +1847,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb_push(skb, ETH_HLEN);
- mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port);
+ dl_port = mlx5e_devlink_get_dl_port(priv);
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
dev_kfree_skb_any(skb);
free_wqe:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 88a01c59ce61..e4f5b6395148 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -184,6 +184,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
@@ -344,6 +345,7 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+ s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
s->rx_tls_err += rq_stats->tls_err;
#endif
@@ -401,13 +403,21 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
{
int i;
- if (!priv->port_ptp_opened)
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return;
- mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);
+ mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
- for (i = 0; i < priv->max_opened_tc; i++) {
- mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);
+ if (priv->tx_ptp_opened) {
+ for (i = 0; i < priv->max_opened_tc; i++) {
+ mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
+ }
+ }
+ if (priv->rx_ptp_opened) {
+ mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
@@ -760,35 +770,112 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
-#define MLX5E_READ_CTR64_BE_F(ptr, c) \
+#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
be64_to_cpu(*(__be64 *)((char *)ptr + \
MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
+ counter_set.set.c##_high)))
-void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
- struct ethtool_pause_stats *pause_stats)
+static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
+ u32 *ppcnt_ieee_802_3)
{
- u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
- struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
- return;
+ return -EOPNOTSUPP;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
- sz, MLX5_REG_PPCNT, 0, 0);
+ return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
+ sz, MLX5_REG_PPCNT, 0, 0);
+}
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
pause_stats->tx_pause_frames =
MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
a_pause_mac_ctrl_frames_transmitted);
pause_stats->rx_pause_frames =
MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
a_pause_mac_ctrl_frames_received);
}
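
MLX5E_READ_CTR64_BE_F() gains a counter-set parameter so one macro can address any PPCNT layout; token pasting appends _high to the counter name inside the chosen set. A standalone demo of the pattern (struct names invented for the demo, not mlx5 layouts, and the byte-order conversion is omitted):

	#include <stdio.h>

	struct demo_set { unsigned long long frames_ok_high; };
	struct demo_reg { struct demo_set counter_set; };

	/* Same shape as the macro above: pick a set, paste c##_high. */
	#define READ_CTR(reg, set, c) ((reg)->set.c##_high)

	int main(void)
	{
		struct demo_reg r = { .counter_set = { .frames_ok_high = 42 } };

		printf("%llu\n", READ_CTR(&r, counter_set, frames_ok));
		return 0;
	}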
+void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
+
+ phy_stats->SymbolErrorDuringCarrier =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_symbol_error_during_carrier);
+}
+
+void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
+
+#define RD(name) \
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
+ eth_802_3_cntrs_grp_data_layout, \
+ name)
+
+ mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
+ mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
+ mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
+ mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
+ mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
+ mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
+ mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
+ mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
+ mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
+ mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
+ mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
+ mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
+#undef RD
+}
+
+void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
+ return;
+
+ ctrl_stats->MACControlFramesTransmitted =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_mac_control_frames_transmitted);
+ ctrl_stats->MACControlFramesReceived =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_mac_control_frames_received);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ eth_802_3_cntrs_grp_data_layout,
+ a_unsupported_opcodes_received);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -900,6 +987,59 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 10239 },
+ {}
+};
+
+void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
+ sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+#define RD(name) \
+ MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
+ eth_2819_cntrs_grp_data_layout, \
+ name)
+
+ rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
+ rmon->fragments = RD(ether_stats_fragments);
+ rmon->jabbers = RD(ether_stats_jabbers);
+
+ rmon->hist[0] = RD(ether_stats_pkts64octets);
+ rmon->hist[1] = RD(ether_stats_pkts65to127octets);
+ rmon->hist[2] = RD(ether_stats_pkts128to255octets);
+ rmon->hist[3] = RD(ether_stats_pkts256to511octets);
+ rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
+ rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
+ rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
+ rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
+ rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
+ rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
+#undef RD
+
+ *ranges = mlx5e_rmon_ranges;
+}
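
mlx5e_rmon_ranges is terminated by an all-zero entry, and the hist[] indices filled above follow the table order, which is how an ethtool consumer correlates buckets with ranges. A standalone sketch of walking such a zero-terminated table (demo struct; the real type is ethtool_rmon_hist_range):

	#include <stdio.h>

	struct range { unsigned int low, high; };

	/* { 0, 0 } plays the role of the kernel table's {} terminator. */
	static const struct range ranges[] = {
		{ 0, 64 }, { 65, 127 }, { 128, 255 }, { 0, 0 }
	};

	int main(void)
	{
		for (int i = 0; ranges[i].low || ranges[i].high; i++)
			printf("bucket %d: %u-%u octets\n",
			       i, ranges[i].low, ranges[i].high);
		return 0;
	}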
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1007,6 +1147,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
+ sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ fec_stats->corrected_bits.total =
+ MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
+ phys_layer_statistical_cntrs,
+ phy_corrected_bits);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -1621,6 +1784,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
@@ -1751,6 +1915,38 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
+static const struct counter_desc ptp_rq_stats_desc[] = {
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
+ { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
+};
+
static const struct counter_desc qos_sq_stats_desc[] = {
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
@@ -1795,6 +1991,7 @@ static const struct counter_desc qos_sq_stats_desc[] = {
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
+#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
@@ -1841,32 +2038,46 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
- return priv->port_ptp_opened ?
- NUM_PTP_CH_STATS +
- ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
- 0;
+ int num = NUM_PTP_CH_STATS;
+
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
+ return 0;
+
+ if (priv->tx_ptp_opened)
+ num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
+ if (priv->rx_ptp_opened)
+ num += NUM_PTP_RQ_STATS;
+
+ return num;
}
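
A worked check of the counting logic above, with illustrative array sizes (only the 29 RQ entries are fixed by the ptp_rq_stats_desc array in this patch; the CH/SQ/CQ counts are placeholders):

#include <stdio.h>

int main(void)
{
	/* Illustrative sizes; only rq = 29 comes from ptp_rq_stats_desc. */
	int ch = 10, sq = 12, cq = 5, rq = 29, max_tc = 8;
	int tx_ptp_opened = 1, rx_ptp_opened = 1;
	int num = 0;

	if (tx_ptp_opened || rx_ptp_opened)
		num = ch + (tx_ptp_opened ? (sq + cq) * max_tc : 0) +
		      (rx_ptp_opened ? rq : 0);
	printf("%d\n", num);	/* 10 + 17 * 8 + 29 = 175 */
	return 0;
}
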
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
int i, tc;
- if (!priv->port_ptp_opened)
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_ch_stats_desc[i].format);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_sq_stats_desc[i].format, tc);
+ if (priv->tx_ptp_opened) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_SQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_sq_stats_desc[i].format, tc);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ptp_cq_stats_desc[i].format, tc);
+ }
+ if (priv->rx_ptp_opened) {
+ for (i = 0; i < NUM_PTP_RQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_cq_stats_desc[i].format, tc);
+ ptp_rq_stats_desc[i].format);
+ }
return idx;
}
@@ -1874,26 +2085,33 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
int i, tc;
- if (!priv->port_ptp_opened)
+ if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
ptp_ch_stats_desc, i);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
- ptp_sq_stats_desc, i);
+ if (priv->tx_ptp_opened) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_SQ_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
+ ptp_sq_stats_desc, i);
- for (tc = 0; tc < priv->max_opened_tc; tc++)
- for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < NUM_PTP_CQ_STATS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
+ ptp_cq_stats_desc, i);
+ }
+ if (priv->rx_ptp_opened) {
+ for (i = 0; i < NUM_PTP_RQ_STATS; i++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
- ptp_cq_stats_desc, i);
-
+ MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
+ ptp_rq_stats_desc, i);
+ }
return idx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index adf9b7b8b712..139e59f30db0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,6 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
@@ -113,6 +114,18 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
+void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats);
+
+void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_phy_stats *phy_stats);
+void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_mac_stats *mac_stats);
+void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges);
/* Concrete NIC Stats */
@@ -206,6 +219,7 @@ struct mlx5e_sw_stats {
u64 rx_tls_resync_req_end;
u64 rx_tls_resync_req_skip;
u64 rx_tls_resync_res_ok;
+ u64 rx_tls_resync_res_retry;
u64 rx_tls_resync_res_skip;
u64 rx_tls_err;
#endif
@@ -336,6 +350,7 @@ struct mlx5e_rq_stats {
u64 tls_resync_req_end;
u64 tls_resync_req_skip;
u64 tls_resync_res_ok;
+ u64 tls_resync_res_retry;
u64 tls_resync_res_skip;
u64 tls_err;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d675107d9eca..47a9c49b25fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -47,6 +47,7 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
+#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
@@ -65,6 +66,7 @@
#include "en/mod_hdr.h"
#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
+#include "esw/sample.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -221,6 +223,25 @@ get_ct_priv(struct mlx5e_priv *priv)
return priv->fs.tc.ct;
}
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+static struct mlx5_esw_psample *
+get_sample_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->esw_psample;
+ }
+
+ return NULL;
+}
+#endif
+
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
@@ -445,11 +466,15 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
-static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
- u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
struct mlx5e_priv *priv = hp->func_priv;
int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+ u32 *indirection_rqt, rqn;
+
+ indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
+ if (!indirection_rqt)
+ return -ENOMEM;
mlx5e_build_default_indir_rqt(indirection_rqt, sz,
hp->num_channels);
@@ -462,6 +487,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
rqn = hp->pair->rqn[ix];
MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
}
+
+ kfree(indirection_rqt);
+ return 0;
}
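
The point of this change is to move the MLX5E_INDIR_RQT_SIZE-entry u32 array off the small, fixed kernel stack, which is why the helper now returns an error that the caller (next hunk) must propagate. A generic sketch of the heap-allocate-and-propagate pattern, with nothing mlx5-specific:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Build the table in heap-allocated scratch instead of a large on-stack
 * array; the helper becomes fallible and callers must check -ENOMEM. */
static int fill_indirection(unsigned int *rqt, size_t sz, unsigned int nch)
{
	unsigned int *scratch = calloc(sz, sizeof(*scratch));
	size_t i;

	if (!scratch)
		return -ENOMEM;
	for (i = 0; i < sz; i++)
		scratch[i] = i % nch;	/* round-robin spread over channels */
	for (i = 0; i < sz; i++)
		rqt[i] = scratch[i];
	free(scratch);
	return 0;
}

int main(void)
{
	unsigned int rqt[256];
	int err = fill_indirection(rqt, 256, 8);

	printf("err=%d rqt[9]=%u\n", err, rqt[9]);	/* err=0 rqt[9]=1 */
	return err ? 1 : 0;
}
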
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
@@ -482,12 +510,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+ err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+ if (err)
+ goto out;
err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
if (!err)
hp->indir_rqt.enabled = true;
+out:
kvfree(in);
return err;
}
@@ -896,7 +927,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
}
dest_ix++;
}
@@ -1077,19 +1108,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+ rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ } else if (flow_flag_test(flow, SAMPLE)) {
+ rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr);
+#endif
+ } else {
+ rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
- rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
return flow->rule[1];
}
}
@@ -1111,6 +1150,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
return;
}
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ if (flow_flag_test(flow, SAMPLE)) {
+ mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+ return;
+ }
+#endif
+
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1467,6 +1513,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ kfree(flow->attr->esw_attr->sample);
kfree(flow->attr);
}
@@ -1950,6 +1997,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
+ void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_3);
+ void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
@@ -1979,6 +2030,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_ICMP) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
@@ -2298,7 +2350,49 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->flags)
*match_level = MLX5_MATCH_L4;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+ flow_rule_match_icmp(rule, &match);
+ switch (ip_proto) {
+ case IPPROTO_ICMP:
+ if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_ICMP))
+ return -EOPNOTSUPP;
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
+ match.mask->type);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
+ match.key->type);
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
+ match.mask->code);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
+ match.key->code);
+ break;
+ case IPPROTO_ICMPV6:
+ if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_ICMPV6))
+ return -EOPNOTSUPP;
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
+ match.mask->type);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
+ match.key->type);
+ MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
+ match.mask->code);
+ MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
+ match.key->code);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Code and type matching only with ICMP and ICMPv6");
+ netdev_err(priv->netdev,
+ "Code and type matching only with ICMP and ICMPv6\n");
+ return -EINVAL;
+ }
+ if (match.mask->code || match.mask->type) {
+ *match_level = MLX5_MATCH_L4;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+ }
+ }
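
The mask guard at the end mirrors flow-dissector key/mask semantics: a field matches when it agrees with the key on every masked bit, and an all-zero mask is a wildcard, so only a non-zero type or code mask needs the misc_parameters_3 criteria enabled. A tiny standalone illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A field matches when it agrees with the key on every bit the mask
 * selects; a zero mask matches everything. */
static bool masked_match(uint8_t key, uint8_t mask, uint8_t field)
{
	return ((field ^ key) & mask) == 0;
}

int main(void)
{
	/* ICMP echo request: type 8, code 0 */
	printf("%d\n", masked_match(8, 0xff, 8));	/* 1: exact match */
	printf("%d\n", masked_match(8, 0x00, 3));	/* 1: zero mask = wildcard */
	printf("%d\n", masked_match(8, 0xff, 3));	/* 0: masked bits differ */
	return 0;
}
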
 /* Currently supported only for MPLS over UDP */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
!netif_is_bareudp(filter_dev)) {
@@ -3014,7 +3108,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
actions = flow->attr->action;
if (mlx5e_is_eswitch_flow(flow)) {
- if (flow->attr->esw_attr->split_count && ct_flow) {
+ if (flow->attr->esw_attr->split_count && ct_flow &&
+ !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
/* All registers used by ct are cleared when using
* split rules.
*/
@@ -3052,6 +3147,13 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
+static bool same_vf_reps(struct mlx5e_priv *priv,
+ struct net_device *out_dev)
+{
+ return mlx5e_eswitch_vf_rep(priv->netdev) &&
+ priv->netdev == out_dev;
+}
+
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -3561,6 +3663,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_sample_attr sample = {};
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
@@ -3737,6 +3840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (same_vf_reps(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't forward from a VF to itself");
+ return -EOPNOTSUPP;
+ }
+
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
@@ -3809,11 +3918,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
+ if (flow_flag_test(flow, SAMPLE)) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
+ return -EOPNOTSUPP;
+ }
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
if (err)
return err;
flow_flag_set(flow, CT);
+ esw_attr->split_count = esw_attr->out_count;
+ break;
+ case FLOW_ACTION_SAMPLE:
+ if (flow_flag_test(flow, CT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
+ return -EOPNOTSUPP;
+ }
+ sample.rate = act->sample.rate;
+ sample.group_num = act->sample.psample_group->group_num;
+ if (act->sample.truncate)
+ sample.trunc_size = act->sample.trunc_size;
+ flow_flag_set(flow, SAMPLE);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
@@ -3876,11 +4001,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG_MOD(extack,
- "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
@@ -3898,6 +4018,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ /* Allocate sample attribute only when there is a sample action and
+ * no errors after parsing.
+ */
+ if (flow_flag_test(flow, SAMPLE)) {
+ esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL);
+ if (!esw_attr->sample)
+ return -ENOMEM;
+ *esw_attr->sample = sample;
+ }
+
return 0;
}
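
As the comment above notes, the sample attribute is parsed into a stack-local struct and only copied into a heap allocation once the whole action list has validated, so the many early error returns need no kfree. A generic sketch of that parse-then-commit pattern (illustrative names):

#include <errno.h>
#include <stdlib.h>

struct sample_attr {
	unsigned int rate;
	unsigned int group_num;
	unsigned int trunc_size;
};

/* Parse into a stack-local struct first; allocate and publish only after
 * every action has validated, so early error returns need no cleanup. */
static int parse_actions(struct sample_attr **out, int want_sample)
{
	struct sample_attr tmp = { .rate = 100, .group_num = 7 };

	/* ...imagine validation steps that may return -EOPNOTSUPP here... */

	if (want_sample) {
		*out = malloc(sizeof(**out));
		if (!*out)
			return -ENOMEM;
		**out = tmp;
	}
	return 0;
}

int main(void)
{
	struct sample_attr *attr = NULL;
	int err = parse_actions(&attr, 1);

	free(attr);	/* free(NULL) is a no-op */
	return err ? 1 : 0;
}
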
@@ -4300,6 +4430,11 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err = 0;
+ if (!mlx5_esw_hold(priv->mdev))
+ return -EAGAIN;
+
+ mlx5_esw_get(priv->mdev);
+
rcu_read_lock();
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
if (flow) {
@@ -4337,11 +4472,14 @@ rcu_unlock:
if (err)
goto err_free;
+ mlx5_esw_release(priv->mdev);
return 0;
err_free:
mlx5e_flow_put(priv, flow);
out:
+ mlx5_esw_put(priv->mdev);
+ mlx5_esw_release(priv->mdev);
return err;
}
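
The hold/get pairing here is two different protections: mlx5_esw_hold() blocks eswitch mode changes only while the rule is being installed and is always released before returning, while mlx5_esw_get() pins the mode for the flow's lifetime and is only dropped in mlx5e_delete_flower(). A rough userspace analogue, assuming a rwlock for the hold and a plain refcount for the get (names illustrative):

#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t mode_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_long mode_users;

/* 'hold' = shared lock: mode switches (writers) are excluded only while a
 * rule is being installed. 'get' = refcount: pins the mode for as long as
 * the flow lives, without holding any lock. */
static int flow_add(void)
{
	if (pthread_rwlock_tryrdlock(&mode_lock))
		return -1;			/* like the -EAGAIN above */
	atomic_fetch_add(&mode_users, 1);	/* mlx5_esw_get() */

	/* ...install the flow; on failure, drop the refcount too... */

	pthread_rwlock_unlock(&mode_lock);	/* mlx5_esw_release() */
	return 0;				/* put deferred to delete */
}

static void flow_del(void)
{
	atomic_fetch_sub(&mode_users, 1);	/* mlx5_esw_put() */
}

int main(void)
{
	if (flow_add() == 0)
		flow_del();
	return 0;
}
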
@@ -4381,6 +4519,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
trace_mlx5e_delete_flower(f);
mlx5e_flow_put(priv, flow);
+ mlx5_esw_put(priv->mdev);
return 0;
errout:
@@ -4516,6 +4655,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
if (err)
return err;
@@ -4650,6 +4793,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_core_dev *dev = priv->mdev;
+ struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
int err;
@@ -4664,15 +4808,22 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
- if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj),
+ MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+ if (IS_ERR(chains_mapping)) {
+ err = PTR_ERR(chains_mapping);
+ goto err_mapping;
+ }
+ tc->mapping = chains_mapping;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
- attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
- }
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs.vlan.ft.t;
+ attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
@@ -4682,10 +4833,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (IS_ERR(tc->ct)) {
- err = PTR_ERR(tc->ct);
- goto err_ct;
- }
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
@@ -4701,9 +4848,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
err_reg:
mlx5_tc_ct_clean(tc->ct);
-err_ct:
mlx5_chains_destroy(tc->chains);
err_chains:
+ mapping_destroy(chains_mapping);
+err_mapping:
rhashtable_destroy(&tc->ht);
return err;
}
@@ -4738,6 +4886,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mutex_destroy(&tc->t_lock);
mlx5_tc_ct_clean(tc->ct);
+ mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
}
@@ -4760,8 +4909,10 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
esw_chains(esw),
&esw->offloads.mod_hdr,
MLX5_FLOW_NAMESPACE_FDB);
- if (IS_ERR(uplink_priv->ct_priv))
- goto err_ct;
+
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev));
+#endif
mapping = mapping_create(sizeof(struct tunnel_match_key),
TUNNEL_INFO_BITS_MASK, true);
@@ -4800,8 +4951,10 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
+#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
-err_ct:
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
return err;
@@ -4819,6 +4972,9 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
+#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
}
@@ -4874,9 +5030,17 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
+ unsigned long flags = MLX5_TC_FLAG(INGRESS);
struct mlx5e_priv *priv = cb_priv;
+ if (!priv->netdev || !netif_device_present(priv->netdev))
+ return -EOPNOTSUPP;
+
+ if (mlx5e_is_uplink_rep(priv))
+ flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
+ else
+ flags |= MLX5_TC_FLAG(NIC_OFFLOAD);
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
@@ -4892,6 +5056,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
@@ -4899,7 +5064,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
- err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
+ err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find chain for chain tag: %d, err: %d\n",
@@ -4907,7 +5072,8 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
return false;
}
- if (chain) {
+ if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
+ chain = mapped_obj.chain;
tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
if (WARN_ON(!tc_skb_ext))
return false;
@@ -4920,6 +5086,9 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
zone_restore_id))
return false;
+ } else {
+ netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ return false;
}
#endif /* CONFIG_NET_TC_SKB_EXT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index bdbffe484fce..8ba62671f5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -133,6 +133,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
+ struct mlx5e_ptp *ptp_channel;
+
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
@@ -142,10 +144,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq_ix;
}
- if (unlikely(priv->channels.port_ptp))
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- mlx5e_use_ptpsq(skb))
- return mlx5e_select_ptpsq(dev, skb);
+ ptp_channel = READ_ONCE(priv->channels.ptp);
+ if (unlikely(ptp_channel) &&
+ test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
@@ -576,7 +579,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- prefetchw(wqe->data);
+ net_prefetchw(wqe->data);
*session = (struct mlx5e_tx_mpwqe) {
.wqe = wqe,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index d54da3797c30..833be29170a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -36,6 +36,7 @@
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
+#include "en_accel/ktls_txrx.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -171,6 +172,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
*/
clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
+ /* Keep after async ICOSQ CQ poll */
+ if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
+ busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1fa9c18563da..77c0ca655975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -271,7 +271,7 @@ static void init_eq_buf(struct mlx5_eq *eq)
struct mlx5_eqe *eqe;
int i;
- for (i = 0; i < eq->nent; i++) {
+ for (i = 0; i < eq_get_size(eq); i++) {
eqe = get_eqe(eq, i);
eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
}
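
With this series the EQ ring moves from one physically contiguous buffer to a fragmented buffer described by a frag-buf control (log stride and log size), so entries are located per fragment rather than from a single base pointer. A sketch of the addressing idea with illustrative numbers:

#include <stdio.h>

/* Locate entry i in a buffer split into page-sized fragments holding
 * 2^log_stride-byte entries: fragment index plus offset inside it. */
int main(void)
{
	unsigned int log_stride = 6;			/* 64-byte EQEs */
	unsigned int frag_sz = 4096;			/* one page per fragment */
	unsigned int per_frag = frag_sz >> log_stride;	/* 64 entries */
	unsigned int i = 130;

	printf("entry %u -> frag %u, offset %u\n",
	       i, i / per_frag, (i % per_frag) << log_stride);
	/* entry 130 -> frag 2, offset 128 */
	return 0;
}
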
@@ -281,8 +281,10 @@ static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_eq_param *param)
{
+ u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
+ u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
u8 vecidx = param->irq_index;
__be64 *pas;
@@ -297,16 +299,18 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
- eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
+
+ err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
+ &eq->frag_buf, dev->priv.numa_node);
if (err)
return err;
+ mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
- MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+ MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
@@ -315,7 +319,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
- mlx5_fill_page_array(&eq->buf, pas);
+ mlx5_fill_page_frag_array(&eq->frag_buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
@@ -326,11 +330,11 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
param->mask[i]);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
- MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+ MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
- eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
@@ -356,7 +360,7 @@ err_in:
kvfree(in);
err_buf:
- mlx5_buf_free(dev, &eq->buf);
+ mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
}
@@ -413,7 +417,7 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
eq->eqn);
synchronize_irq(eq->irqn);
- mlx5_buf_free(dev, &eq->buf);
+ mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
}
@@ -764,10 +768,11 @@ EXPORT_SYMBOL(mlx5_eq_destroy_generic);
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
u32 ci = eq->cons_index + cc;
+ u32 nent = eq_get_size(eq);
struct mlx5_eqe *eqe;
- eqe = get_eqe(eq, ci & (eq->nent - 1));
- eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
+ eqe = get_eqe(eq, ci & (nent - 1));
+ eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
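
The validity test above is the classic owner-bit ring trick: with nent a power of two, ci & (nent - 1) selects the slot while !!(ci & nent) flips on every full pass over the ring, so an entry is consumed only when its owner bit matches that pass parity. A standalone check of the same expression:

#include <stdio.h>

static int eqe_valid(unsigned int owner, unsigned int ci, unsigned int nent)
{
	/* same expression as above: valid when owner parity == pass parity */
	return ((owner & 1) ^ !!(ci & nent)) == 0;
}

int main(void)
{
	unsigned int nent = 4;

	printf("%d\n", eqe_valid(0, 2, nent));	/* 1: pass 0, owner 0 */
	printf("%d\n", eqe_valid(0, 6, nent));	/* 0: pass 1, stale entry */
	printf("%d\n", eqe_valid(1, 6, nent));	/* 1: pass 1, fresh entry */
	return 0;
}
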
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 3e19b1721303..0399a396d166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
}
if (!vport->egress.acl) {
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
if (IS_ERR(vport->egress.acl)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 26b37a0f8762..505bf811984a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport);
}
-static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}
@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++;
if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
table_size++;
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index 4a369669e51e..45b839116212 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -6,14 +6,14 @@
#include "helper.h"
struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
int acl_supported;
- int vport_index;
+ u16 vport_num;
int err;
acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
if (!acl_supported)
return ERR_PTR(-EOPNOTSUPP);
+ vport_num = vport->vport;
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
index 8dc4cab66a71..a47063fab57e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
@@ -8,7 +8,7 @@
/* General acl helper functions */
struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size);
/* Egress acl helper functions */
void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index d64fad2823e7..f75b86abaf1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
}
if (!vport->ingress.acl) {
- vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size);
if (IS_ERR(vport->ingress.acl)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 548c005ea633..39e948bc1204 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -7,7 +7,7 @@
#include "ofld.h"
static bool
-esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
{
return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
- vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes);
if (IS_ERR(vport->ingress.acl)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index cb1e181f4c6a..1703384eca95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
-static bool
-mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_UPLINK ||
(mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
@@ -120,11 +119,11 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
- return vport->dl_port;
+ return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
}
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum)
+ u16 vport_num, u32 controller, u32 sfnum)
{
struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {};
@@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+ devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
err = devlink_port_register(devlink, dl_port, dl_port_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
index cb9eafd1b4ee..21d56b49d14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -30,13 +30,13 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr);
#else
/* indir API stubs */
-struct mlx5_esw_indir_table *
+static inline struct mlx5_esw_indir_table *
mlx5_esw_indir_table_init(void)
{
return NULL;
}
-void
+static inline void
mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
{
}
@@ -57,7 +57,7 @@ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
{
}
-bool
+static inline bool
mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
u16 vport_num,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
new file mode 100644
index 000000000000..d9041b16611d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "esw/acl/lgcy.h"
+#include "esw/legacy.h"
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "fs_core.h"
+
+enum {
+ LEGACY_VEPA_PRIO = 0,
+ LEGACY_FDB_PRIO,
+};
+
+static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb;
+ int err;
+
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* num FTE 2, num FG 2 */
+ ft_attr.prio = LEGACY_VEPA_PRIO;
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
+ return err;
+ }
+ esw->fdb_table.legacy.vepa_fdb = fdb;
+
+ return 0;
+}
+
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+ esw_debug(esw->dev, "Destroy FDB Table\n");
+ if (!esw->fdb_table.legacy.fdb)
+ return;
+
+ if (esw->fdb_table.legacy.promisc_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+ if (esw->fdb_table.legacy.allmulti_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ if (esw->fdb_table.legacy.addr_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+
+ esw->fdb_table.legacy.fdb = NULL;
+ esw->fdb_table.legacy.addr_grp = NULL;
+ esw->fdb_table.legacy.allmulti_grp = NULL;
+ esw->fdb_table.legacy.promisc_grp = NULL;
+ atomic64_set(&esw->user_count, 0);
+}
+
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int table_size;
+ u32 *flow_group_in;
+ u8 *dmac;
+ int err = 0;
+
+ esw_debug(dev, "Create FDB log_max_size(%d)\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ ft_attr.max_fte = table_size;
+ ft_attr.prio = LEGACY_FDB_PRIO;
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.fdb = fdb;
+
+ /* Addresses group: Full match on unicast/multicast addresses */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ /* Preserve 2 entries for allmulti and promisc rules */
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
+ eth_broadcast_addr(dmac);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.addr_grp = g;
+
+ /* Allmulti group: One rule that forwards any mcast traffic */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+ eth_zero_addr(dmac);
+ dmac[0] = 0x01;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.allmulti_grp = g;
+
+ /* Promiscuous group:
+ * One rule that forwards all unmatched traffic from previous groups
+ */
+ eth_zero_addr(dmac);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.promisc_grp = g;
+
+out:
+ if (err)
+ esw_destroy_legacy_fdb_table(esw);
+
+ kvfree(flow_group_in);
+ return err;
+}
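
The index math reserves exactly the last two FTEs, matching the "Preserve 2 entries" comment: addresses get [0, table_size - 3], allmulti sits at table_size - 2, promisc at table_size - 1. A trivial worked example:

#include <stdio.h>

int main(void)
{
	unsigned int table_size = 1u << 3;	/* e.g. log_max_ft_size = 3 */

	printf("addr group:     [0, %u]\n", table_size - 3);	/* [0, 5] */
	printf("allmulti entry: %u\n", table_size - 2);		/* 6 */
	printf("promisc entry:  %u\n", table_size - 1);		/* 7 */
	return 0;
}
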
+
+static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+ esw_debug(esw->dev, "Destroy VEPA Table\n");
+ if (!esw->fdb_table.legacy.vepa_fdb)
+ return;
+
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
+ esw->fdb_table.legacy.vepa_fdb = NULL;
+}
+
+static int esw_create_legacy_table(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+ atomic64_set(&esw->user_count, 0);
+
+ err = esw_create_legacy_vepa_table(esw);
+ if (err)
+ return err;
+
+ err = esw_create_legacy_fdb_table(esw);
+ if (err)
+ esw_destroy_legacy_vepa_table(esw);
+
+ return err;
+}
+
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.legacy.vepa_uplink_rule)
+ mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
+
+ if (esw->fdb_table.legacy.vepa_star_rule)
+ mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
+
+ esw->fdb_table.legacy.vepa_uplink_rule = NULL;
+ esw->fdb_table.legacy.vepa_star_rule = NULL;
+}
+
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+ esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_vepa_table(esw);
+}
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+
+ ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ if (ret)
+ esw_destroy_legacy_table(esw);
+ return ret;
+}
+
+void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
+static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
+ u8 setting)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ void *misc;
+
+ if (!setting) {
+ esw_cleanup_vepa_rules(esw);
+ return 0;
+ }
+
+ if (esw->fdb_table.legacy.vepa_uplink_rule)
+ return 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Uplink rule forwards uplink traffic to the FDB */
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = esw->fdb_table.legacy.fdb;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ } else {
+ esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
+ }
+
+ /* Star rule to forward all traffic to uplink vport */
+ memset(&dest, 0, sizeof(dest));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = MLX5_VPORT_UPLINK;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ } else {
+ esw->fdb_table.legacy.vepa_star_rule = flow_rule;
+ }
+
+out:
+ kvfree(spec);
+ if (err)
+ esw_cleanup_vepa_rules(esw);
+ return err;
+}
+
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+{
+ int err = 0;
+
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = _mlx5_eswitch_set_vepa_locked(esw, setting);
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+{
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
+ return -EOPNOTSUPP;
+
+ *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+ return 0;
+}
+
+int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int ret;
+
+ /* Only non-manager vports need ACLs in legacy mode */
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return 0;
+
+ ret = esw_acl_ingress_lgcy_setup(esw, vport);
+ if (ret)
+ goto ingress_err;
+
+ ret = esw_acl_egress_lgcy_setup(esw, vport);
+ if (ret)
+ goto egress_err;
+
+ return 0;
+
+egress_err:
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ingress_err:
+ return ret;
+}
+
+void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return;
+
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+}
+
+int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport,
+ struct mlx5_vport_drop_stats *stats)
+{
+ u64 rx_discard_vport_down, tx_discard_vport_down;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ u64 bytes = 0;
+ int err = 0;
+
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
+ return 0;
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled)
+ goto unlock;
+
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
+ mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
+ &stats->rx_dropped, &bytes);
+
+ if (vport->ingress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
+ &stats->tx_dropped, &bytes);
+
+ if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+ !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ goto unlock;
+
+ err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
+ &rx_discard_vport_down,
+ &tx_discard_vport_down);
+ if (err)
+ goto unlock;
+
+ if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
+ stats->rx_dropped += rx_discard_vport_down;
+ if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ stats->tx_dropped += tx_discard_vport_down;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ u16 vport, u16 vlan, u8 qos)
+{
+ u8 set_flags = 0;
+ int err = 0;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ if (vlan || qos)
+ set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ if (!vlan)
+ goto unlock; /* compatibility with libvirt */
+
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ u16 vport, bool spoofchk)
+{
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ bool pschk;
+ int err = 0;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
+ if (pschk && !is_valid_ether_addr(evport->info.mac))
+ mlx5_core_warn(esw->dev,
+ "Spoofchk in set while MAC is invalid, vport(%d)\n",
+ evport->vport);
+ if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
+ if (err)
+ evport->info.spoofchk = pschk;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ u16 vport, bool setting)
+{
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int err = 0;
+
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ evport->info.trusted = setting;
+ if (evport->enabled)
+ esw_vport_change_handle_locked(evport);
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h
new file mode 100644
index 000000000000..e0820bb72b57
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_ESW_LEGACY_H__
+#define __MLX5_ESW_LEGACY_H__
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+struct mlx5_eswitch;
+
+int esw_legacy_enable(struct mlx5_eswitch *esw);
+void esw_legacy_disable(struct mlx5_eswitch *esw);
+
+int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport,
+ struct mlx5_vport_drop_stats *stats);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
new file mode 100644
index 000000000000..794012c5c476
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/skbuff.h>
+#include <net/psample.h>
+#include "en/mapping.h"
+#include "esw/sample.h"
+#include "eswitch.h"
+#include "en_tc.h"
+#include "fs_core.h"
+
+#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)
+
+static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+ .max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
+ .max_num_groups = 0, /* default num of groups */
+ .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
+};
+
+struct mlx5_esw_psample {
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *termtbl;
+ struct mlx5_flow_handle *termtbl_rule;
+ DECLARE_HASHTABLE(hashtbl, 8);
+ struct mutex ht_lock; /* protect hashtbl */
+ DECLARE_HASHTABLE(restore_hashtbl, 8);
+ struct mutex restore_lock; /* protect restore_hashtbl */
+};
+
+struct mlx5_sampler {
+ struct hlist_node hlist;
+ u32 sampler_id;
+ u32 sample_ratio;
+ u32 sample_table_id;
+ u32 default_table_id;
+ int count;
+};
+
+struct mlx5_sample_flow {
+ struct mlx5_sampler *sampler;
+ struct mlx5_sample_restore *restore;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_flow_handle *pre_rule;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5_sample_restore {
+ struct hlist_node hlist;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *rule;
+ u32 obj_id;
+ int count;
+};
+
+static int
+sampler_termtbl_create(struct mlx5_esw_psample *esw_psample)
+{
+ struct mlx5_core_dev *dev = esw_psample->priv->mdev;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_act act = {};
+ int err;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) {
+ mlx5_core_warn(dev, "termination table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ mlx5_core_warn(dev, "failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 1;
+ ft_attr.level = 1;
+ esw_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(esw_psample->termtbl)) {
+ err = PTR_ERR(esw_psample->termtbl);
+ mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err);
+ return err;
+ }
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.vport.num = esw->manager_vport;
+ esw_psample->termtbl_rule = mlx5_add_flow_rules(esw_psample->termtbl, NULL, &act, &dest, 1);
+ if (IS_ERR(esw_psample->termtbl_rule)) {
+ err = PTR_ERR(esw_psample->termtbl_rule);
+ mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err);
+ mlx5_destroy_flow_table(esw_psample->termtbl);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+sampler_termtbl_destroy(struct mlx5_esw_psample *esw_psample)
+{
+ mlx5_del_flow_rules(esw_psample->termtbl_rule);
+ mlx5_destroy_flow_table(esw_psample->termtbl);
+}
+
+static int
+sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5_sampler *sampler)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER))
+ return -EOPNOTSUPP;
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ return -EOPNOTSUPP;
+
+ obj = MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object);
+ MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB);
+ MLX5_SET(sampler_obj, obj, ignore_flow_level, 1);
+ MLX5_SET(sampler_obj, obj, level, 1);
+ MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio);
+ MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id);
+ MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void
+sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static u32
+sampler_hash(u32 sample_ratio, u32 default_table_id)
+{
+ return jhash_2words(sample_ratio, default_table_id, 0);
+}
+
+static int
+sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2)
+{
+ return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2;
+}
+
+static struct mlx5_sampler *
+sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_table_id)
+{
+ struct mlx5_sampler *sampler;
+ u32 hash_key;
+ int err;
+
+ mutex_lock(&esw_psample->ht_lock);
+ hash_key = sampler_hash(sample_ratio, default_table_id);
+ hash_for_each_possible(esw_psample->hashtbl, sampler, hlist, hash_key)
+ if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id,
+ sample_ratio, default_table_id))
+ goto add_ref;
+
+ sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
+ if (!sampler) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ sampler->sample_table_id = esw_psample->termtbl->id;
+ sampler->default_table_id = default_table_id;
+ sampler->sample_ratio = sample_ratio;
+
+ err = sampler_obj_create(esw_psample->priv->mdev, sampler);
+ if (err)
+ goto err_create;
+
+ hash_add(esw_psample->hashtbl, &sampler->hlist, hash_key);
+
+add_ref:
+ sampler->count++;
+ mutex_unlock(&esw_psample->ht_lock);
+ return sampler;
+
+err_create:
+ kfree(sampler);
+err_alloc:
+ mutex_unlock(&esw_psample->ht_lock);
+ return ERR_PTR(err);
+}
+
+static void
+sampler_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sampler *sampler)
+{
+ mutex_lock(&esw_psample->ht_lock);
+ if (--sampler->count == 0) {
+ hash_del(&sampler->hlist);
+ sampler_obj_destroy(esw_psample->priv->mdev, sampler->sampler_id);
+ kfree(sampler);
+ }
+ mutex_unlock(&esw_psample->ht_lock);
+}
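+
+/* Usage sketch for the pair above (hypothetical caller): both helpers take
+ * ht_lock, so a sampler keyed by <sample_ratio, default_table_id> is created
+ * once, shared via the count field, and destroyed on the last put.
+ *
+ *	sampler = sampler_get(esw_psample, rate, default_tbl->id);
+ *	if (IS_ERR(sampler))
+ *		return PTR_ERR(sampler);
+ *	...
+ *	sampler_put(esw_psample, sampler);
+ */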
+
+static struct mlx5_modify_hdr *
+sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ CHAIN_TO_REG, obj_id);
+ if (err)
+ goto err_set_regc0;
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_modify_hdr;
+ }
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ return modify_hdr;
+
+err_modify_hdr:
+ dealloc_mod_hdr_actions(&mod_acts);
+err_set_regc0:
+ return ERR_PTR(err);
+}
+
+static struct mlx5_sample_restore *
+sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id)
+{
+ struct mlx5_core_dev *mdev = esw_psample->priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_sample_restore *restore;
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ mutex_lock(&esw_psample->restore_lock);
+ hash_for_each_possible(esw_psample->restore_hashtbl, restore, hlist, obj_id)
+ if (restore->obj_id == obj_id)
+ goto add_ref;
+
+ restore = kzalloc(sizeof(*restore), GFP_KERNEL);
+ if (!restore) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ restore->obj_id = obj_id;
+
+ modify_hdr = sample_metadata_rule_get(mdev, obj_id);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_modify_hdr;
+ }
+ restore->modify_hdr = modify_hdr;
+
+ restore->rule = esw_add_restore_rule(esw, obj_id);
+ if (IS_ERR(restore->rule)) {
+ err = PTR_ERR(restore->rule);
+ goto err_restore;
+ }
+
+ hash_add(esw_psample->restore_hashtbl, &restore->hlist, obj_id);
+add_ref:
+ restore->count++;
+ mutex_unlock(&esw_psample->restore_lock);
+ return restore;
+
+err_restore:
+ mlx5_modify_header_dealloc(mdev, restore->modify_hdr);
+err_modify_hdr:
+ kfree(restore);
+err_alloc:
+ mutex_unlock(&esw_psample->restore_lock);
+ return ERR_PTR(err);
+}
+
+static void
+sample_restore_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sample_restore *restore)
+{
+ mutex_lock(&esw_psample->restore_lock);
+ if (--restore->count == 0)
+ hash_del(&restore->hlist);
+ mutex_unlock(&esw_psample->restore_lock);
+
+ if (!restore->count) {
+ mlx5_del_flow_rules(restore->rule);
+ mlx5_modify_header_dealloc(esw_psample->priv->mdev, restore->modify_hdr);
+ kfree(restore);
+ }
+}
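+
+/* Note on the split in sample_restore_put(): hash_del() runs under
+ * restore_lock, but the rule and modify header teardown is deferred until
+ * after the unlock. Once count reaches zero no other thread can look the
+ * entry up again, so the unlocked teardown is safe and keeps firmware
+ * commands out of the critical section.
+ */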
+
+void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj)
+{
+ u32 trunc_size = mapped_obj->sample.trunc_size;
+ struct psample_group psample_group = {};
+ struct psample_metadata md = {};
+
+ md.trunc_size = trunc_size ? min(trunc_size, skb->len) : skb->len;
+ md.in_ifindex = skb->dev->ifindex;
+ psample_group.group_num = mapped_obj->sample.group_id;
+ psample_group.net = &init_net;
+ skb_push(skb, skb->mac_len);
+
+ psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md);
+}
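+
+/* Receive-side hookup sketch (hypothetical, simplified): the mapped_obj is
+ * expected to be recovered from the reg_c0 value that the offload path below
+ * maps via esw->offloads.reg_c0_obj_pool.
+ *
+ *	err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
+ *	if (!err && mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE)
+ *		mlx5_esw_sample_skb(skb, &mapped_obj);
+ */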
+
+/* For the following typical flow table:
+ *
+ * +-------------------------------+
+ * + original flow table +
+ * +-------------------------------+
+ * + original match +
+ * +-------------------------------+
+ * + sample action + other actions +
+ * +-------------------------------+
+ *
+ * We translate the tc filter with sample action to the following HW model:
+ *
+ * +---------------------+
+ * + original flow table +
+ * +---------------------+
+ * + original match +
+ * +---------------------+
+ * |
+ * v
+ * +------------------------------------------------+
+ * + Flow Sampler Object +
+ * +------------------------------------------------+
+ * + sample ratio +
+ * +------------------------------------------------+
+ * + sample table id | default table id +
+ * +------------------------------------------------+
+ * | |
+ * v v
+ * +-----------------------------+ +----------------------------------------+
+ * + sample table + + default table per <vport, chain, prio> +
+ * +-----------------------------+ +----------------------------------------+
+ * + forward to management vport + + original match +
+ * +-----------------------------+ +----------------------------------------+
+ * + other actions +
+ * +----------------------------------------+
+ */
+struct mlx5_flow_handle *
+mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_vport_tbl_attr per_vport_tbl_attr;
+ struct mlx5_esw_flow_attr *pre_esw_attr;
+ struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5_sample_flow *sample_flow;
+ struct mlx5_sample_attr *sample_attr;
+ struct mlx5_flow_table *default_tbl;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_eswitch *esw;
+ u32 obj_id;
+ int err;
+
+ if (IS_ERR_OR_NULL(esw_psample))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* If the slow path flag is set, e.g. when the neigh is invalid for encap,
+ * don't offload the sample action.
+ */
+ esw = esw_psample->priv->mdev->priv.eswitch;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
+ if (!sample_flow)
+ return ERR_PTR(-ENOMEM);
+ esw_attr->sample->sample_flow = sample_flow;
+
+ /* Allocate a default table per vport, chain and prio. Otherwise, there is
+ * only one default table for the same sampler object and rules with
+ * different prio and chain may overlap. For the CT sample action, a per
+ * vport default table is needed to restore the metadata.
+ */
+ per_vport_tbl_attr.chain = attr->chain;
+ per_vport_tbl_attr.prio = attr->prio;
+ per_vport_tbl_attr.vport = esw_attr->in_rep->vport;
+ per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr);
+ if (IS_ERR(default_tbl)) {
+ err = PTR_ERR(default_tbl);
+ goto err_default_tbl;
+ }
+
+ /* Perform the original matches on the default table.
+ * Offload all actions except the sample action.
+ */
+ esw_attr->sample->sample_default_tbl = default_tbl;
+ /* When offloading sample and encap actions, if there is no valid
+ * neigh data struct, a slow path rule is offloaded first. The source
+ * port metadata match was set at that time and a per vport table is
+ * already allocated, so there is no need to match it again. Clear the
+ * source port metadata match.
+ */
+ mlx5_eswitch_clear_rule_source_port(esw, spec);
+ sample_flow->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ if (IS_ERR(sample_flow->rule)) {
+ err = PTR_ERR(sample_flow->rule);
+ goto err_offload_rule;
+ }
+
+ /* Create sampler object. */
+ sample_flow->sampler = sampler_get(esw_psample, esw_attr->sample->rate, default_tbl->id);
+ if (IS_ERR(sample_flow->sampler)) {
+ err = PTR_ERR(sample_flow->sampler);
+ goto err_sampler;
+ }
+
+ /* Create an id mapping reg_c0 value to sample object. */
+ restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
+ restore_obj.sample.group_id = esw_attr->sample->group_num;
+ restore_obj.sample.rate = esw_attr->sample->rate;
+ restore_obj.sample.trunc_size = esw_attr->sample->trunc_size;
+ err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
+ if (err)
+ goto err_obj_id;
+ esw_attr->sample->restore_obj_id = obj_id;
+
+ /* Create sample restore context. */
+ sample_flow->restore = sample_restore_get(esw_psample, obj_id);
+ if (IS_ERR(sample_flow->restore)) {
+ err = PTR_ERR(sample_flow->restore);
+ goto err_sample_restore;
+ }
+
+ /* Perform the original matches on the original table. Offload the
+ * sample action. The destination is the sampler object.
+ */
+ pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!pre_attr) {
+ err = -ENOMEM;
+ goto err_alloc_flow_attr;
+ }
+ sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL);
+ if (!sample_attr) {
+ err = -ENOMEM;
+ goto err_alloc_sample_attr;
+ }
+ pre_esw_attr = pre_attr->esw_attr;
+ pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
+ pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
+ pre_attr->chain = attr->chain;
+ pre_attr->prio = attr->prio;
+ pre_esw_attr->sample = sample_attr;
+ pre_esw_attr->sample->sampler_id = sample_flow->sampler->sampler_id;
+ pre_esw_attr->in_mdev = esw_attr->in_mdev;
+ pre_esw_attr->in_rep = esw_attr->in_rep;
+ sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr);
+ if (IS_ERR(sample_flow->pre_rule)) {
+ err = PTR_ERR(sample_flow->pre_rule);
+ goto err_pre_offload_rule;
+ }
+ sample_flow->pre_attr = pre_attr;
+
+ return sample_flow->rule;
+
+err_pre_offload_rule:
+ kfree(sample_attr);
+err_alloc_sample_attr:
+ kfree(pre_attr);
+err_alloc_flow_attr:
+ sample_restore_put(esw_psample, sample_flow->restore);
+err_sample_restore:
+ mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
+err_obj_id:
+ sampler_put(esw_psample, sample_flow->sampler);
+err_sampler:
+ /* For sample offload, the rule is added in default_tbl, so there is no
+ * need to call mlx5_esw_chains_put_table().
+ */
+ attr->prio = 0;
+ attr->chain = 0;
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr);
+err_offload_rule:
+ mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
+err_default_tbl:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_sample_unoffload(struct mlx5_esw_psample *esw_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_sample_flow *sample_flow;
+ struct mlx5_vport_tbl_attr tbl_attr;
+ struct mlx5_flow_attr *pre_attr;
+ struct mlx5_eswitch *esw;
+
+ if (IS_ERR_OR_NULL(esw_psample))
+ return;
+
+ /* If the slow path flag is set, the sample action is not offloaded, so
+ * there is no sample rule to delete.
+ */
+ esw = esw_psample->priv->mdev->priv.eswitch;
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ return;
+ }
+
+ sample_flow = esw_attr->sample->sample_flow;
+ pre_attr = sample_flow->pre_attr;
+ memset(pre_attr, 0, sizeof(*pre_attr));
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, pre_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr);
+
+ sample_restore_put(esw_psample, sample_flow->restore);
+ mapping_remove(esw->offloads.reg_c0_obj_pool, esw_attr->sample->restore_obj_id);
+ sampler_put(esw_psample, sample_flow->sampler);
+ tbl_attr.chain = attr->chain;
+ tbl_attr.prio = attr->prio;
+ tbl_attr.vport = esw_attr->in_rep->vport;
+ tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ mlx5_esw_vporttbl_put(esw, &tbl_attr);
+
+ kfree(pre_attr->esw_attr->sample);
+ kfree(pre_attr);
+ kfree(sample_flow);
+}
+
+struct mlx5_esw_psample *
+mlx5_esw_sample_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_esw_psample *esw_psample;
+ int err;
+
+ esw_psample = kzalloc(sizeof(*esw_psample), GFP_KERNEL);
+ if (!esw_psample)
+ return ERR_PTR(-ENOMEM);
+ esw_psample->priv = priv;
+ err = sampler_termtbl_create(esw_psample);
+ if (err)
+ goto err_termtbl;
+
+ mutex_init(&esw_psample->ht_lock);
+ mutex_init(&esw_psample->restore_lock);
+
+ return esw_psample;
+
+err_termtbl:
+ kfree(esw_psample);
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample)
+{
+ if (IS_ERR_OR_NULL(esw_psample))
+ return;
+
+ mutex_destroy(&esw_psample->restore_lock);
+ mutex_destroy(&esw_psample->ht_lock);
+ sampler_termtbl_destroy(esw_psample);
+ kfree(esw_psample);
+}
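+
+/* Init/cleanup pairing sketch (hypothetical call site): mlx5_esw_sample_init()
+ * returns an ERR_PTR on failure and every entry point above tolerates an
+ * error pointer, so callers may store the result unconditionally.
+ *
+ *	priv->esw_psample = mlx5_esw_sample_init(priv);
+ *	...
+ *	mlx5_esw_sample_cleanup(priv->esw_psample);
+ */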
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h
new file mode 100644
index 000000000000..2a3f4be10030
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_SAMPLE_H__
+#define __MLX5_EN_TC_SAMPLE_H__
+
+#include "en.h"
+#include "eswitch.h"
+
+struct mlx5e_priv;
+struct mlx5_flow_attr;
+struct mlx5_esw_psample;
+
+struct mlx5_sample_attr {
+ u32 group_num;
+ u32 rate;
+ u32 trunc_size;
+ u32 restore_obj_id;
+ u32 sampler_id;
+ struct mlx5_flow_table *sample_default_tbl;
+ struct mlx5_sample_flow *sample_flow;
+};
+
+void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
+
+struct mlx5_flow_handle *
+mlx5_esw_sample_offload(struct mlx5_esw_psample *sample_priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+void
+mlx5_esw_sample_unoffload(struct mlx5_esw_psample *sample_priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5_esw_psample *
+mlx5_esw_sample_init(struct mlx5e_priv *priv);
+
+void
+mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample);
+
+#endif /* __MLX5_EN_TC_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
new file mode 100644
index 000000000000..9e72118f2e4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021 Mellanox Technologies.
+
+#include "eswitch.h"
+
+/* This struct is used as a key to the hash table and we need it to be packed
+ * so the hash result is consistent.
+ */
+struct mlx5_vport_key {
+ u32 chain;
+ u16 prio;
+ u16 vport;
+ u16 vhca_id;
+ const struct esw_vport_tbl_namespace *vport_ns;
+} __packed;
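+
+/* Why __packed matters (illustrative): jhash() and memcmp() below cover
+ * sizeof(key) raw bytes, so any compiler-inserted padding would have to be
+ * cleared explicitly (e.g. memset() before filling the fields) for two
+ * logically equal keys to hash and compare the same. Packing the struct
+ * removes the padding instead.
+ */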
+
+struct mlx5_vport_table {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ struct mlx5_vport_key key;
+};
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
+ const struct esw_vport_tbl_namespace *vport_ns)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb;
+
+ if (vport_ns->max_num_groups)
+ ft_attr.autogroup.max_num_groups = vport_ns->max_num_groups;
+ else
+ ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
+ ft_attr.max_fte = vport_ns->max_fte;
+ ft_attr.prio = FDB_PER_VPORT;
+ ft_attr.flags = vport_ns->flags;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+ PTR_ERR(fdb));
+ }
+
+ return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+ struct mlx5_vport_tbl_attr *attr,
+ struct mlx5_vport_key *key)
+{
+ key->vport = attr->vport;
+ key->chain = attr->chain;
+ key->prio = attr->prio;
+ key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ key->vport_ns = attr->vport_ns;
+ return jhash(key, sizeof(*key), 0);
+}
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+ struct mlx5_vport_table *e;
+
+ hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+ if (!memcmp(&e->key, skey, sizeof(*skey)))
+ return e;
+
+ return NULL;
+}
+
+struct mlx5_flow_table *
+mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key skey;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ if (e) {
+ e->num_rules++;
+ goto out;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ fdb = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ fdb = ERR_PTR(-ENOENT);
+ goto err_ns;
+ }
+
+ fdb = esw_vport_tbl_create(esw, ns, attr->vport_ns);
+ if (IS_ERR(fdb))
+ goto err_ns;
+
+ e->fdb = fdb;
+ e->num_rules = 1;
+ e->key = skey;
+ hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return e->fdb;
+
+err_ns:
+ kfree(e);
+err_alloc:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return fdb;
+}
+
+void
+mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
+{
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key key;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &key);
+ e = esw_vport_tbl_lookup(esw, &key, hkey);
+ if (!e || --e->num_rules)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_table(e->fdb);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
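+
+/* Usage sketch for the get/put pair above (see mlx5_esw_sample_offload() in
+ * esw/sample.c for a real caller): put must be given the same attr contents
+ * as get, since the attr is re-hashed into the lookup key on put.
+ *
+ *	attr.chain = chain;
+ *	attr.prio = prio;
+ *	attr.vport = vport_num;
+ *	attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
+ *	fdb = mlx5_esw_vporttbl_get(esw, &attr);
+ *	...
+ *	mlx5_esw_vporttbl_put(esw, &attr);
+ */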
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index aba17835465b..570f2280823c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
+#include "esw/legacy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
@@ -61,9 +62,6 @@ struct vport_addr {
bool mc_promisc;
};
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
-static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -90,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- u16 idx;
+ struct mlx5_vport *vport;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return ERR_PTR(-EPERM);
- idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-
- if (idx > esw->total_vports - 1) {
- esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
- vport_num, idx);
+ vport = xa_load(&esw->vports, vport_num);
+ if (!vport) {
+ esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
return ERR_PTR(-EINVAL);
}
-
- return &esw->vports[idx];
+ return vport;
}
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
@@ -278,224 +273,6 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-enum {
- LEGACY_VEPA_PRIO = 0,
- LEGACY_FDB_PRIO,
-};
-
-static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *fdb;
- int err;
-
- root_ns = mlx5_get_fdb_sub_ns(dev, 0);
- if (!root_ns) {
- esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -EOPNOTSUPP;
- }
-
- /* num FTE 2, num FG 2 */
- ft_attr.prio = LEGACY_VEPA_PRIO;
- ft_attr.max_fte = 2;
- ft_attr.autogroup.max_num_groups = 2;
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
- return err;
- }
- esw->fdb_table.legacy.vepa_fdb = fdb;
-
- return 0;
-}
-
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_group *g;
- void *match_criteria;
- int table_size;
- u32 *flow_group_in;
- u8 *dmac;
- int err = 0;
-
- esw_debug(dev, "Create FDB log_max_size(%d)\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
- root_ns = mlx5_get_fdb_sub_ns(dev, 0);
- if (!root_ns) {
- esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -EOPNOTSUPP;
- }
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- ft_attr.max_fte = table_size;
- ft_attr.prio = LEGACY_FDB_PRIO;
- fdb = mlx5_create_flow_table(root_ns, &ft_attr);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create FDB Table err %d\n", err);
- goto out;
- }
- esw->fdb_table.legacy.fdb = fdb;
-
- /* Addresses group : Full match unicast/multicast addresses */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- /* Preserve 2 entries for allmulti and promisc rules*/
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
- eth_broadcast_addr(dmac);
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create flow group err(%d)\n", err);
- goto out;
- }
- esw->fdb_table.legacy.addr_grp = g;
-
- /* Allmulti group : One rule that forwards any mcast traffic */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
- eth_zero_addr(dmac);
- dmac[0] = 0x01;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
- goto out;
- }
- esw->fdb_table.legacy.allmulti_grp = g;
-
- /* Promiscuous group :
- * One rule that forward all unmatched traffic from previous groups
- */
- eth_zero_addr(dmac);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
- goto out;
- }
- esw->fdb_table.legacy.promisc_grp = g;
-
-out:
- if (err)
- esw_destroy_legacy_fdb_table(esw);
-
- kvfree(flow_group_in);
- return err;
-}
-
-static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
-{
- esw_debug(esw->dev, "Destroy VEPA Table\n");
- if (!esw->fdb_table.legacy.vepa_fdb)
- return;
-
- mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
- esw->fdb_table.legacy.vepa_fdb = NULL;
-}
-
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
- esw_debug(esw->dev, "Destroy FDB Table\n");
- if (!esw->fdb_table.legacy.fdb)
- return;
-
- if (esw->fdb_table.legacy.promisc_grp)
- mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
- if (esw->fdb_table.legacy.allmulti_grp)
- mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
- if (esw->fdb_table.legacy.addr_grp)
- mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
- mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
-
- esw->fdb_table.legacy.fdb = NULL;
- esw->fdb_table.legacy.addr_grp = NULL;
- esw->fdb_table.legacy.allmulti_grp = NULL;
- esw->fdb_table.legacy.promisc_grp = NULL;
-}
-
-static int esw_create_legacy_table(struct mlx5_eswitch *esw)
-{
- int err;
-
- memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
-
- err = esw_create_legacy_vepa_table(esw);
- if (err)
- return err;
-
- err = esw_create_legacy_fdb_table(esw);
- if (err)
- esw_destroy_legacy_vepa_table(esw);
-
- return err;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
- esw_cleanup_vepa_rules(esw);
- esw_destroy_legacy_fdb_table(esw);
- esw_destroy_legacy_vepa_table(esw);
-}
-
-#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
- MLX5_VPORT_MC_ADDR_CHANGE | \
- MLX5_VPORT_PROMISC_CHANGE)
-
-static int esw_legacy_enable(struct mlx5_eswitch *esw)
-{
- struct mlx5_vport *vport;
- int ret, i;
-
- ret = esw_create_legacy_table(esw);
- if (ret)
- return ret;
-
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
-
- ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
- if (ret)
- esw_destroy_legacy_table(esw);
- return ret;
-}
-
-static void esw_legacy_disable(struct mlx5_eswitch *esw)
-{
- struct esw_mc_addr *mc_promisc;
-
- mlx5_eswitch_disable_pf_vf_vports(esw);
-
- mc_promisc = &esw->mc_promisc;
- if (mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
- esw_destroy_legacy_table(esw);
-}
-
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -565,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
{
u8 *mac = vaddr->node.addr;
struct mlx5_vport *vport;
- u16 i, vport_num;
+ unsigned long i;
+ u16 vport_num;
- mlx5_esw_for_all_vports(esw, i, vport) {
+ mlx5_esw_for_each_vport(esw, i, vport) {
struct hlist_head *vport_hash = vport->mc_list;
struct vport_addr *iter_vaddr =
l2addr_hash_find(vport_hash,
@@ -917,7 +695,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
(promisc_all || promisc_mc));
}
-static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
+void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = vport->dev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1141,6 +919,8 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
if (!vport->qos.enabled)
return -EOPNOTSUPP;
@@ -1166,56 +946,20 @@ static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
((u8 *)node_guid)[0] = mac[5];
}
-static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int ret;
-
- /* Only non manager vports need ACL in legacy mode */
- if (mlx5_esw_is_manager_vport(esw, vport->vport))
- return 0;
-
- ret = esw_acl_ingress_lgcy_setup(esw, vport);
- if (ret)
- goto ingress_err;
-
- ret = esw_acl_egress_lgcy_setup(esw, vport);
- if (ret)
- goto egress_err;
-
- return 0;
-
-egress_err:
- esw_acl_ingress_lgcy_cleanup(esw, vport);
-ingress_err:
- return ret;
-}
-
static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (esw->mode == MLX5_ESWITCH_LEGACY)
- return esw_vport_create_legacy_acl_tables(esw, vport);
+ return esw_legacy_vport_acl_setup(esw, vport);
else
return esw_vport_create_offloads_acl_tables(esw, vport);
}
-static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-
-{
- if (mlx5_esw_is_manager_vport(esw, vport->vport))
- return;
-
- esw_acl_egress_lgcy_cleanup(esw, vport);
- esw_acl_ingress_lgcy_cleanup(esw, vport);
-}
-
static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_vport_destroy_legacy_acl_tables(esw, vport);
+ esw_legacy_vport_acl_cleanup(esw, vport);
else
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
@@ -1231,7 +975,7 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
return err;
/* Attach vport to the eswitch rate limiter */
- esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
+ esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
@@ -1279,6 +1023,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
int ret;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
@@ -1326,6 +1072,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
mutex_lock(&esw->state_lock);
if (!vport->enabled)
@@ -1382,15 +1130,9 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
- u16 max_sf_vports;
u32 *out;
int err;
- max_sf_vports = mlx5_sf_max_functions(dev);
- /* Device interface is array of 64-bits */
- if (max_sf_vports)
- outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
-
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
@@ -1431,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
+ unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->qos, 0, sizeof(vport->qos));
@@ -1441,8 +1183,6 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
}
/* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1471,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
- int i;
+ struct mlx5_vport *vport;
+ unsigned long i;
- mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
- mlx5_eswitch_unload_vport(esw, i);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ if (!vport->enabled)
+ continue;
+ mlx5_eswitch_unload_vport(esw, vport->vport);
+ }
}
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
+ struct mlx5_vport *vport;
+ unsigned long i;
int err;
- int i;
- mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
- err = mlx5_eswitch_load_vport(esw, i, enabled_events);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
if (err)
goto vf_err;
}
@@ -1492,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
return 0;
vf_err:
- mlx5_eswitch_unload_vf_vports(esw, i - 1);
+ mlx5_eswitch_unload_vf_vports(esw, num_vfs);
return err;
}
@@ -1625,6 +1370,47 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
blocking_notifier_call_chain(&esw->n_head, 0, &info);
}
+static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int total_vports;
+ int err;
+
+ total_vports = mlx5_eswitch_get_total_vports(dev);
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = mlx5_fs_egress_acls_init(dev, total_vports);
+ if (err)
+ return err;
+ } else {
+ esw_warn(dev, "egress ACL is not supported by FW\n");
+ }
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = mlx5_fs_ingress_acls_init(dev, total_vports);
+ if (err)
+ goto err;
+ } else {
+ esw_warn(dev, "ingress ACL is not supported by FW\n");
+ }
+ return 0;
+
+err:
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ mlx5_fs_egress_acls_cleanup(dev);
+ return err;
+}
+
+static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ mlx5_fs_ingress_acls_cleanup(dev);
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ mlx5_fs_egress_acls_cleanup(dev);
+}
+
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
@@ -1653,14 +1439,12 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
return -EOPNOTSUPP;
}
- if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
-
- if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "engress ACL is not supported by FW\n");
-
mlx5_eswitch_get_devlink_param(esw);
+ err = mlx5_esw_acls_ns_init(esw);
+ if (err)
+ return err;
+
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
esw_create_tsar(esw);
@@ -1696,6 +1480,7 @@ abort:
mlx5_rescan_drivers(esw->dev);
esw_destroy_tsar(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1711,10 +1496,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
int ret;
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return 0;
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
} else {
@@ -1726,7 +1511,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!ret)
esw->esw_funcs.num_vfs = num_vfs;
}
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return ret;
}
@@ -1764,6 +1549,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_rescan_drivers(esw->dev);
esw_destroy_tsar(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
@@ -1771,33 +1557,170 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return;
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw, clear_vf);
esw->esw_funcs.num_vfs = 0;
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
+}
+
+static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
+{
+ u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
+}
+
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ if (!mlx5_core_is_ecpf(dev)) {
+ *max_sfs = 0;
+ return 0;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
+ *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
+static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
+ int index, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+ int err;
+
+ vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+ if (!vport)
+ return -ENOMEM;
+
+ vport->dev = esw->dev;
+ vport->vport = vport_num;
+ vport->index = index;
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
+ err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
+ if (err)
+ goto insert_err;
+
+ esw->total_vports++;
+ return 0;
+
+insert_err:
+ kfree(vport);
+ return err;
+}
+
+static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ xa_erase(&esw->vports, vport->vport);
+ kfree(vport);
+}
+
+static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vport(esw, i, vport)
+ mlx5_esw_vport_free(esw, vport);
+ xa_destroy(&esw->vports);
+}
+
+static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ u16 max_host_pf_sfs;
+ u16 base_sf_num;
+ int idx = 0;
+ int err;
+ int i;
+
+ xa_init(&esw->vports);
+
+ err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
+ if (err)
+ goto err;
+ if (esw->first_host_vport == MLX5_VPORT_PF)
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+
+ for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+ }
+ base_sf_num = mlx5_sf_start_function_id(dev);
+ for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
+ idx++;
+ }
+
+ err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
+ if (err)
+ goto err;
+ for (i = 0; i < max_host_pf_sfs; i++) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
+ idx++;
+ }
+
+ if (mlx5_ecpf_vport_exists(dev)) {
+ err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
+ if (err)
+ goto err;
+ idx++;
+ }
+ err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
+ if (err)
+ goto err;
+ return 0;
+
+err:
+ mlx5_esw_vports_cleanup(esw);
+ return err;
}
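+
+/* The xa_set_mark() calls above are what mark-aware vport walks key off; a
+ * minimal sketch of visiting only SF vports (hypothetical helper, assuming
+ * the marks set in mlx5_esw_vports_init()):
+ *
+ *	struct mlx5_vport *vport;
+ *	unsigned long i;
+ *
+ *	xa_for_each_marked(&esw->vports, i, vport, MLX5_ESW_VPT_SF)
+ *		handle_sf_vport(vport);
+ */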
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int total_vports;
- int err, i;
+ int err;
if (!MLX5_VPORT_MANAGER(dev))
return 0;
- total_vports = mlx5_eswitch_get_total_vports(dev);
-
- esw_info(dev,
- "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
- total_vports,
- MLX5_MAX_UC_PER_VPORT(dev),
- MLX5_MAX_MC_PER_VPORT(dev));
-
esw = kzalloc(sizeof(*esw), GFP_KERNEL);
if (!esw)
return -ENOMEM;
@@ -1812,18 +1735,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
- esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
- GFP_KERNEL);
- if (!esw->vports) {
- err = -ENOMEM;
+ err = mlx5_esw_vports_init(esw);
+ if (err)
goto abort;
- }
-
- esw->total_vports = total_vports;
err = esw_offloads_init_reps(esw);
if (err)
- goto abort;
+ goto reps_err;
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
@@ -1834,15 +1752,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
ida_init(&esw->offloads.vport_metadata_ida);
xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
- mutex_init(&esw->mode_lock);
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
- vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
- vport->dev = dev;
- INIT_WORK(&vport->vport_change_handler,
- esw_vport_change_handler);
- }
+ init_rwsem(&esw->mode_lock);
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1850,12 +1760,19 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+
+ esw_info(dev,
+ "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
+ esw->total_vports,
+ MLX5_MAX_UC_PER_VPORT(dev),
+ MLX5_MAX_MC_PER_VPORT(dev));
return 0;
+
+reps_err:
+ mlx5_esw_vports_cleanup(esw);
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
- esw_offloads_cleanup_reps(esw);
- kfree(esw->vports);
kfree(esw);
return err;
}
@@ -1869,8 +1786,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
- esw_offloads_cleanup_reps(esw);
- mutex_destroy(&esw->mode_lock);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
xa_destroy(&esw->offloads.vhca_map);
@@ -1878,7 +1793,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
- kfree(esw->vports);
+ esw_offloads_cleanup_reps(esw);
+ mlx5_esw_vports_cleanup(esw);
kfree(esw);
}
@@ -1937,8 +1853,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return false;
+
+ return xa_get_mark(&esw->vports, vport_num, mark);
+}
+
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
+}
+
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
+}
+
static bool
-is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_PF ||
mlx5_eswitch_is_vf_vport(esw, vport_num) ||
@@ -2023,7 +1960,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int other_vport = 1;
int err = 0;
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
@@ -2034,6 +1971,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
vport = 0;
}
mutex_lock(&esw->state_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
@@ -2067,8 +2008,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- ivi->min_tx_rate = evport->info.min_rate;
- ivi->max_tx_rate = evport->info.max_rate;
+ ivi->min_tx_rate = evport->qos.min_rate;
+ ivi->max_tx_rate = evport->qos.max_rate;
mutex_unlock(&esw->state_lock);
return 0;
@@ -2101,196 +2042,17 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
return err;
}
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
- u16 vport, u16 vlan, u8 qos)
-{
- u8 set_flags = 0;
- int err;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
-
- if (vlan || qos)
- set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
-
- mutex_lock(&esw->state_lock);
- err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
- mutex_unlock(&esw->state_lock);
-
- return err;
-}
-
-int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
- u16 vport, bool spoofchk)
-{
- struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
- bool pschk;
- int err = 0;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
- if (IS_ERR(evport))
- return PTR_ERR(evport);
-
- mutex_lock(&esw->state_lock);
- pschk = evport->info.spoofchk;
- evport->info.spoofchk = spoofchk;
- if (pschk && !is_valid_ether_addr(evport->info.mac))
- mlx5_core_warn(esw->dev,
- "Spoofchk in set while MAC is invalid, vport(%d)\n",
- evport->vport);
- if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
- err = esw_acl_ingress_lgcy_setup(esw, evport);
- if (err)
- evport->info.spoofchk = pschk;
- mutex_unlock(&esw->state_lock);
-
- return err;
-}
-
-static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.legacy.vepa_uplink_rule)
- mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
-
- if (esw->fdb_table.legacy.vepa_star_rule)
- mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
-
- esw->fdb_table.legacy.vepa_uplink_rule = NULL;
- esw->fdb_table.legacy.vepa_star_rule = NULL;
-}
-
-static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
- u8 setting)
-{
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_flow_spec *spec;
- int err = 0;
- void *misc;
-
- if (!setting) {
- esw_cleanup_vepa_rules(esw);
- return 0;
- }
-
- if (esw->fdb_table.legacy.vepa_uplink_rule)
- return 0;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- /* Uplink rule forward uplink traffic to FDB */
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
-
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = esw->fdb_table.legacy.fdb;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
- &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- goto out;
- } else {
- esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
- }
-
- /* Star rule to forward all traffic to uplink vport */
- memset(&dest, 0, sizeof(dest));
- dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport.num = MLX5_VPORT_UPLINK;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
- &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- goto out;
- } else {
- esw->fdb_table.legacy.vepa_star_rule = flow_rule;
- }
-
-out:
- kvfree(spec);
- if (err)
- esw_cleanup_vepa_rules(esw);
- return err;
-}
-
-int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
-{
- int err = 0;
-
- if (!esw)
- return -EOPNOTSUPP;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
-
- mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- err = _mlx5_eswitch_set_vepa_locked(esw, setting);
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
-{
- if (!esw)
- return -EOPNOTSUPP;
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
-
- if (esw->mode != MLX5_ESWITCH_LEGACY)
- return -EOPNOTSUPP;
-
- *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
- return 0;
-}
-
-int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
- u16 vport, bool setting)
-{
- struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
-
- if (!ESW_ALLOWED(esw))
- return -EPERM;
- if (IS_ERR(evport))
- return PTR_ERR(evport);
-
- mutex_lock(&esw->state_lock);
- evport->info.trusted = setting;
- if (evport->enabled)
- esw_vport_change_handle_locked(evport);
- mutex_unlock(&esw->state_lock);
-
- return 0;
-}
-
static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport;
u32 max_guarantee = 0;
- int i;
+ unsigned long i;
- mlx5_esw_for_all_vports(esw, i, evport) {
- if (!evport->enabled || evport->info.min_rate < max_guarantee)
+ mlx5_esw_for_each_vport(esw, i, evport) {
+ if (!evport->enabled || evport->qos.min_rate < max_guarantee)
continue;
- max_guarantee = evport->info.min_rate;
+ max_guarantee = evport->qos.min_rate;
}
if (max_guarantee)
@@ -2305,15 +2067,15 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
+ unsigned long i;
u32 bw_share;
int err;
- int i;
- mlx5_esw_for_all_vports(esw, i, evport) {
+ mlx5_esw_for_each_vport(esw, i, evport) {
if (!evport->enabled)
continue;
- vport_min_rate = evport->info.min_rate;
- vport_max_rate = evport->info.max_rate;
+ vport_min_rate = evport->qos.min_rate;
+ vport_max_rate = evport->qos.max_rate;
bw_share = 0;
if (divider)
@@ -2345,7 +2107,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
bool max_rate_supported;
int err = 0;
- if (!ESW_ALLOWED(esw))
+ if (!mlx5_esw_allowed(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
@@ -2360,68 +2122,24 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
mutex_lock(&esw->state_lock);
- if (min_rate == evport->info.min_rate)
+ if (min_rate == evport->qos.min_rate)
goto set_max_rate;
- previous_min_rate = evport->info.min_rate;
- evport->info.min_rate = min_rate;
+ previous_min_rate = evport->qos.min_rate;
+ evport->qos.min_rate = min_rate;
err = normalize_vports_min_rate(esw);
if (err) {
- evport->info.min_rate = previous_min_rate;
+ evport->qos.min_rate = previous_min_rate;
goto unlock;
}
set_max_rate:
- if (max_rate == evport->info.max_rate)
+ if (max_rate == evport->qos.max_rate)
goto unlock;
err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
if (!err)
- evport->info.max_rate = max_rate;
-
-unlock:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
- struct mlx5_vport *vport,
- struct mlx5_vport_drop_stats *stats)
-{
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- u64 rx_discard_vport_down, tx_discard_vport_down;
- u64 bytes = 0;
- int err = 0;
-
- if (esw->mode != MLX5_ESWITCH_LEGACY)
- return 0;
-
- mutex_lock(&esw->state_lock);
- if (!vport->enabled)
- goto unlock;
-
- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
- mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
- &stats->rx_dropped, &bytes);
-
- if (vport->ingress.legacy.drop_counter)
- mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
- &stats->tx_dropped, &bytes);
-
- if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
- !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
- goto unlock;
-
- err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
- &rx_discard_vport_down,
- &tx_discard_vport_down);
- if (err)
- goto unlock;
-
- if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
- stats->rx_dropped += rx_discard_vport_down;
- if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
- stats->tx_dropped += tx_discard_vport_down;
+ evport->qos.max_rate = max_rate;
unlock:
mutex_unlock(&esw->state_lock);
@@ -2495,7 +2213,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
- err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
if (err)
goto free_out;
vf_stats->rx_dropped = stats.rx_dropped;
@@ -2510,7 +2228,7 @@ u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -2520,7 +2238,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
struct mlx5_eswitch *esw;
esw = dev->priv.eswitch;
- return ESW_ALLOWED(esw) ? esw->offloads.encap :
+ return mlx5_esw_allowed(esw) ? esw->offloads.encap :
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
@@ -2552,3 +2270,110 @@ void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifie
{
blocking_notifier_chain_unregister(&esw->n_head, nb);
}
+
+/**
+ * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
+ * @mdev: mlx5 core device.
+ *
+ * Should be called by esw resource callers.
+ *
+ * Return: true on success, false otherwise.
+ */
+bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ /* e.g. a VF doesn't have an eswitch, so there is nothing to do */
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+ if (down_read_trylock(&esw->mode_lock) != 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * mlx5_esw_release() - Release a read lock on esw mode lock.
+ * @mdev: mlx5 core device.
+ */
+void mlx5_esw_release(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ if (mlx5_esw_allowed(esw))
+ up_read(&esw->mode_lock);
+}
+
+/**
+ * mlx5_esw_get() - Increase esw user count.
+ * @mdev: mlx5 core device.
+ */
+void mlx5_esw_get(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ if (mlx5_esw_allowed(esw))
+ atomic64_inc(&esw->user_count);
+}
+
+/**
+ * mlx5_esw_put() - Decrease esw user count.
+ * @mdev: mlx5 core device.
+ */
+void mlx5_esw_put(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ if (mlx5_esw_allowed(esw))
+ atomic64_dec_if_positive(&esw->user_count);
+}
+
+/**
+ * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
+ * @esw: eswitch device.
+ *
+ * Should be called by the esw mode change routine.
+ *
+ * Return:
+ * * 0 - esw mode if successfully locked and refcount is 0.
+ * * -EBUSY - refcount is not 0.
+ * * -EINVAL - In the middle of switching mode or lock is already held.
+ */
+int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
+{
+ if (down_write_trylock(&esw->mode_lock) == 0)
+ return -EINVAL;
+
+ if (atomic64_read(&esw->user_count) > 0) {
+ up_write(&esw->mode_lock);
+ return -EBUSY;
+ }
+
+ return esw->mode;
+}
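+
+/* Locking-protocol sketch: resource creation paths take the read side via
+ * mlx5_esw_hold()/mlx5_esw_release() plus a mlx5_esw_get()/mlx5_esw_put()
+ * pair, while a mode change takes the write side (hypothetical caller):
+ *
+ *	mode = mlx5_esw_try_lock(esw);
+ *	if (mode < 0)
+ *		return mode;		// -EINVAL or -EBUSY
+ *	... change eswitch mode ...
+ *	mlx5_esw_unlock(esw);
+ */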
+
+/**
+ * mlx5_esw_unlock() - Release write lock on esw mode lock
+ * @esw: eswitch device.
+ */
+void mlx5_esw_unlock(struct mlx5_eswitch *esw)
+{
+ up_write(&esw->mode_lock);
+}
+
+/**
+ * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
+ *
+ * @dev: Pointer to core device
+ *
+ * mlx5_eswitch_get_total_vports returns the total number of eswitch vports.
+ */
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw;
+
+ esw = dev->priv.eswitch;
+ return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index fdf5c8c05c1b..64ccb2bc0b58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -46,6 +46,24 @@
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
+#include "esw/sample.h"
+
+enum mlx5_mapped_obj_type {
+ MLX5_MAPPED_OBJ_CHAIN,
+ MLX5_MAPPED_OBJ_SAMPLE,
+};
+
+struct mlx5_mapped_obj {
+ enum mlx5_mapped_obj_type type;
+ union {
+ u32 chain;
+ struct {
+ u32 group_id;
+ u32 rate;
+ u32 trunc_size;
+ } sample;
+ };
+};
#ifdef CONFIG_MLX5_ESWITCH
@@ -118,13 +136,11 @@ struct mlx5_vport_drop_stats {
struct mlx5_vport_info {
u8 mac[ETH_ALEN];
u16 vlan;
- u8 qos;
u64 node_guid;
int link_state;
- u32 min_rate;
- u32 max_rate;
- bool spoofchk;
- bool trusted;
+ u8 qos;
+ u8 spoofchk: 1;
+ u8 trusted: 1;
};
/* Vport context events */
@@ -136,7 +152,6 @@ enum mlx5_eswitch_vport_event {
struct mlx5_vport {
struct mlx5_core_dev *dev;
- int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
struct mlx5_flow_handle *promisc_rule;
@@ -154,10 +169,14 @@ struct mlx5_vport {
bool enabled;
u32 esw_tsar_ix;
u32 bw_share;
+ u32 min_rate;
+ u32 max_rate;
} qos;
+ u16 vport;
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
+ int index;
struct devlink_port *dl_port;
};
@@ -206,10 +225,11 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads_restore;
struct mlx5_flow_group *restore_group;
struct mlx5_modify_hdr *restore_copy_hdr_id;
+ struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
- struct mlx5_eswitch_rep *vport_reps;
+ struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */
@@ -259,7 +279,7 @@ struct mlx5_eswitch {
struct esw_mc_addr mc_promisc;
/* end of legacy */
struct workqueue_struct *work_queue;
- struct mlx5_vport *vports;
+ struct xarray vports;
u32 flags;
int total_vports;
int enabled_vports;
@@ -271,7 +291,8 @@ struct mlx5_eswitch {
/* Protects eswitch mode change that occurs via one or more
* user commands, i.e. sriov state change, devlink commands.
*/
- struct mutex mode_lock;
+ struct rw_semaphore mode_lock;
+ atomic64_t user_count;
struct {
bool enabled;
@@ -294,6 +315,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
@@ -356,6 +379,9 @@ void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
struct mlx5_termtbl_handle *tt);
+void
+mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -403,6 +429,7 @@ enum {
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
+ MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
};
struct mlx5_esw_flow_attr {
@@ -427,6 +454,7 @@ struct mlx5_esw_flow_attr {
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_rx_tun_attr *rx_tun_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
+ struct mlx5_sample_attr *sample;
};
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
@@ -494,6 +522,11 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
#define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
+static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
+{
+ return esw && MLX5_ESWITCH_MANAGER(esw->dev);
+}
+
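[mlx5_esw_allowed() becomes the common guard for eswitch entry points that may run on setups without an eswitch manager. A hedged sketch of the calling convention; the function name and the -EPERM choice are illustrative:]

	static int example_legacy_op(struct mlx5_core_dev *dev)
	{
		struct mlx5_eswitch *esw = dev->priv.eswitch;

		/* Reject the operation early when no eswitch manager exists. */
		if (!mlx5_esw_allowed(esw))
			return -EPERM;
		/* ... safe to dereference esw from here on ... */
		return 0;
	}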
/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
@@ -513,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
-static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
-{
- /* PF and VF vports indices start from 0 to max_vfs */
- return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-
-static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
-{
- return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
-}
-
-static inline int
-mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num - mlx5_sf_start_function_id(esw->dev) +
- MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-
-static inline u16
-mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
-{
- return mlx5_sf_start_function_id(esw->dev) + idx -
- (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
-}
-
-static inline bool
-mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
- return mlx5_sf_supported(esw->dev) &&
- vport_num >= mlx5_sf_start_function_id(esw->dev) &&
- (vport_num < (mlx5_sf_start_function_id(esw->dev) +
- mlx5_sf_max_functions(esw->dev)));
-}
-
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev);
}
-static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
-{
- /* Uplink always locate at the last element of the array.*/
- return esw->total_vports - 1;
-}
-
-static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
-{
- return esw->total_vports - 2;
-}
-
-static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
- u16 vport_num)
-{
- if (vport_num == MLX5_VPORT_ECPF) {
- if (!mlx5_ecpf_vport_exists(esw->dev))
- esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
- return mlx5_eswitch_ecpf_idx(esw);
- }
-
- if (vport_num == MLX5_VPORT_UPLINK)
- return mlx5_eswitch_uplink_idx(esw);
-
- if (mlx5_esw_is_sf_vport(esw, vport_num))
- return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
-
- /* PF and VF vports start from 0 to max_vfs */
- return vport_num;
-}
-
-static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
- int index)
-{
- if (index == mlx5_eswitch_ecpf_idx(esw) &&
- mlx5_ecpf_vport_exists(esw->dev))
- return MLX5_VPORT_ECPF;
-
- if (index == mlx5_eswitch_uplink_idx(esw))
- return MLX5_VPORT_UPLINK;
-
- /* SF vports indices are after VFs and before ECPF */
- if (mlx5_sf_supported(esw->dev) &&
- index > mlx5_core_max_vfs(esw->dev))
- return mlx5_esw_sf_vport_index_to_num(esw, index);
-
- /* PF and VF vports start from 0 to max_vfs */
- return index;
-}
-
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
u16 vport_num)
@@ -617,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
-/* The vport getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
+/* Each mark identifies an eswitch vport type.
+ * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports with
+ * a single mark.
+ * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
+ * MLX5_ESW_VPT_SF identifies an SF vport.
*/
-#define mlx5_esw_for_all_vports(esw, i, vport) \
- for ((i) = MLX5_VPORT_PF; \
- (vport) = &(esw)->vports[i], \
- (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
- for ((i) = (esw)->total_vports - 1; \
- (vport) = &(esw)->vports[i], \
- (i) >= MLX5_VPORT_PF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (vport) = &(esw)->vports[(i)], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
- for ((i) = (nvfs); \
- (vport) = &(esw)->vports[(i)], \
- (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-/* The rep getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
+#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
+#define MLX5_ESW_VPT_VF XA_MARK_1
+#define MLX5_ESW_VPT_SF XA_MARK_2
+
+/* The vport iterators are valid only after the vports are initialized in mlx5_eswitch_init.
+ * The idea is borrowed from xa_for_each_marked(), with added support for a desired last element.
*/
-#define mlx5_esw_for_all_reps(esw, i, rep) \
- for ((i) = MLX5_VPORT_PF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
- for ((i) = (nvfs); \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
- for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
- for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
-
-/* Includes host PF (vport 0) if it's not esw manager. */
-#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
- for ((i) = (esw)->first_host_vport; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
- for ((i) = (nvfs); \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) >= (esw)->first_host_vport; (i)--)
-
-#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
- for ((vport) = (esw)->first_host_vport; \
- (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
- for ((vport) = (nvfs); \
- (vport) >= (esw)->first_host_vport; (vport)--)
-
-#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
- for ((i) = mlx5_esw_sf_start_idx(esw); \
- (rep) = &(esw)->offloads.vport_reps[(i)], \
- (i) < mlx5_esw_sf_end_idx(esw); (i++))
+
+#define mlx5_esw_for_each_vport(esw, index, vport) \
+ xa_for_each(&((esw)->vports), index, vport)
+
+#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
+ for (index = 0, entry = xa_find(xa, &index, last, filter); \
+ entry; entry = xa_find_after(xa, &index, last, filter))
+
+#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
+ mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)
+
+#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
+ mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)
+
+#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
+ mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
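[A hedged usage sketch of the marked iterators above (the example_* name is illustrative). The index variable must be unsigned long because xa_find()/xa_find_after() take an unsigned long cursor, and entries are keyed by vport number, so esw_funcs.num_vfs can be passed directly as the last (inclusive) VF key:]

	static void example_walk_vf_vports(struct mlx5_eswitch *esw)
	{
		struct mlx5_vport *vport;
		unsigned long i;

		/* Visits only entries marked MLX5_ESW_VPT_VF, up to and
		 * including the key esw->esw_funcs.num_vfs.
		 */
		mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
			esw_debug(esw->dev, "vf vport 0x%x at index %d\n",
				  vport->vport, vport->index);
	}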
@@ -712,13 +622,26 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
-void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+struct esw_vport_tbl_namespace {
+ int max_fte;
+ int max_num_groups;
+ u32 flags;
+};
+
+struct mlx5_vport_tbl_attr {
+ u16 chain;
+ u16 prio;
+ u16 vport;
+ const struct esw_vport_tbl_namespace *vport_ns;
+};
+
+struct mlx5_flow_table *
+mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
+void
+mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
-u32
-esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
@@ -739,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum);
+ u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum);
+ u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
@@ -761,6 +685,18 @@ struct mlx5_esw_event_info {
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
+
+bool mlx5_esw_hold(struct mlx5_core_dev *dev);
+void mlx5_esw_release(struct mlx5_core_dev *dev);
+void mlx5_esw_get(struct mlx5_core_dev *dev);
+void mlx5_esw_put(struct mlx5_core_dev *dev);
+int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
+void mlx5_esw_unlock(struct mlx5_eswitch *esw);
+
+void esw_vport_change_handle_locked(struct mlx5_vport *vport);
+
+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -781,6 +717,13 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline unsigned int
+mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ return vport_num;
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d4a2f8d1ee9f..db1e74280e57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -40,7 +40,6 @@
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
-#include "esw/indir_table.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -48,6 +47,17 @@
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
+#include "en/mapping.h"
+
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
+#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
+ xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)
+
+#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
+ mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
+ rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
@@ -55,192 +65,19 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
-/* Per vport tables */
-
-#define MLX5_ESW_VPORT_TABLE_SIZE 128
-
-/* This struct is used as a key to the hash table and we need it to be packed
- * so hash result is consistent
- */
-struct mlx5_vport_key {
- u32 chain;
- u16 prio;
- u16 vport;
- u16 vhca_id;
-} __packed;
-
-struct mlx5_vport_tbl_attr {
- u16 chain;
- u16 prio;
- u16 vport;
-};
-
-struct mlx5_vport_table {
- struct hlist_node hlist;
- struct mlx5_flow_table *fdb;
- u32 num_rules;
- struct mlx5_vport_key key;
-};
-
+#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
-static struct mlx5_flow_table *
-esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_table *fdb;
-
- ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
- ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
- ft_attr.prio = FDB_PER_VPORT;
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
- PTR_ERR(fdb));
- }
-
- return fdb;
-}
-
-static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
- struct mlx5_vport_tbl_attr *attr,
- struct mlx5_vport_key *key)
-{
- key->vport = attr->vport;
- key->chain = attr->chain;
- key->prio = attr->prio;
- key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
- return jhash(key, sizeof(*key), 0);
-}
-
-/* caller must hold vports.lock */
-static struct mlx5_vport_table *
-esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
-{
- struct mlx5_vport_table *e;
-
- hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
- if (!memcmp(&e->key, skey, sizeof(*skey)))
- return e;
-
- return NULL;
-}
-
-static void
-esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
-{
- struct mlx5_vport_table *e;
- struct mlx5_vport_key key;
- u32 hkey;
-
- mutex_lock(&esw->fdb_table.offloads.vports.lock);
- hkey = flow_attr_to_vport_key(esw, attr, &key);
- e = esw_vport_tbl_lookup(esw, &key, hkey);
- if (!e || --e->num_rules)
- goto out;
-
- hash_del(&e->hlist);
- mlx5_destroy_flow_table(e->fdb);
- kfree(e);
-out:
- mutex_unlock(&esw->fdb_table.offloads.vports.lock);
-}
-
-static struct mlx5_flow_table *
-esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *fdb;
- struct mlx5_vport_table *e;
- struct mlx5_vport_key skey;
- u32 hkey;
-
- mutex_lock(&esw->fdb_table.offloads.vports.lock);
- hkey = flow_attr_to_vport_key(esw, attr, &skey);
- e = esw_vport_tbl_lookup(esw, &skey, hkey);
- if (e) {
- e->num_rules++;
- goto out;
- }
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e) {
- fdb = ERR_PTR(-ENOMEM);
- goto err_alloc;
- }
-
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
- if (!ns) {
- esw_warn(dev, "Failed to get FDB namespace\n");
- fdb = ERR_PTR(-ENOENT);
- goto err_ns;
- }
-
- fdb = esw_vport_tbl_create(esw, ns);
- if (IS_ERR(fdb))
- goto err_ns;
-
- e->fdb = fdb;
- e->num_rules = 1;
- e->key = skey;
- hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
-out:
- mutex_unlock(&esw->fdb_table.offloads.vports.lock);
- return e->fdb;
-
-err_ns:
- kfree(e);
-err_alloc:
- mutex_unlock(&esw->fdb_table.offloads.vports.lock);
- return fdb;
-}
-
-int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
-{
- struct mlx5_vport_tbl_attr attr;
- struct mlx5_flow_table *fdb;
- struct mlx5_vport *vport;
- int i;
-
- attr.chain = 0;
- attr.prio = 1;
- mlx5_esw_for_all_vports(esw, i, vport) {
- attr.vport = vport->vport;
- fdb = esw_vport_tbl_get(esw, &attr);
- if (IS_ERR(fdb))
- goto out;
- }
- return 0;
-
-out:
- mlx5_esw_vport_tbl_put(esw);
- return PTR_ERR(fdb);
-}
-
-void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
-{
- struct mlx5_vport_tbl_attr attr;
- struct mlx5_vport *vport;
- int i;
-
- attr.chain = 0;
- attr.prio = 1;
- mlx5_esw_for_all_vports(esw, i, vport) {
- attr.vport = vport->vport;
- esw_vport_tbl_put(esw, &attr);
- }
-}
-
-/* End: Per vport tables */
+static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+ .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
+ .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
+ .flags = 0,
+};
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
- int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-
- WARN_ON(idx > esw->total_vports - 1);
- return &esw->offloads.vport_reps[idx];
+ return xa_load(&esw->offloads.vport_reps, vport_num);
}
static void
@@ -256,6 +93,26 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
+/* Strictly, only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
+ * are not used later in this flow either, so clear the whole register for simplicity.
+ */
+void
+mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
+{
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ void *misc2;
+
+ misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
+
+ misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
+
+ if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
+ spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
+ }
+}
+
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -327,6 +184,19 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
}
static int
+esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_esw_flow_attr *esw_attr,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest[i].sampler_id = esw_attr->sample->sampler_id;
+
+ return 0;
+}
+
+static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
@@ -561,7 +431,10 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
- if (attr->dest_ft) {
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
+ esw_setup_sampler_dest(dest, flow_act, esw_attr, *i);
+ (*i)++;
+ } else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
@@ -664,12 +537,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- if (split) {
+ /* esw_attr->sample is allocated only when there is a sample action */
+ if (esw_attr->sample && esw_attr->sample->sample_default_tbl) {
+ fdb = esw_attr->sample->sample_default_tbl;
+ } else if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
- fdb = esw_vport_tbl_get(esw, &fwd_attr);
+ fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
} else {
if (attr->chain || attr->prio)
fdb = mlx5_chains_get_table(chains, attr->chain,
@@ -701,7 +578,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
err_add_rule:
if (split)
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
@@ -734,7 +611,8 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
- fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
+ fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -779,7 +657,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
@@ -814,15 +692,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
}
if (fwd_rule) {
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
} else {
if (split)
- esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_esw_vporttbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
esw_cleanup_dests(esw, attr);
@@ -848,10 +727,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
struct mlx5_eswitch_rep *rep;
- int i, err = 0;
+ unsigned long i;
+ int err = 0;
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
+ mlx5_esw_for_each_entry_marked(&esw->offloads.vport_reps, i, rep,
+ esw->esw_funcs.num_vfs, MLX5_ESW_VPT_HOST_FN) {
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
@@ -1043,7 +923,8 @@ out:
}
struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch_rep *rep,
u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
@@ -1061,21 +942,30 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
/* source vport is the esw manager */
- MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport);
+ if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(rep->esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport.num = vport;
+ dest.vport.num = rep->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+ esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
+ PTR_ERR(flow_rule));
out:
kvfree(spec);
return flow_rule;
@@ -1090,13 +980,13 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
- int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;
+ int i = 0, num_vfs = esw->esw_funcs.num_vfs;
if (!num_vfs || !flows)
return;
- mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
- mlx5_del_flow_rules(flows[i++]);
+ for (i = 0; i < num_vfs; i++)
+ mlx5_del_flow_rules(flows[i]);
kvfree(flows);
}
@@ -1104,12 +994,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
- int num_vfs, vport_num, rule_idx = 0, err = 0;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
+ int num_vfs, rule_idx = 0, err = 0;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec;
+ struct mlx5_vport *vport;
+ unsigned long i;
+ u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
@@ -1133,7 +1026,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
+ mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+ vport_num = vport->vport;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
@@ -1275,12 +1169,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- struct mlx5_flow_handle *flow;
- struct mlx5_flow_spec *spec;
/* total vports is the same for both e-switches */
int nvports = esw->total_vports;
+ struct mlx5_flow_handle *flow;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_vport *vport;
+ unsigned long i;
void *misc;
- int err, i;
+ int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -1299,6 +1195,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc_parameters);
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
@@ -1308,10 +1205,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[MLX5_VPORT_PF] = flow;
+ flows[vport->index] = flow;
}
if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -1319,13 +1217,13 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
+ flows[vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
esw_set_peer_miss_rule_source_port(esw,
peer_dev->priv.eswitch,
- spec, i);
+ spec, vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -1333,7 +1231,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_vf_flow_err;
}
- flows[i] = flow;
+ flows[vport->index] = flow;
}
esw->fdb_table.offloads.peer_miss_rules = flows;
@@ -1342,15 +1240,20 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_vf_flow_err:
- nvports = --i;
- mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
- mlx5_del_flow_rules(flows[i]);
-
- if (mlx5_ecpf_vport_exists(esw->dev))
- mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+ if (!flows[vport->index])
+ continue;
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
kvfree(flows);
@@ -1362,20 +1265,23 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
struct mlx5_flow_handle **flows;
- int i;
+ struct mlx5_vport *vport;
+ unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules;
- mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
- mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[i]);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
+ mlx5_del_flow_rules(flows[vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev))
- mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
kvfree(flows);
}
@@ -1453,14 +1359,14 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
return ERR_PTR(-EOPNOTSUPP);
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
- ESW_CHAIN_TAG_METADATA_MASK);
+ ESW_REG_C0_USER_DATA_METADATA_MASK);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
@@ -1476,7 +1382,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
dest.ft = esw->offloads.ft_offloads;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- kfree(spec);
+ kvfree(spec);
if (IS_ERR(flow_rule))
esw_warn(esw->dev,
@@ -1486,12 +1392,6 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
return flow_rule;
}
-u32
-esw_get_max_restore_tag(struct mlx5_eswitch *esw)
-{
- return ESW_CHAIN_TAG_METADATA_MASK;
-}
-
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -1521,6 +1421,44 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport_tbl_attr attr;
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ attr.chain = 0;
+ attr.prio = 1;
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ attr.vport = vport->vport;
+ attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
+ mlx5_esw_vporttbl_put(esw, &attr);
+ }
+}
+
+static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport_tbl_attr attr;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ attr.chain = 0;
+ attr.prio = 1;
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ attr.vport = vport->vport;
+ attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
+ fdb = mlx5_esw_vporttbl_get(esw, &attr);
+ if (IS_ERR(fdb))
+ goto out;
+ }
+ return 0;
+
+out:
+ esw_vport_tbl_put(esw);
+ return PTR_ERR(fdb);
+}
+
#define fdb_modify_header_fwd_to_table_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
@@ -1570,7 +1508,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
attr.max_ft_sz = fdb_max;
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
- attr.max_restore_tag = esw_get_max_restore_tag(esw);
+ attr.mapping = esw->offloads.reg_c0_obj_pool;
chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(chains)) {
@@ -1598,7 +1536,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
/* Open level 1 for split fdb rules now if prios isn't supported */
if (!mlx5_chains_prios_supported(chains)) {
- err = mlx5_esw_vport_tbl_get(esw);
+ err = esw_vport_tbl_get(esw);
if (err)
goto level_1_err;
}
@@ -1622,7 +1560,7 @@ static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
if (!mlx5_chains_prios_supported(chains))
- mlx5_esw_vport_tbl_put(esw);
+ esw_vport_tbl_put(esw);
mlx5_chains_put_table(chains, 0, 1, 0);
mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
mlx5_chains_destroy(chains);
@@ -1709,6 +1647,12 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1865,6 +1809,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
+ atomic64_set(&esw->user_count, 0);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
@@ -1988,12 +1933,12 @@ out:
return flow_rule;
}
-
-static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;
- int vport;
+ struct mlx5_vport *vport;
+ unsigned long i;
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
@@ -2014,8 +1959,8 @@ static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode
query_vports:
mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
if (prev_mlx5_mode != mlx5_mode)
return -EINVAL;
prev_mlx5_mode = mlx5_mode;
@@ -2067,7 +2012,7 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
goto out_free;
}
- ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
+ ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
@@ -2082,7 +2027,7 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
- ESW_CHAIN_TAG_METADATA_MASK);
+ ESW_REG_C0_USER_DATA_METADATA_MASK);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ft_attr.max_fte - 1);
@@ -2158,34 +2103,82 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ xa_mark_t mark)
{
- kfree(esw->offloads.vport_reps);
+ bool mark_set;
+
+ /* Copy the mark from vport to its rep */
+ mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
+ if (mark_set)
+ xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}
-int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
- int total_vports = esw->total_vports;
struct mlx5_eswitch_rep *rep;
- int vport_index;
- u8 rep_type;
+ int rep_type;
+ int err;
- esw->offloads.vport_reps = kcalloc(total_vports,
- sizeof(struct mlx5_eswitch_rep),
- GFP_KERNEL);
- if (!esw->offloads.vport_reps)
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
return -ENOMEM;
- mlx5_esw_for_all_reps(esw, vport_index, rep) {
- rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
- rep->vport_index = vport_index;
+ rep->vport = vport->vport;
+ rep->vport_index = vport->index;
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+ atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_data[rep_type].state,
- REP_UNREGISTERED);
- }
+ err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
+ if (err)
+ goto insert_err;
+ mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
+ mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
+ mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
return 0;
+
+insert_err:
+ kfree(rep);
+ return err;
+}
+
+static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ xa_erase(&esw->offloads.vport_reps, rep->vport);
+ kfree(rep);
+}
+
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+ struct mlx5_eswitch_rep *rep;
+ unsigned long i;
+
+ mlx5_esw_for_each_rep(esw, i, rep)
+ mlx5_esw_offloads_rep_cleanup(esw, rep);
+ xa_destroy(&esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ xa_init(&esw->offloads.vport_reps);
+
+ mlx5_esw_for_each_vport(esw, i, vport) {
+ err = mlx5_esw_offloads_rep_init(esw, vport);
+ if (err)
+ goto err;
+ }
+ return 0;
+
+err:
+ esw_offloads_cleanup_reps(esw);
+ return err;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
@@ -2199,7 +2192,7 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
mlx5_esw_for_each_sf_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -2208,11 +2201,11 @@ static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
__unload_reps_sf_vport(esw, rep_type);
- mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
+ mlx5_esw_for_each_vf_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type);
if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -2270,9 +2263,11 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
- if (err)
- return err;
+ if (vport_num != MLX5_VPORT_UPLINK) {
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
+ if (err)
+ return err;
+ }
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
@@ -2280,7 +2275,8 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
return err;
load_err:
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ if (vport_num != MLX5_VPORT_UPLINK)
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
return err;
}
@@ -2290,7 +2286,9 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
return;
mlx5_esw_offloads_rep_unload(esw, vport_num);
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+
+ if (vport_num != MLX5_VPORT_UPLINK)
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -2299,13 +2297,8 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw)
{
- int err;
-
- err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
- if (err)
- return err;
- return 0;
+ return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
}
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
@@ -2430,8 +2423,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
-static bool
-esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
return false;
@@ -2500,25 +2492,25 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
+ unsigned long i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return;
- mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ mlx5_esw_for_each_vport(esw, i, vport)
esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
+ unsigned long i;
int err;
- int i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- mlx5_esw_for_all_vports(esw, i, vport) {
+ mlx5_esw_for_each_vport(esw, i, vport) {
err = esw_offloads_vport_metadata_setup(esw, vport);
if (err)
goto metadata_err;
@@ -2531,6 +2523,28 @@ metadata_err:
return err;
}
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
+{
+ int err = 0;
+
+ down_write(&esw->mode_lock);
+ if (esw->mode != MLX5_ESWITCH_NONE) {
+ err = -EBUSY;
+ goto done;
+ }
+ if (!mlx5_esw_vport_match_metadata_supported(esw)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ if (enable)
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ else
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
+done:
+ up_write(&esw->mode_lock);
+ return err;
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
@@ -2565,6 +2579,9 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
return esw_vport_create_offloads_acl_tables(esw, vport);
}
@@ -2573,6 +2590,9 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(vport))
+ return;
+
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
@@ -2584,6 +2604,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
+ atomic64_set(&esw->user_count, 0);
indir = mlx5_esw_indir_table_init();
if (IS_ERR(indir)) {
@@ -2726,10 +2747,25 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
return 0;
}
+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
+{
+ /* Local controller is always valid */
+ if (controller == 0)
+ return true;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return false;
+
+ /* External host numbers start from zero in the device */
+ return (controller == esw->offloads.host_number + 1);
+}
+
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
+ struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
- int err, i;
+ unsigned long i;
+ int err;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2744,9 +2780,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_metadata;
- if (esw_check_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
err = esw_offloads_metadata_init(esw);
if (err)
goto err_metadata;
@@ -2755,6 +2788,15 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
+ reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj),
+ ESW_REG_C0_USER_DATA_METADATA_MASK,
+ true);
+ if (IS_ERR(reg_c0_obj_pool)) {
+ err = PTR_ERR(reg_c0_obj_pool);
+ goto err_pool;
+ }
+ esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
+
err = esw_offloads_steering_init(esw);
if (err)
goto err_steering_init;
@@ -2781,11 +2823,12 @@ err_vports:
err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
+ mapping_destroy(reg_c0_obj_pool);
+err_pool:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
@@ -2819,8 +2862,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
@@ -2925,8 +2968,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- mutex_lock(&esw->mode_lock);
- cur_mlx5_mode = esw->mode;
+ err = mlx5_esw_try_lock(esw);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
+ return err;
+ }
+ cur_mlx5_mode = err;
+ err = 0;
+
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
@@ -2938,7 +2987,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
unlock:
- mutex_unlock(&esw->mode_lock);
+ mlx5_esw_unlock(esw);
return err;
}
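[The conversion above pairs the mode_lock rw_semaphore with the new user_count refcount: a mode change must fail with -EBUSY while users hold a reference, and a successful mlx5_esw_try_lock() returns the current mode, which is why cur_mlx5_mode is recovered from err. One plausible shape of the helpers, a sketch only; the real implementations live in eswitch.c outside this patch and may differ, e.g. by using down_write_trylock():]

	int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
	{
		down_write(&esw->mode_lock);
		if (atomic64_read(&esw->user_count) > 0) {
			/* Rules or ports still reference the eswitch. */
			up_write(&esw->mode_lock);
			return -EBUSY;
		}
		/* The non-negative return doubles as the current mode. */
		return esw->mode;
	}

	void mlx5_esw_unlock(struct mlx5_eswitch *esw)
	{
		up_write(&esw->mode_lock);
	}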
@@ -2951,14 +3000,45 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_mode_to_devlink(esw->mode, mode);
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_vport *vport;
+ u16 err_vport_num = 0;
+ unsigned long i;
+ int err = 0;
+
+ mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
+ if (err) {
+ err_vport_num = vport->vport;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
+ goto revert_inline_mode;
+ }
+ }
+ return 0;
+
+revert_inline_mode:
+ mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ if (vport->vport == err_vport_num)
+ break;
+ mlx5_modify_nic_vport_min_inline(dev,
+ vport->vport,
+ esw->offloads.inline_mode);
+ }
return err;
}
@@ -2966,15 +3046,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- int err, vport, num_vport;
struct mlx5_eswitch *esw;
u8 mlx5_mode;
+ int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto out;
@@ -3003,27 +3083,16 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to set min inline on vport");
- goto revert_inline_mode;
- }
- }
+ err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
+ if (err)
+ goto out;
esw->offloads.inline_mode = mlx5_mode;
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return 0;
-revert_inline_mode:
- num_vport = --vport;
- mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
- mlx5_modify_nic_vport_min_inline(dev,
- vport,
- esw->offloads.inline_mode);
out:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3036,14 +3105,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3059,7 +3128,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
@@ -3105,7 +3174,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3120,14 +3189,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
return PTR_ERR(esw);
- mutex_lock(&esw->mode_lock);
+ down_write(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
*encap = esw->offloads.encap;
unlock:
- mutex_unlock(&esw->mode_lock);
+ up_write(&esw->mode_lock);
return 0;
}
@@ -3152,11 +3221,12 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
{
struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
esw->offloads.rep_ops[rep_type] = ops;
- mlx5_esw_for_all_reps(esw, i, rep) {
- if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
+ rep->esw = esw;
rep_data = &rep->rep_data[rep_type];
atomic_set(&rep_data->state, REP_REGISTERED);
}
@@ -3167,12 +3237,12 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int i;
+ unsigned long i;
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
__unload_reps_all_vport(esw, rep_type);
- mlx5_esw_for_all_reps(esw, i, rep)
+ mlx5_esw_for_each_rep(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -3213,12 +3283,6 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num >= MLX5_VPORT_FIRST_VF &&
- vport_num <= esw->dev->priv.sriov.max_vfs;
-}
-
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
@@ -3244,7 +3308,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 sfnum)
+ u16 vport_num, u32 controller, u32 sfnum)
{
int err;
@@ -3252,7 +3316,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err)
return err;
- err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+ err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
if (err)
goto devlink_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index ec679560a95d..a81ece94f599 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table\n");
+ esw_warn(dev, "Failed to create termination table (error %d)\n",
+ IS_ERR(tt->termtbl));
return -EOPNOTSUPP;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule\n");
+ esw_warn(dev, "Failed to create termination table rule (error %d)\n",
+ IS_ERR(tt->rule));
goto add_flow_err;
}
return 0;
@@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
- if (err) {
- esw_warn(esw->dev, "Failed to create termination table\n");
+ if (err)
goto tt_create_err;
- }
+
hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
tt_add_ref:
tt->ref_count++;
@@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to create termination table\n");
+ esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
+ IS_ERR(tt));
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 22bee4990232..0bba92cf5dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -707,7 +707,7 @@ static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
}
if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+ err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
if (err < 0) {
context = ERR_PTR(err);
goto exists;
@@ -758,7 +758,7 @@ delete_hash:
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+ ida_free(&fipsec->halloc, sa_ctx->sa_handle);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
@@ -850,9 +850,9 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
return;
}
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+ if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+ ida_free(&fipsec->halloc, sa_ctx->sa_handle);
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
@@ -1085,6 +1085,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
if (IS_ERR(rule->ctx)) {
int err = PTR_ERR(rule->ctx);
+
kfree(rule);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 66ad599bd488..f74d2c834037 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -105,7 +105,7 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
@@ -590,7 +590,7 @@ static void del_sw_fte(struct fs_node *node)
&fte->hash,
rhash_fte);
WARN_ON(err);
- ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+ ida_free(&fg->fte_allocator, fte->index - fg->start_index);
kmem_cache_free(steering->ftes_cache, fte);
}
@@ -640,7 +640,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
int index;
int ret;
- index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+ index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
if (index < 0)
return index;
@@ -656,7 +656,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return 0;
err_ida_remove:
- ida_simple_remove(&fg->fte_allocator, index);
+ ida_free(&fg->fte_allocator, index);
return ret;
}
@@ -2229,17 +2229,21 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
+ if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (vport >= steering->esw_egress_acl_vports)
+ return NULL;
if (steering->esw_egress_root_ns &&
steering->esw_egress_root_ns[vport])
return &steering->esw_egress_root_ns[vport]->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (vport >= steering->esw_ingress_acl_vports)
+ return NULL;
if (steering->esw_ingress_root_ns &&
steering->esw_ingress_root_ns[vport])
return &steering->esw_ingress_root_ns[vport]->ns;
@@ -2395,14 +2399,12 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node)
{
- int i;
- struct mlx5_flow_namespace *fs_ns;
int err;
+ int i;
- fs_get_obj(fs_ns, fs_parent_node);
for (i = 0; i < init_node->ar_size; i++) {
err = init_root_tree_recursive(steering, &init_node->children[i],
- &fs_ns->node,
+ fs_parent_node,
init_node, i);
if (err)
return err;
@@ -2573,43 +2575,11 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
-static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_egress_root_ns)
- return;
-
- for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
-}
-
-static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_ingress_root_ns)
- return;
-
- for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
-
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
-}
-
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_egress_acls_root_ns(dev);
- cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
@@ -2854,10 +2824,9 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
return PTR_ERR_OR_ZERO(prio);
}
-static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
@@ -2873,7 +2842,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
if (err)
goto cleanup_root_ns;
}
-
+ steering->esw_egress_acl_vports = total_vports;
return 0;
cleanup_root_ns:
@@ -2884,10 +2853,24 @@ cleanup_root_ns:
return err;
}
-static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < steering->esw_egress_acl_vports; i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+ steering->esw_egress_root_ns = NULL;
+}
+
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
@@ -2903,7 +2886,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
if (err)
goto cleanup_root_ns;
}
-
+ steering->esw_ingress_acl_vports = total_vports;
return 0;
cleanup_root_ns:
@@ -2914,6 +2897,21 @@ cleanup_root_ns:
return err;
}
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < steering->esw_ingress_acl_vports; i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+ steering->esw_ingress_root_ns = NULL;
+}
+
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -2976,16 +2974,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err)
goto err;
}
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acls_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acls_root_ns(dev);
- if (err)
- goto err;
- }
}
if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
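
With this change mlx5_init_fs() no longer sizes the vport ACL namespaces itself; the exported init/cleanup pairs take the vport count from the caller and record it in esw_egress_acl_vports/esw_ingress_acl_vports for bounds checks and teardown. A minimal sketch of the intended call pattern, assuming an eswitch-side caller that is not part of this hunk:

    /* hedged sketch -- the eswitch caller is an assumption, not shown here */
    static int esw_acls_init(struct mlx5_core_dev *dev, int total_vports)
    {
            int err;

            err = mlx5_fs_egress_acls_init(dev, total_vports);
            if (err)
                    return err;
            err = mlx5_fs_ingress_acls_init(dev, total_vports);
            if (err)
                    mlx5_fs_egress_acls_cleanup(dev);
            return err;
    }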
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b24a9849c45e..e577a2c424af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -129,6 +129,8 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
+ int esw_egress_acl_vports;
+ int esw_ingress_acl_vports;
};
struct fs_node {
@@ -287,6 +289,11 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index f43caefd07a1..18e5aec14641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -497,13 +497,13 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
- GFP_KERNEL);
+ bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
+ GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
- bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
+ bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
if (!bulk->bitmask)
goto err_alloc_bitmask;
@@ -521,9 +521,9 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
return bulk;
err_mlx5_cmd_bulk_alloc:
- kfree(bulk->bitmask);
+ kvfree(bulk->bitmask);
err_alloc_bitmask:
- kfree(bulk);
+ kvfree(bulk);
err_alloc_bulk:
return ERR_PTR(err);
}
@@ -537,8 +537,8 @@ mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
}
mlx5_cmd_fc_free(dev, bulk->base_id);
- kfree(bulk->bitmask);
- kfree(bulk);
+ kvfree(bulk->bitmask);
+ kvfree(bulk);
return 0;
}
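
bulk_len scales with the firmware's flow_counter_bulk_alloc capability, so the flow-counter array can grow beyond what kmalloc() serves comfortably; kvzalloc()/kvcalloc() transparently fall back to vmalloc for large sizes. The rest of the hunk enforces the one pairing rule that comes with that, illustrated below (names illustrative, not from the hunk):

    buf = kvzalloc(size, GFP_KERNEL);   /* may be kmalloc- or vmalloc-backed */
    if (!buf)
            return -ENOMEM;
    /* ... use buf ... */
    kvfree(buf);                        /* kvfree() handles both; plain kfree() would not */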
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index f9042e147c7f..d5d57630015f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -104,7 +104,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_load_one(dev, false);
+ mlx5_load_one(dev);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -119,7 +119,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
int err;
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
err = mlx5_health_wait_pci_up(dev);
if (err)
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
@@ -199,16 +199,11 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
fw_live_patch_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- struct mlx5_fw_tracer *tracer;
mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
- tracer = dev->tracer;
- if (IS_ERR_OR_NULL(tracer))
- return;
-
- if (mlx5_fw_tracer_reload(tracer))
+ if (mlx5_fw_tracer_reload(dev->tracer))
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
@@ -342,7 +337,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 0c32c485eb58..9ff163c5bcde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -335,12 +335,12 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
return -EIO;
}
mlx5_core_err(dev, "starting health recovery flow\n");
- mlx5_recover_device(dev);
- if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
- mlx5_health_check_fatal_sensors(dev)) {
+ if (mlx5_recover_device(dev) || mlx5_health_check_fatal_sensors(dev)) {
mlx5_core_err(dev, "health recovery failed\n");
return -EIO;
}
+
+ mlx5_core_info(dev, "health recovery succeeded\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 6f7cef47e04c..7d7ed025db0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -33,6 +33,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
+#include "en/params.h"
#include "ipoib.h"
#define IB_DEFAULT_Q_KEY 0xb1b
@@ -372,6 +373,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_nch = priv->max_nch;
int err;
mlx5e_create_q_counters(priv);
@@ -386,7 +388,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;
@@ -394,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;
@@ -405,11 +407,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return 0;
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -421,10 +423,12 @@ err_destroy_q_counters:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
+ u16 max_nch = priv->max_nch;
+
mlx5i_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -469,6 +473,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
+ .rx_ptp_support = false,
};
/* mlx5i netdev NDos */
@@ -476,28 +481,19 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- struct mlx5e_channels new_channels = {};
- struct mlx5e_params *params;
+ struct mlx5e_params new_params;
int err = 0;
mutex_lock(&priv->state_lock);
- params = &priv->channels.params;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- params->sw_mtu = new_mtu;
- netdev->mtu = params->sw_mtu;
- goto out;
- }
-
- new_channels.params = *params;
- new_channels.params.sw_mtu = new_mtu;
+ new_params = priv->channels.params;
+ new_params.sw_mtu = new_mtu;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
if (err)
goto out;
- netdev->mtu = new_channels.params.sw_mtu;
+ netdev->mtu = new_params.sw_mtu;
out:
mutex_unlock(&priv->state_lock);
@@ -710,7 +706,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
- return mdev->mlx5e_res.pdn != 0;
+ return mdev->mlx5e_res.hw_objs.pdn != 0;
}
static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
@@ -720,7 +716,7 @@ static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
return &mlx5i_nic_profile;
}
-static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
struct net_device *netdev, void *param)
{
struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 3d0a18a0bed4..18ee21b06a00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -350,6 +350,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .rx_ptp_support = false,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 83a05371e2aa..b8748390335f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -603,8 +603,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (err)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
-
- return;
}
/* Must be called with intf_mutex held */
@@ -768,7 +766,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
- if (ldev && __mlx5_lag_is_roce(ldev)) {
+ if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 88e58ac902de..2c41a6920264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -35,7 +35,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
}
/**
- * Set lag port affinity
+ * mlx5_lag_set_port_affinity
*
* @ldev: lag device
* @port:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e7f26b240de..ce696d523493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -645,16 +645,19 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static int mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
+ if (!clock->ptp_info.n_pins)
+ return;
+
clock->ptp_info.pin_config =
kcalloc(clock->ptp_info.n_pins,
sizeof(*clock->ptp_info.pin_config),
GFP_KERNEL);
if (!clock->ptp_info.pin_config)
- return -ENOMEM;
+ return;
clock->ptp_info.enable = mlx5_ptp_enable;
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
@@ -667,8 +670,6 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
clock->ptp_info.pin_config[i].chan = 0;
}
-
- return 0;
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
@@ -859,6 +860,17 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
}
}
+static void mlx5_init_pps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return;
+
+ mlx5_get_pps_caps(mdev);
+ mlx5_init_pin_config(clock);
+}
+
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
@@ -876,10 +888,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->ptp_info = mlx5_ptp_clock_info;
/* Initialize 1PPS data structures */
- if (MLX5_PPS_CAP(mdev))
- mlx5_get_pps_caps(mdev);
- if (clock->ptp_info.n_pins)
- mlx5_init_pin_config(clock);
+ mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
&mdev->pdev->dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index a12c7da618a7..ceae6bc378e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -105,4 +105,15 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
}
#endif
+static inline cqe_ts_to_ns mlx5_rq_ts_translator(struct mlx5_core_dev *mdev)
+{
+ return mlx5_is_real_time_rq(mdev) ? mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
+}
+
+static inline cqe_ts_to_ns mlx5_sq_ts_translator(struct mlx5_core_dev *mdev)
+{
+ return mlx5_is_real_time_sq(mdev) ? mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
+}
#endif
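
These helpers let each ring resolve the CQE-timestamp converter once at creation time instead of testing the real-time capability per completion. A minimal usage sketch, assuming cqe_ts_to_ns is a ktime_t (*)(struct mlx5_clock *, u64) matching the two candidate functions:

    /* sketch: resolve once at ring init, then call per CQE */
    cqe_ts_to_ns cyc2time = mlx5_rq_ts_translator(mdev);
    /* ... in the completion path ... */
    ktime_t hwts = cyc2time(&mdev->clock, cqe_ts);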
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index 57eb91bcbca7..e995f8378df7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -46,7 +46,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
- MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 81f2cc4ca1da..f607a3858ef5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -22,15 +22,15 @@ struct mlx5_cq_table {
};
struct mlx5_eq {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
struct mlx5_core_dev *dev;
struct mlx5_cq_table cq_table;
__be32 __iomem *doorbell;
u32 cons_index;
- struct mlx5_frag_buf buf;
unsigned int vecidx;
unsigned int irqn;
u8 eqn;
- int nent;
struct mlx5_rsc_debug *dbg;
};
@@ -47,16 +47,21 @@ struct mlx5_eq_comp {
struct list_head list;
};
+static inline u32 eq_get_size(struct mlx5_eq *eq)
+{
+ return eq->fbc.sz_m1 + 1;
+}
+
static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
- return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
+ return mlx5_frag_buf_get_wqe(&eq->fbc, entry);
}
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
- struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+ struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1);
- return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
+ return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe;
}
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
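
The owner-bit check no longer needs a separate nent field: the fragment-buffer control block already encodes the queue size as 2^log_sz with mask sz_m1, and an EQE is valid only when its owner bit matches the parity of the number of times cons_index has wrapped. A worked example with assumed values:

    /* log_sz = 6 (64 entries), cons_index = 130:
     *   slot  = 130 & sz_m1 (63)  = 2
     *   wraps = 130 >> log_sz     = 2   (even parity)
     *   eqe is returned only if ((eqe->owner ^ 2) & 1) == 0,
     *   i.e. the owner bit must be 0 on an even wrap count.
     */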
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 381325b4a863..00ef10a1a9f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -7,15 +7,11 @@
#include "lib/fs_chains.h"
#include "en/mapping.h"
-#include "mlx5_core.h"
#include "fs_core.h"
-#include "eswitch.h"
-#include "en.h"
#include "en_tc.h"
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
-#define chains_mapping(chains) ((chains)->chains_mapping)
#define prios_ht(chains) ((chains)->prios_ht)
#define ft_pool_left(chains) ((chains)->ft_left)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
@@ -300,7 +296,7 @@ create_chain_restore(struct fs_chain *chain)
!mlx5_chains_prios_supported(chains))
return 0;
- err = mapping_add(chains_mapping(chains), &chain->chain, &index);
+ err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
if (err)
return err;
if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
@@ -310,10 +306,8 @@ create_chain_restore(struct fs_chain *chain)
*
* This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
*/
- err = mapping_add(chains_mapping(chains),
- &chain->chain, &index);
- mapping_remove(chains_mapping(chains),
- MLX5_FS_DEFAULT_FLOW_TAG);
+ err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
+ mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
if (err)
return err;
}
@@ -361,7 +355,7 @@ err_mod_hdr:
mlx5_del_flow_rules(chain->restore_rule);
err_rule:
/* Datapath can't find this mapping, so we can safely remove it */
- mapping_remove(chains_mapping(chains), chain->id);
+ mapping_remove(chains->chains_mapping, chain->id);
return err;
}
@@ -376,7 +370,7 @@ static void destroy_chain_restore(struct fs_chain *chain)
mlx5_del_flow_rules(chain->restore_rule);
mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
- mapping_remove(chains_mapping(chains), chain->id);
+ mapping_remove(chains->chains_mapping, chain->id);
}
static struct fs_chain *
@@ -797,7 +791,6 @@ static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
struct mlx5_fs_chains *chains_priv;
- struct mapping_ctx *mapping;
u32 max_flow_counter;
int err;
@@ -816,6 +809,7 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
chains_priv->flags = attr->flags;
chains_priv->ns = attr->ns;
chains_priv->group_num = attr->max_grp_num;
+ chains_priv->chains_mapping = attr->mapping;
tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
@@ -832,20 +826,10 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
if (err)
goto init_prios_ht_err;
- mapping = mapping_create(sizeof(u32), attr->max_restore_tag,
- true);
- if (IS_ERR(mapping)) {
- err = PTR_ERR(mapping);
- goto mapping_err;
- }
- chains_mapping(chains_priv) = mapping;
-
mutex_init(&chains_lock(chains_priv));
return chains_priv;
-mapping_err:
- rhashtable_destroy(&prios_ht(chains_priv));
init_prios_ht_err:
rhashtable_destroy(&chains_ht(chains_priv));
init_chains_ht_err:
@@ -857,7 +841,6 @@ static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
mutex_destroy(&chains_lock(chains));
- mapping_destroy(chains_mapping(chains));
rhashtable_destroy(&prios_ht(chains));
rhashtable_destroy(&chains_ht(chains));
@@ -884,25 +867,18 @@ int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
u32 *chain_mapping)
{
- return mapping_add(chains_mapping(chains), &chain, chain_mapping);
+ struct mapping_ctx *ctx = chains->chains_mapping;
+ struct mlx5_mapped_obj mapped_obj = {};
+
+ mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
+ mapped_obj.chain = chain;
+ return mapping_add(ctx, &mapped_obj, chain_mapping);
}
int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
{
- return mapping_remove(chains_mapping(chains), chain_mapping);
-}
-
-int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag,
- u32 *chain)
-{
- int err;
+ struct mapping_ctx *ctx = chains->chains_mapping;
- err = mapping_find(chains_mapping(chains), tag, chain);
- if (err) {
- mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag);
- return -ENOENT;
- }
-
- return 0;
+ return mapping_remove(ctx, chain_mapping);
}
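
The chains code now shares a caller-provided mapping context (passed in via attr->mapping) and stores a typed mlx5_mapped_obj rather than a bare chain id, which is why mlx5_get_chain_for_tag() disappears. A hedged sketch of the reverse lookup a consumer would do on the miss path; the caller shown is an assumption, not part of this hunk:

    struct mlx5_mapped_obj mapped_obj;

    if (!mapping_find(ctx, tag, &mapped_obj) &&
        mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN)
            chain = mapped_obj.chain;   /* tag decoded back to its chain */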
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index 6d5be31b05dd..e96f345e7dae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -7,6 +7,7 @@
#include <linux/mlx5/fs.h>
struct mlx5_fs_chains;
+struct mlx5_mapped_obj;
enum mlx5_chains_flags {
MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
@@ -20,7 +21,7 @@ struct mlx5_chains_attr {
u32 max_ft_sz;
u32 max_grp_num;
struct mlx5_flow_table *default_ft;
- u32 max_restore_tag;
+ struct mapping_ctx *mapping;
};
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
@@ -63,9 +64,6 @@ struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
-int
-mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
-
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index a68738c8f4bc..96ffc0a0e670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -55,10 +55,6 @@ void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
{
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
- return -EPERM;
- }
if (dev->roce.reserved_gids.start < count) {
mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
count);
@@ -79,7 +75,6 @@ int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
{
- WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
count, dev->roce.reserved_gids.count);
@@ -93,12 +88,12 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
{
int end = dev->roce.reserved_gids.start +
- dev->roce.reserved_gids.count;
+ dev->roce.reserved_gids.count - 1;
int index = 0;
- index = ida_simple_get(&dev->roce.reserved_gids.ida,
- dev->roce.reserved_gids.start, end,
- GFP_KERNEL);
+ index = ida_alloc_range(&dev->roce.reserved_gids.ida,
+ dev->roce.reserved_gids.start, end,
+ GFP_KERNEL);
if (index < 0)
return index;
@@ -110,7 +105,7 @@ int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
{
mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
- ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
+ ida_free(&dev->roce.reserved_gids.ida, gid_index);
}
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
@@ -142,10 +137,10 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
}
ether_addr_copy(addr_mac, mac);
- MLX5_SET_RA(in_addr, roce_version, roce_version);
- MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
memcpy(addr_l3_addr, gid, gidsz);
}
+ MLX5_SET_RA(in_addr, roce_version, roce_version);
+ MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index d046db7bb047..2f536c5d30b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -95,4 +95,13 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
return devlink_net(priv_to_devlink(dev));
}
+static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ mdev->mlx5e_res.uplink_netdev = netdev;
+}
+
+static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+{
+ return mdev->mlx5e_res.uplink_netdev;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c568896cfb23..c114365eb126 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -571,6 +571,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
mlx5_vhca_state_cap_handle(dev, set_hca_cap);
+ if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
+ MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -1235,7 +1239,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_put_uars_page(dev, dev->priv.uar);
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+int mlx5_init_one(struct mlx5_core_dev *dev)
{
int err = 0;
@@ -1247,16 +1251,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
/* remove any previous indication of internal error */
dev->state = MLX5_DEVICE_STATE_UP;
- err = mlx5_function_setup(dev, boot);
+ err = mlx5_function_setup(dev, true);
if (err)
goto err_function;
- if (boot) {
- err = mlx5_init_once(dev);
- if (err) {
- mlx5_core_err(dev, "sw objs init failed\n");
- goto function_teardown;
- }
+ err = mlx5_init_once(dev);
+ if (err) {
+ mlx5_core_err(dev, "sw objs init failed\n");
+ goto function_teardown;
}
err = mlx5_load(dev);
@@ -1265,16 +1267,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- if (boot) {
- err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
- if (err)
- goto err_devlink_reg;
-
- err = mlx5_register_device(dev);
- } else {
- err = mlx5_attach_device(dev);
- }
+ err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+ if (err)
+ goto err_devlink_reg;
+ err = mlx5_register_device(dev);
if (err)
goto err_register;
@@ -1282,16 +1279,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
return 0;
err_register:
- if (boot)
- mlx5_devlink_unregister(priv_to_devlink(dev));
+ mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
err_load:
- if (boot)
- mlx5_cleanup_once(dev);
+ mlx5_cleanup_once(dev);
function_teardown:
- mlx5_function_teardown(dev, boot);
+ mlx5_function_teardown(dev, true);
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
@@ -1299,33 +1294,84 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
mutex_lock(&dev->intf_state_mutex);
- if (cleanup) {
- mlx5_unregister_device(dev);
- mlx5_devlink_unregister(priv_to_devlink(dev));
- } else {
- mlx5_detach_device(dev);
- }
+ mlx5_unregister_device(dev);
+ mlx5_devlink_unregister(priv_to_devlink(dev));
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
__func__);
- if (cleanup)
- mlx5_cleanup_once(dev);
+ mlx5_cleanup_once(dev);
goto out;
}
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ mlx5_unload(dev);
+ mlx5_cleanup_once(dev);
+ mlx5_function_teardown(dev, true);
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+}
+int mlx5_load_one(struct mlx5_core_dev *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->intf_state_mutex);
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_warn(dev, "interface is up, NOP\n");
+ goto out;
+ }
+ /* remove any previous indication of internal error */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
+ err = mlx5_function_setup(dev, false);
+ if (err)
+ goto err_function;
+
+ err = mlx5_load(dev);
+ if (err)
+ goto err_load;
+
+ set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+
+ err = mlx5_attach_device(dev);
+ if (err)
+ goto err_attach;
+
+ mutex_unlock(&dev->intf_state_mutex);
+ return 0;
+
+err_attach:
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
+err_load:
+ mlx5_function_teardown(dev, false);
+err_function:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+ return err;
+}
- if (cleanup)
- mlx5_cleanup_once(dev);
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ mutex_lock(&dev->intf_state_mutex);
- mlx5_function_teardown(dev, cleanup);
+ mlx5_detach_device(dev);
+
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+ __func__);
+ goto out;
+ }
+
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ mlx5_unload(dev);
+ mlx5_function_teardown(dev, false);
out:
mutex_unlock(&dev->intf_state_mutex);
}
@@ -1397,7 +1443,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&dev->intf_state_mutex);
}
-static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx5_core_dev *dev;
struct devlink *devlink;
@@ -1433,11 +1479,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto pci_init_err;
}
- err = mlx5_load_one(dev, true);
+ err = mlx5_init_one(dev);
if (err) {
- mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+ mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
err);
- goto err_load_one;
+ goto err_init_one;
}
err = mlx5_crdump_enable(dev);
@@ -1449,7 +1495,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink_reload_enable(devlink);
return 0;
-err_load_one:
+err_init_one:
mlx5_pci_close(dev);
pci_init_err:
mlx5_mdev_uninit(dev);
@@ -1469,7 +1515,7 @@ static void remove_one(struct pci_dev *pdev)
devlink_reload_disable(devlink);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
- mlx5_unload_one(dev, true);
+ mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
mlx5_adev_idx_free(dev->priv.adev_idx);
@@ -1485,7 +1531,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
@@ -1555,7 +1601,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
mlx5_core_info(dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, false);
+ err = mlx5_load_one(dev);
if (err)
mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
__func__, err);
@@ -1627,7 +1673,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
mlx5_pci_disable_device(dev);
}
@@ -1635,7 +1681,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
return 0;
}
@@ -1644,7 +1690,7 @@ static int mlx5_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
@@ -1676,26 +1722,31 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev);
}
-void mlx5_recover_device(struct mlx5_core_dev *dev)
+int mlx5_recover_device(struct mlx5_core_dev *dev)
{
+ int ret = -EIO;
+
mlx5_pci_disable_device(dev);
if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
- mlx5_pci_resume(dev->pdev);
+ ret = mlx5_load_one(dev);
+ return ret;
}
static struct pci_driver mlx5_core_driver = {
.name = KBUILD_MODNAME,
.id_table = mlx5_core_pci_table,
- .probe = init_one,
+ .probe = probe_one,
.remove = remove_one,
.suspend = mlx5_suspend,
.resume = mlx5_resume,
.shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
+ .sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
+ .sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
static void mlx5_core_verify_params(void)
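
The boot/cleanup booleans are gone; each former call site in this file maps onto one of four explicit entry points, as the hunks above show:

    /* Old -> new lifecycle entry points:
     *   mlx5_load_one(dev, true)     -> mlx5_init_one(dev)     (probe)
     *   mlx5_unload_one(dev, true)   -> mlx5_uninit_one(dev)   (remove)
     *   mlx5_load_one(dev, false)    -> mlx5_load_one(dev)     (resume, recover)
     *   mlx5_unload_one(dev, false)  -> mlx5_unload_one(dev)   (suspend, error flows)
     */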
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index efe403c7e354..a22b706eebd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -134,12 +134,13 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
-void mlx5_recover_device(struct mlx5_core_dev *dev);
+int mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
@@ -174,6 +175,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb);
+
+int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
+ int msix_vec_count);
+int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
+
struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
@@ -267,10 +273,18 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
-void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
-int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
+int mlx5_init_one(struct mlx5_core_dev *dev);
+void mlx5_uninit_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one(struct mlx5_core_dev *dev);
+int mlx5_load_one(struct mlx5_core_dev *dev);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
+static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index c0656d4782e1..110c0837f95b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -61,7 +61,7 @@ struct fw_page {
u32 function;
unsigned long bitmask;
struct list_head list;
- unsigned free_count;
+ unsigned int free_count;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index a61e09aff152..1f907df5b3a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -61,6 +61,79 @@ static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
return &irq_table->irq[vecidx];
}
+/**
+ * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
+ * to be assigned to each VF.
+ * @dev: PF to work on
+ * @num_vfs: Number of enabled VFs
+ */
+int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
+{
+ int num_vf_msix, min_msix, max_msix;
+
+ num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+ if (!num_vf_msix)
+ return 0;
+
+ min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
+ max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);
+
+ /* Limit maximum number of MSI-X vectors so the default configuration
+ * has some available in the pool. This will allow the user to increase
+ * the number of vectors in a VF without having to first size-down other
+ * VFs.
+ */
+ return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
+}
+
+/**
+ * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
+ * @dev: PF to work on
+ * @function_id: Internal PCI VF function ID
+ * @msix_vec_count: Number of MSI-X vectors to set
+ */
+int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
+ int msix_vec_count)
+{
+ int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int num_vf_msix, min_msix, max_msix;
+ void *hca_cap, *cap;
+ int ret;
+
+ num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+ if (!num_vf_msix)
+ return 0;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
+ return -EOPNOTSUPP;
+
+ min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
+ max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);
+
+ if (msix_vec_count < min_msix)
+ return -EINVAL;
+
+ if (msix_vec_count > max_msix)
+ return -EOVERFLOW;
+
+ hca_cap = kzalloc(sz, GFP_KERNEL);
+ if (!hca_cap)
+ return -ENOMEM;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+ kfree(hca_cap);
+ return ret;
+}
+
int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb)
{
@@ -94,7 +167,6 @@ static void irq_set_name(char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
vecidx - MLX5_IRQ_VEC_COMP_BASE);
- return;
}
static int request_irqs(struct mlx5_core_dev *dev, int nvec)
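
The clamp in mlx5_get_default_msix_vec_count() deliberately keeps a reserve in the shared pool so individual VFs can be grown later without shrinking others first. A worked example with hypothetical capability values:

    /* num_vf_msix = 1024, min_msix = 2, max_msix = 64, num_vfs = 8:
     *   num_vf_msix / num_vfs      = 128
     *   min(128, max_msix / 2)     = 32
     *   max(32, min_msix)          = 32 vectors per VF by default,
     * leaving 1024 - 8 * 32 = 768 vectors in the pool for later increases.
     */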
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 4bb219565c58..1ef2b6a848c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -353,69 +353,123 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset
*offset -= MLX5_EEPROM_PAGE_LENGTH;
}
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data)
+static int mlx5_query_mcia(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params, u8 *data)
{
- int module_num, status, err, page_num = 0;
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- u16 i2c_addr = 0;
- u8 module_id;
+ int status, err;
void *ptr;
+ u16 size;
- err = mlx5_query_module_num(dev, &module_num);
+ size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES);
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, size, size);
+ MLX5_SET(mcia_reg, in, module, params->module_number);
+ MLX5_SET(mcia_reg, in, device_address, params->offset);
+ MLX5_SET(mcia_reg, in, page_number, params->page);
+ MLX5_SET(mcia_reg, in, i2c_device_address, params->i2c_address);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
if (err)
return err;
- err = mlx5_query_module_id(dev, module_num, &module_id);
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+ memcpy(data, ptr, size);
+
+ return size;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data)
+{
+ struct mlx5_module_eeprom_query_params query = {0};
+ u8 module_id;
+ int err;
+
+ err = mlx5_query_module_num(dev, &query.module_number);
+ if (err)
+ return err;
+
+ err = mlx5_query_module_id(dev, query.module_number, &module_id);
if (err)
return err;
switch (module_id) {
case MLX5_MODULE_ID_SFP:
- mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
- mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}
- if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
- size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+ query.size = size;
- MLX5_SET(mcia_reg, in, l, 0);
- MLX5_SET(mcia_reg, in, module, module_num);
- MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
- MLX5_SET(mcia_reg, in, page_number, page_num);
- MLX5_SET(mcia_reg, in, device_address, offset);
- MLX5_SET(mcia_reg, in, size, size);
+ return mlx5_query_mcia(dev, &query, data);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_MCIA, 0, 0);
+int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data)
+{
+ u8 module_id;
+ int err;
+
+ err = mlx5_query_module_num(dev, &params->module_number);
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
- return -EIO;
+ err = mlx5_query_module_id(dev, params->module_number, &module_id);
+ if (err)
+ return err;
+
+ switch (module_id) {
+ case MLX5_MODULE_ID_SFP:
+ if (params->page > 0)
+ return -EINVAL;
+ break;
+ case MLX5_MODULE_ID_QSFP:
+ case MLX5_MODULE_ID_QSFP28:
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ if (params->page > 3)
+ return -EINVAL;
+ break;
+ case MLX5_MODULE_ID_DSFP:
+ break;
+ default:
+ mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ return -EINVAL;
}
- ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
- memcpy(data, ptr, size);
+ if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
+ params->i2c_address != MLX5_I2C_ADDR_LOW) {
+ mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
+ return -EINVAL;
+ }
- return size;
+ return mlx5_query_mcia(dev, params, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
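
mlx5_query_module_eeprom_by_page() fills in the module number itself and validates the page against the module type, so a caller only supplies the addressing. A hedged usage sketch; the ethtool-side caller is an assumption, not part of this hunk:

    struct mlx5_module_eeprom_query_params params = {
            .i2c_address = MLX5_I2C_ADDR_LOW,  /* must be the LOW or HIGH address */
            .page        = 0,
            .offset      = 128,
            .size        = len,                /* capped internally to MLX5_EEPROM_MAX_BYTES */
    };
    int read = mlx5_query_module_eeprom_by_page(dev, &params, data);

    if (read < 0)
            return read;                       /* returns bytes read on success */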
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 8e0dddc6383f..441b5453acae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -180,5 +180,4 @@ del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
- return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 99039c47ef33..7161220afe30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -117,6 +117,9 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
bool empty_found = false;
int i;
+ lockdep_assert_held(&table->rl_lock);
+ WARN_ON(!table->rl_entry);
+
for (i = 0; i < table->max_size; i++) {
if (dedicated) {
if (!table->rl_entry[i].refcount)
@@ -172,38 +175,103 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
+static int mlx5_rl_table_get(struct mlx5_rl_table *table)
+{
+ int i;
+
+ lockdep_assert_held(&table->rl_lock);
+
+ if (table->rl_entry) {
+ table->refcount++;
+ return 0;
+ }
+
+ table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+ GFP_KERNEL);
+ if (!table->rl_entry)
+ return -ENOMEM;
+
+ /* The index represents the index in HW rate limit table
+ * Index 0 is reserved for unlimited rate
+ */
+ for (i = 0; i < table->max_size; i++)
+ table->rl_entry[i].index = i + 1;
+
+ table->refcount++;
+ return 0;
+}
+
+static void mlx5_rl_table_put(struct mlx5_rl_table *table)
+{
+ lockdep_assert_held(&table->rl_lock);
+ if (--table->refcount)
+ return;
+
+ kfree(table->rl_entry);
+ table->rl_entry = NULL;
+}
+
+static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
+{
+ int i;
+
+ if (!table->rl_entry)
+ return;
+
+ /* Clear all configured rates */
+ for (i = 0; i < table->max_size; i++)
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false);
+ kfree(table->rl_entry);
+}
+
+static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
+{
+ entry->refcount++;
+}
+
+static void
+mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
+{
+ entry->refcount--;
+ if (!entry->refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+}
+
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
- int err = 0;
u32 rate;
+ int err;
- rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
- mutex_lock(&table->rl_lock);
+ if (!table->max_size)
+ return -EOPNOTSUPP;
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
rate, table->min_rate, table->max_rate);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&table->rl_lock);
+ err = mlx5_rl_table_get(table);
+ if (err)
+ goto out;
+
entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
err = -ENOSPC;
- goto out;
+ goto rl_err;
}
- if (entry->refcount) {
- /* rate already configured */
- entry->refcount++;
- } else {
+ if (!entry->refcount) {
+ /* new rate limit */
memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
entry->uid = uid;
- /* new rate limit */
err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
mlx5_core_err(
@@ -214,14 +282,18 @@ int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
burst_upper_bound),
MLX5_GET(set_pp_rate_limit_context, rl_in,
typical_packet_size));
- goto out;
+ goto rl_err;
}
- entry->refcount = 1;
entry->dedicated = dedicated_entry;
}
+ mlx5_rl_entry_get(entry);
*index = entry->index;
+ mutex_unlock(&table->rl_lock);
+ return 0;
+rl_err:
+ mlx5_rl_table_put(table);
out:
mutex_unlock(&table->rl_lock);
return err;
@@ -235,10 +307,8 @@ void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
mutex_lock(&table->rl_lock);
entry = &table->rl_entry[index - 1];
- entry->refcount--;
- if (!entry->refcount)
- /* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+ mlx5_rl_entry_put(dev, entry);
+ mlx5_rl_table_put(table);
mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
@@ -286,12 +356,8 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
goto out;
}
-
- entry->refcount--;
- if (!entry->refcount)
- /* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry, false);
-
+ mlx5_rl_entry_put(dev, entry);
+ mlx5_rl_table_put(table);
out:
mutex_unlock(&table->rl_lock);
}
@@ -300,31 +366,19 @@ EXPORT_SYMBOL(mlx5_rl_remove_rate);
int mlx5_init_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- int i;
- mutex_init(&table->rl_lock);
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
table->max_size = 0;
return 0;
}
+ mutex_init(&table->rl_lock);
+
/* First entry is reserved for unlimited rate */
table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
- table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
- GFP_KERNEL);
- if (!table->rl_entry)
- return -ENOMEM;
-
- /* The index represents the index in HW rate limit table
- * Index 0 is reserved for unlimited rate
- */
- for (i = 0; i < table->max_size; i++)
- table->rl_entry[i].index = i + 1;
-
- /* Index 0 is reserved */
mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
table->max_size,
table->min_rate >> 10,
@@ -336,13 +390,10 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- int i;
- /* Clear all configured rates */
- for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].refcount)
- mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
- false);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
+ return;
- kfree(dev->priv.rl_table.rl_entry);
+ mlx5_rl_table_free(dev, table);
+ mutex_destroy(&table->rl_lock);
}
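
The rate-limit table array now exists only while at least one rate is configured, via a refcounted get/put pair instead of an unconditional allocation at init. The lifetime under the new scheme, as the hunks above wire it up:

    /* mlx5_rl_add_rate_raw()    -> mlx5_rl_table_get()  : first user kcallocs rl_entry[]
     * mlx5_rl_remove_rate_raw() -> mlx5_rl_table_put()  : last user kfrees it
     * Both run under table->rl_lock, hence the lockdep_assert_held() checks
     * and the WARN_ON(!table->rl_entry) in find_rl_entry().
     */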
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 90b524c59f3c..6a0c6f965ad1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
+ u16 max_functions;
u16 sf_index;
+ u16 base_id;
+
+ max_functions = mlx5_sf_max_functions(table->dev);
+ if (!max_functions)
+ return 0;
+
+ base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
+ if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
+ return 0;
- sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_ALLOCATED:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 4de02902aef1..149fd9e698cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -47,7 +47,7 @@ static inline void mlx5_sf_driver_unregister(void)
static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
{
- return 0;
+ return false;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index c4bf555c25ea..42c8ee03fe3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -41,14 +41,15 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
- err = mlx5_load_one(mdev, true);
+ err = mlx5_init_one(mdev);
if (err) {
- mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err);
- goto load_one_err;
+ mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
+ goto init_one_err;
}
+ devlink_reload_enable(devlink);
return 0;
-load_one_err:
+init_one_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
@@ -63,7 +64,8 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
struct devlink *devlink;
devlink = priv_to_devlink(sf_dev->mdev);
- mlx5_unload_one(sf_dev->mdev, true);
+ devlink_reload_disable(devlink);
+ mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
mlx5_devlink_free(devlink);
@@ -73,7 +75,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- mlx5_unload_one(sf_dev->mdev, false);
+ mlx5_unload_one(sf_dev->mdev);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index c2ba41bb7a70..a8e73c9ed1ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -12,6 +12,7 @@
struct mlx5_sf {
struct devlink_port dl_port;
unsigned int port_index;
+ u32 controller;
u16 id;
u16 hw_fn_id;
u16 hw_state;
@@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
}
static struct mlx5_sf *
-mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
+ u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
unsigned int dl_port_index;
struct mlx5_sf *sf;
@@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
int id_err;
int err;
- id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+ if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
+ return ERR_PTR(-EINVAL);
+ }
+
+ id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
if (id_err < 0) {
err = id_err;
goto id_err;
@@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
goto alloc_err;
}
sf->id = id_err;
- hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
sf->port_index = dl_port_index;
sf->hw_fn_id = hw_fn_id;
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+ sf->controller = controller;
err = mlx5_sf_id_insert(table, sf);
if (err)
@@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
insert_err:
kfree(sf);
alloc_err:
- mlx5_sf_hw_table_sf_free(table->dev, id_err);
+ mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
if (err == -EEXIST)
NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose a different sfnum");

@@ -103,7 +111,7 @@ id_err:
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
mlx5_sf_id_erase(table, sf);
- mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+ mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
@@ -270,15 +278,14 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_sf *sf;
- u16 hw_fn_id;
int err;
- sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+ sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
if (IS_ERR(sf))
return PTR_ERR(sf);
- hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
- err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
+ err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
+ new_attr->controller, new_attr->sfnum);
if (err)
goto esw_err;
*new_port_index = sf->port_index;
@@ -307,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
"User must provide unique sfnum. Driver does not support auto assignment");
return -EOPNOTSUPP;
}
- if (new_attr->controller_valid && new_attr->controller) {
+ if (new_attr->controller_valid && new_attr->controller &&
+ !mlx5_core_is_ecpf_esw_manager(dev)) {
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
@@ -353,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
* firmware gives confirmation that it is detached by the driver.
*/
mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
- mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
} else {
- mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
}
@@ -438,9 +446,6 @@ sf_err:
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
- if (!mlx5_sf_max_functions(table->dev))
- return;
-
init_completion(&table->disable_complete);
refcount_set(&table->refcount, 1);
}
@@ -463,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
- if (!mlx5_sf_max_functions(table->dev))
- return;
-
if (!refcount_read(&table->refcount))
return;
@@ -492,14 +494,15 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
break;
default:
break;
- };
+ }
return 0;
}
static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
- return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_sf_hw_table_supported(dev);
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
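
In this file an SF is now keyed by the (controller, sfnum) pair rather than by sfnum alone: the controller is validated against the eswitch, recorded in struct mlx5_sf, and passed down to every hw_table call so the free path releases the id from the right per-controller table. A hypothetical caller illustrating the flow (the literal values are made up):

/* allocate sfnum 10 on external controller 1 */
sf = mlx5_sf_alloc(table, esw, 1, 10, extack);
if (IS_ERR(sf))
	return PTR_ERR(sf);
/* sf->hw_fn_id was derived from that controller's base id, and the
 * teardown paths (mlx5_sf_free(), mlx5_sf_dealloc()) read
 * sf->controller back so the id returns to the same table
 */
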
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index a5a0f60bef66..ef5f892aafad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -5,8 +5,10 @@
#include "priv.h"
#include "sf.h"
#include "mlx5_ifc_vhca_event.h"
+#include "ecpf.h"
#include "vhca_event.h"
#include "mlx5_core.h"
+#include "eswitch.h"
struct mlx5_sf_hw {
u32 usr_sfnum;
@@ -14,60 +16,114 @@ struct mlx5_sf_hw {
u8 pending_delete: 1;
};
+struct mlx5_sf_hwc_table {
+ struct mlx5_sf_hw *sfs;
+ int max_fn;
+ u16 start_fn_id;
+};
+
+enum mlx5_sf_hwc_index {
+ MLX5_SF_HWC_LOCAL,
+ MLX5_SF_HWC_EXTERNAL,
+ MLX5_SF_HWC_MAX,
+};
+
struct mlx5_sf_hw_table {
struct mlx5_core_dev *dev;
- struct mlx5_sf_hw *sfs;
- int max_local_functions;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
struct notifier_block vhca_nb;
+ struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
+static struct mlx5_sf_hwc_table *
+mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
{
- return sw_id + mlx5_sf_start_function_id(dev);
+ int idx = !!controller;
+
+ return &dev->priv.sf_hw_table->hwc[idx];
}
-static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
{
- return hw_id - mlx5_sf_start_function_id(dev);
+ struct mlx5_sf_hwc_table *hwc;
+
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
+ return hwc->start_fn_id + sw_id;
}
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
+static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
+{
+ return hw_id - hwc->start_fn_id;
+}
+
+static struct mlx5_sf_hwc_table *
+mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
- int sw_id = -ENOSPC;
- u16 hw_fn_id;
- int err;
int i;
- if (!table->max_local_functions)
- return -EOPNOTSUPP;
+ for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
+ if (table->hwc[i].max_fn &&
+ fn_id >= table->hwc[i].start_fn_id &&
+ fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
+ return &table->hwc[i];
+ }
+ return NULL;
+}
+
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+ u32 usr_sfnum)
+{
+ struct mlx5_sf_hwc_table *hwc;
+ int i;
+
+ hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ if (!hwc->sfs)
+ return -ENOSPC;
- mutex_lock(&table->table_lock);
/* Check if an SF with the same sfnum already exists. */
- for (i = 0; i < table->max_local_functions; i++) {
- if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
- err = -EEXIST;
- goto exist_err;
- }
+ for (i = 0; i < hwc->max_fn; i++) {
+ if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
+ return -EEXIST;
}
-
/* Find a free entry and allocate it from the array */
- for (i = 0; i < table->max_local_functions; i++) {
- if (!table->sfs[i].allocated) {
- table->sfs[i].usr_sfnum = usr_sfnum;
- table->sfs[i].allocated = true;
- sw_id = i;
- break;
+ for (i = 0; i < hwc->max_fn; i++) {
+ if (!hwc->sfs[i].allocated) {
+ hwc->sfs[i].usr_sfnum = usr_sfnum;
+ hwc->sfs[i].allocated = true;
+ return i;
}
}
- if (sw_id == -ENOSPC) {
- err = -ENOSPC;
+ return -ENOSPC;
+}
+
+static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+{
+ struct mlx5_sf_hwc_table *hwc;
+
+ hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc->sfs[id].allocated = false;
+ hwc->sfs[id].pending_delete = false;
+}
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u16 hw_fn_id;
+ int sw_id;
+ int err;
+
+ if (!table)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&table->table_lock);
+ sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+ if (sw_id < 0) {
+ err = sw_id;
goto exist_err;
}
- hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
- err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
+ err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
if (err)
goto err;
@@ -75,101 +131,161 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
if (err)
goto vhca_err;
+ if (controller) {
+	/* If this SF is for an external controller, the SF manager
+	 * needs to arm the firmware to receive its events.
+ */
+ err = mlx5_vhca_event_arm(dev, hw_fn_id);
+ if (err)
+ goto vhca_err;
+ }
+
mutex_unlock(&table->table_lock);
return sw_id;
vhca_err:
- mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
- table->sfs[i].allocated = false;
+ mlx5_sf_hw_table_id_free(table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
}
-static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u16 hw_fn_id;
- hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
- mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
- table->sfs[id].allocated = false;
- table->sfs[id].pending_delete = false;
+ mutex_lock(&table->table_lock);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
+ mlx5_cmd_dealloc_sf(dev, hw_fn_id);
+ mlx5_sf_hw_table_id_free(table, controller, id);
+ mutex_unlock(&table->table_lock);
}
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
+static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hwc_table *hwc, int idx)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
- mutex_lock(&table->table_lock);
- _mlx5_sf_hw_id_free(dev, id);
- mutex_unlock(&table->table_lock);
+ mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
+ hwc->sfs[idx].allocated = false;
+ hwc->sfs[idx].pending_delete = false;
}
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ struct mlx5_sf_hwc_table *hwc;
u16 hw_fn_id;
u8 state;
int err;
- hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
mutex_lock(&table->table_lock);
err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
if (err)
goto err;
state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
if (state == MLX5_VHCA_STATE_ALLOCATED) {
- mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
- table->sfs[id].allocated = false;
+ mlx5_cmd_dealloc_sf(dev, hw_fn_id);
+ hwc->sfs[id].allocated = false;
} else {
- table->sfs[id].pending_delete = true;
+ hwc->sfs[id].pending_delete = true;
}
err:
mutex_unlock(&table->table_lock);
}
-static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hwc_table *hwc)
{
int i;
- for (i = 0; i < table->max_local_functions; i++) {
- if (table->sfs[i].allocated)
- _mlx5_sf_hw_id_free(table->dev, i);
+ for (i = 0; i < hwc->max_fn; i++) {
+ if (hwc->sfs[i].allocated)
+ mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
}
}
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+{
+ mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+}
+
+static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
+{
+ struct mlx5_sf_hw *sfs;
+
+ if (!max_fn)
+ return 0;
+
+ sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL);
+ if (!sfs)
+ return -ENOMEM;
+
+ hwc->sfs = sfs;
+ hwc->max_fn = max_fn;
+ hwc->start_fn_id = base_id;
+ return 0;
+}
+
+static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
+{
+ kfree(hwc->sfs);
+}
+
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
- struct mlx5_sf_hw *sfs;
- int max_functions;
+ u16 max_ext_fn = 0;
+ u16 ext_base_id;
+ u16 max_fn = 0;
+ u16 base_id;
+ int err;
- if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
+ if (!mlx5_vhca_event_supported(dev))
+ return 0;
+
+ if (mlx5_sf_supported(dev))
+ max_fn = mlx5_sf_max_functions(dev);
+
+ err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
+ if (err)
+ return err;
+
+ if (!max_fn && !max_ext_fn)
return 0;
- max_functions = mlx5_sf_max_functions(dev);
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
- sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
- if (!sfs)
- goto table_err;
-
mutex_init(&table->table_lock);
table->dev = dev;
- table->sfs = sfs;
- table->max_local_functions = max_functions;
dev->priv.sf_hw_table = table;
- mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
+
+ base_id = mlx5_sf_start_function_id(dev);
+ err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
+ if (err)
+ goto table_err;
+
+ err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
+ max_ext_fn, ext_base_id);
+ if (err)
+ goto ext_err;
+
+ mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
return 0;
+ext_err:
+ mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
table_err:
+ mutex_destroy(&table->table_lock);
kfree(table);
- return -ENOMEM;
+ return err;
}
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
@@ -180,7 +296,8 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
return;
mutex_destroy(&table->table_lock);
- kfree(table->sfs);
+ mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
kfree(table);
}
@@ -188,21 +305,26 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
{
struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
- sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
- sf_hw = &table->sfs[sw_id];
+ hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
+ if (!hwc)
+ return 0;
+
+ sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
+ sf_hw = &hwc->sfs[sw_id];
mutex_lock(&table->table_lock);
/* SF driver notified through firmware that SF is finally detached.
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
- _mlx5_sf_hw_id_free(table->dev, sw_id);
+ mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
@@ -215,7 +337,7 @@ int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
return 0;
table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
- return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+ return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -225,7 +347,12 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */
- mlx5_sf_hw_dealloc_all(table);
+ mlx5_sf_hw_table_dealloc_all(table);
+}
+
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
+{
+ return !!dev->priv.sf_hw_table;
}
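
With two disjoint function-id ranges, mapping a firmware function id back to its controller table and software index is a range test followed by a subtraction. A worked example with made-up base ids:

/* Assume hwc[MLX5_SF_HWC_LOCAL] = {start_fn_id 100, max_fn 4} and
 * hwc[MLX5_SF_HWC_EXTERNAL] = {start_fn_id 200, max_fn 8}
 * (illustrative numbers only):
 *   fn_id 102 -> hwc[LOCAL],    sw_id = 102 - 100 = 2
 *   fn_id 203 -> hwc[EXTERNAL], sw_id = 203 - 200 = 3
 *   fn_id 150 -> neither range, mlx5_sf_table_fn_to_hwc() returns NULL
 *                and the VHCA event is ignored
 */
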
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
index cb02a51d0986..7114f3fc335f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
@@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id);
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev);
#endif
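
Because ids are only unique within one controller's table, a caller of this API must pass the same controller value to the free that it used for the alloc. A minimal usage sketch:

int id;

id = mlx5_sf_hw_table_sf_alloc(dev, controller, sfnum);
if (id < 0)
	return id;
/* ... use the SF ... */
mlx5_sf_hw_table_sf_free(dev, controller, id);	/* same controller */
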
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 3094d20297a9..2338989d4403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -71,8 +71,7 @@ static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
- int vf;
+ int err, vf, num_msix_count;
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
@@ -85,12 +84,22 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
}
enable_vfs_hca:
+ num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) {
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
continue;
}
+
+ err = mlx5_set_msix_vec_count(dev, vf + 1, num_msix_count);
+ if (err) {
+ mlx5_core_warn(dev,
+ "failed to set MSI-X vector counts VF %d, err %d\n",
+ vf, err);
+ continue;
+ }
+
sriov->vfs_ctx[vf].enabled = 1;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
err = sriov_restore_guids(dev, vf);
@@ -178,6 +187,41 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
return err ? err : num_vfs;
}
+int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
+{
+ struct pci_dev *pf = pci_physfn(vf);
+ struct mlx5_core_sriov *sriov;
+ struct mlx5_core_dev *dev;
+ int num_vf_msix, id;
+
+ dev = pci_get_drvdata(pf);
+ num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
+ if (!num_vf_msix)
+ return -EOPNOTSUPP;
+
+ if (!msix_vec_count)
+ msix_vec_count =
+ mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));
+
+ sriov = &dev->priv.sriov;
+
+	/* Reverse translation of the PCI VF function number to the internal
+	 * function_id, which appears in the name of the virtfn symlink.
+ */
+ for (id = 0; id < pci_num_vf(pf); id++) {
+ if (!sriov->vfs_ctx[id].enabled)
+ continue;
+
+ if (vf->devfn == pci_iov_virtfn_devfn(pf, id))
+ break;
+ }
+
+ if (id == pci_num_vf(pf) || !sriov->vfs_ctx[id].enabled)
+ return -EINVAL;
+
+ return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
+}
+
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
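
mlx5_core_sriov_set_msix_vec_count() resolves the VF by matching its PCI devfn against pci_iov_virtfn_devfn() for every enabled VF, then programs id + 1, since internal function ids are 1-based relative to the PF. Passing msix_vec_count == 0 restores the default split. An illustrative computation, assuming the default is roughly an even division of the dynamic pool (made-up numbers):

/* num_total_dynamic_vf_msix = 2048, pci_num_vf(pf) = 16
 *   default per-VF count ~ 2048 / 16 = 128
 * mlx5_core_sriov_set_msix_vec_count(vf, 0) would then hand ~128
 * vectors back to that VF.
 */
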
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 28a7971cac6a..949879cf2092 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -313,8 +313,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
* table, since there is an *assumption* that in such a case FW
* will recalculate the CS.
*/
- if (dest_action->dest_tbl.is_fw_tbl) {
- *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr;
+ if (dest_action->dest_tbl->is_fw_tbl) {
+ *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr;
} else {
mlx5dr_dbg(dmn,
"Destination FT should be terminating when modify TTL is used\n");
@@ -326,8 +326,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
/* If destination is vport we will get the FW flow table
* that recalculates the CS and forwards to the vport.
*/
- ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
- dest_action->vport.caps->num,
+ ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+ dest_action->vport->caps->num,
final_icm_addr);
if (ret) {
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
@@ -369,6 +369,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type);
for (i = 0; i < num_actions; i++) {
+ struct mlx5dr_action_dest_tbl *dest_tbl;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -382,37 +383,38 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_FT:
dest_action = action;
- if (!action->dest_tbl.is_fw_tbl) {
- if (action->dest_tbl.tbl->dmn != dmn) {
+ dest_tbl = action->dest_tbl;
+ if (!dest_tbl->is_fw_tbl) {
+ if (dest_tbl->tbl->dmn != dmn) {
mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
goto out_invalid_arg;
}
- if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ if (dest_tbl->tbl->level <= matcher->tbl->level) {
mlx5_core_warn_once(dmn->mdev,
"Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
"Connecting table at level %d to a destination table at level %d\n",
matcher->tbl->level,
- action->dest_tbl.tbl->level);
+ dest_tbl->tbl->level);
}
attr.final_icm_addr = rx_rule ?
- action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
- action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
+ dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
+ dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
} else {
struct mlx5dr_cmd_query_flow_table_details output;
int ret;
/* get the relevant addresses */
- if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
+ if (!action->dest_tbl->fw_tbl.rx_icm_addr) {
ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
- action->dest_tbl.fw_tbl.type,
- action->dest_tbl.fw_tbl.id,
+ dest_tbl->fw_tbl.type,
+ dest_tbl->fw_tbl.id,
&output);
if (!ret) {
- action->dest_tbl.fw_tbl.tx_icm_addr =
+ dest_tbl->fw_tbl.tx_icm_addr =
output.sw_owner_icm_root_1;
- action->dest_tbl.fw_tbl.rx_icm_addr =
+ dest_tbl->fw_tbl.rx_icm_addr =
output.sw_owner_icm_root_0;
} else {
mlx5dr_err(dmn,
@@ -422,50 +424,50 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
}
attr.final_icm_addr = rx_rule ?
- action->dest_tbl.fw_tbl.rx_icm_addr :
- action->dest_tbl.fw_tbl.tx_icm_addr;
+ dest_tbl->fw_tbl.rx_icm_addr :
+ dest_tbl->fw_tbl.tx_icm_addr;
}
break;
case DR_ACTION_TYP_QP:
mlx5dr_info(dmn, "Domain doesn't support QP\n");
goto out_invalid_arg;
case DR_ACTION_TYP_CTR:
- attr.ctr_id = action->ctr.ctr_id +
- action->ctr.offeset;
+ attr.ctr_id = action->ctr->ctr_id +
+ action->ctr->offeset;
break;
case DR_ACTION_TYP_TAG:
- attr.flow_tag = action->flow_tag;
+ attr.flow_tag = action->flow_tag->flow_tag;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- attr.decap_index = action->rewrite.index;
- attr.decap_actions = action->rewrite.num_of_actions;
+ attr.decap_index = action->rewrite->index;
+ attr.decap_actions = action->rewrite->num_of_actions;
attr.decap_with_vlan =
attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
break;
case DR_ACTION_TYP_MODIFY_HDR:
- attr.modify_index = action->rewrite.index;
- attr.modify_actions = action->rewrite.num_of_actions;
- recalc_cs_required = action->rewrite.modify_ttl &&
+ attr.modify_index = action->rewrite->index;
+ attr.modify_actions = action->rewrite->num_of_actions;
+ recalc_cs_required = action->rewrite->modify_ttl &&
!mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
- attr.reformat_size = action->reformat.reformat_size;
- attr.reformat_id = action->reformat.reformat_id;
+ attr.reformat_size = action->reformat->reformat_size;
+ attr.reformat_id = action->reformat->reformat_id;
break;
case DR_ACTION_TYP_VPORT:
- attr.hit_gvmi = action->vport.caps->vhca_gvmi;
+ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
/* Loopback on WIRE vport is not supported */
- if (action->vport.caps->num == WIRE_PORT)
+ if (action->vport->caps->num == WIRE_PORT)
goto out_invalid_arg;
- attr.final_icm_addr = action->vport.caps->icm_address_rx;
+ attr.final_icm_addr = action->vport->caps->icm_address_rx;
} else {
- attr.final_icm_addr = action->vport.caps->icm_address_tx;
+ attr.final_icm_addr = action->vport->caps->icm_address_tx;
}
break;
case DR_ACTION_TYP_POP_VLAN:
@@ -477,7 +479,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
- attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
+ attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
break;
default:
goto out_invalid_arg;
@@ -530,17 +532,37 @@ out_invalid_arg:
return -EINVAL;
}
+static unsigned int action_size[DR_ACTION_TYP_MAX] = {
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite),
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_FT] = sizeof(struct mlx5dr_action_dest_tbl),
+ [DR_ACTION_TYP_CTR] = sizeof(struct mlx5dr_action_ctr),
+ [DR_ACTION_TYP_TAG] = sizeof(struct mlx5dr_action_flow_tag),
+ [DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite),
+ [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport),
+ [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan),
+};
+
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
struct mlx5dr_action *action;
+ int extra_size;
+
+ if (action_type < DR_ACTION_TYP_MAX)
+ extra_size = action_size[action_type];
+ else
+ return NULL;
- action = kzalloc(sizeof(*action), GFP_KERNEL);
+ action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL);
if (!action)
return NULL;
action->action_type = action_type;
refcount_set(&action->refcount, 1);
+ action->data = action + 1;
return action;
}
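
dr_action_create_generic() now makes a single allocation sized for the fixed header plus the per-type payload and points action->data just past the header; the per-type accessors (action->rewrite, action->dest_tbl, ...) all alias that trailing region. A generic sketch of the header-plus-payload pattern, outside any mlx5 types:

struct hdr {
	int type;
	void *data;
};

struct payload {
	int a, b;
};

static struct hdr *alloc_with_payload(void)
{
	struct hdr *h = kzalloc(sizeof(*h) + sizeof(struct payload), GFP_KERNEL);

	if (!h)
		return NULL;
	h->data = h + 1;	/* payload starts right after the header */
	return h;
}

One kzalloc() instead of two keeps the payload cache-adjacent to the header and makes a single kfree() sufficient on destroy.
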
@@ -559,10 +581,10 @@ mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
if (!action)
return NULL;
- action->dest_tbl.is_fw_tbl = true;
- action->dest_tbl.fw_tbl.dmn = dmn;
- action->dest_tbl.fw_tbl.id = table_num;
- action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl->is_fw_tbl = true;
+ action->dest_tbl->fw_tbl.dmn = dmn;
+ action->dest_tbl->fw_tbl.id = table_num;
+ action->dest_tbl->fw_tbl.type = FS_FT_FDB;
refcount_inc(&dmn->refcount);
return action;
@@ -579,7 +601,7 @@ mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
if (!action)
goto dec_ref;
- action->dest_tbl.tbl = tbl;
+ action->dest_tbl->tbl = tbl;
return action;
@@ -624,12 +646,12 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
case DR_ACTION_TYP_VPORT:
hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- hw_dests[i].vport.num = dest_action->vport.caps->num;
- hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
+ hw_dests[i].vport.num = dest_action->vport->caps->num;
+ hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi;
if (reformat_action) {
reformat_req = true;
hw_dests[i].vport.reformat_id =
- reformat_action->reformat.reformat_id;
+ reformat_action->reformat->reformat_id;
ref_actions[num_of_ref++] = reformat_action;
hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@@ -637,10 +659,10 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
case DR_ACTION_TYP_FT:
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- if (dest_action->dest_tbl.is_fw_tbl)
- hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
+ if (dest_action->dest_tbl->is_fw_tbl)
+ hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
else
- hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
+ hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
break;
default:
@@ -657,8 +679,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
hw_dests,
num_of_dests,
reformat_req,
- &action->dest_tbl.fw_tbl.id,
- &action->dest_tbl.fw_tbl.group_id);
+ &action->dest_tbl->fw_tbl.id,
+ &action->dest_tbl->fw_tbl.group_id);
if (ret)
goto free_action;
@@ -667,11 +689,11 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
for (i = 0; i < num_of_ref; i++)
refcount_inc(&ref_actions[i]->refcount);
- action->dest_tbl.is_fw_tbl = true;
- action->dest_tbl.fw_tbl.dmn = dmn;
- action->dest_tbl.fw_tbl.type = FS_FT_FDB;
- action->dest_tbl.fw_tbl.ref_actions = ref_actions;
- action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
+ action->dest_tbl->is_fw_tbl = true;
+ action->dest_tbl->fw_tbl.dmn = dmn;
+ action->dest_tbl->fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl->fw_tbl.ref_actions = ref_actions;
+ action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref;
kfree(hw_dests);
@@ -696,10 +718,10 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
if (!action)
return NULL;
- action->dest_tbl.is_fw_tbl = 1;
- action->dest_tbl.fw_tbl.type = ft->type;
- action->dest_tbl.fw_tbl.id = ft->id;
- action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl->is_fw_tbl = 1;
+ action->dest_tbl->fw_tbl.type = ft->type;
+ action->dest_tbl->fw_tbl.id = ft->id;
+ action->dest_tbl->fw_tbl.dmn = dmn;
refcount_inc(&dmn->refcount);
@@ -715,7 +737,7 @@ mlx5dr_action_create_flow_counter(u32 counter_id)
if (!action)
return NULL;
- action->ctr.ctr_id = counter_id;
+ action->ctr->ctr_id = counter_id;
return action;
}
@@ -728,7 +750,7 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
if (!action)
return NULL;
- action->flow_tag = tag_value & 0xffffff;
+ action->flow_tag->flow_tag = tag_value & 0xffffff;
return action;
}
@@ -794,8 +816,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
if (ret)
return ret;
- action->reformat.reformat_id = reformat_id;
- action->reformat.reformat_size = data_sz;
+ action->reformat->reformat_id = reformat_id;
+ action->reformat->reformat_size = data_sz;
return 0;
}
case DR_ACTION_TYP_TNL_L2_TO_L2:
@@ -811,28 +833,28 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
data, data_sz,
hw_actions,
ACTION_CACHE_LINE_SIZE,
- &action->rewrite.num_of_actions);
+ &action->rewrite->num_of_actions);
if (ret) {
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
return ret;
}
- action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
- DR_CHUNK_SIZE_8);
- if (!action->rewrite.chunk) {
+ action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+ DR_CHUNK_SIZE_8);
+ if (!action->rewrite->chunk) {
mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
}
- action->rewrite.data = (void *)hw_actions;
- action->rewrite.index = (action->rewrite.chunk->icm_addr -
+ action->rewrite->data = (void *)hw_actions;
+ action->rewrite->index = (action->rewrite->chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
- mlx5dr_icm_free_chunk(action->rewrite.chunk);
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
return ret;
}
return 0;
@@ -867,7 +889,7 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
if (!action)
return NULL;
- action->push_vlan.vlan_hdr = vlan_hdr_h;
+ action->push_vlan->vlan_hdr = vlan_hdr_h;
return action;
}
@@ -898,7 +920,7 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
if (!action)
goto dec_ref;
- action->reformat.dmn = dmn;
+ action->reformat->dmn = dmn;
ret = dr_action_create_reformat_action(dmn,
data_sz,
@@ -1104,17 +1126,17 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
- action->rewrite.allow_rx = 0;
+ action->rewrite->allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
sw_field);
return -EINVAL;
}
} else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
- action->rewrite.allow_tx = 0;
+ action->rewrite->allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
sw_field);
@@ -1122,7 +1144,7 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
}
}
- if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
return -EINVAL;
}
@@ -1135,7 +1157,7 @@ dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
@@ -1153,7 +1175,7 @@ static int
dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
u16 sw_fields[2];
int i;
@@ -1162,14 +1184,14 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
for (i = 0; i < 2; i++) {
if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
- action->rewrite.allow_rx = 0;
+ action->rewrite->allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
sw_fields[i]);
return -EINVAL;
}
} else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
- action->rewrite.allow_tx = 0;
+ action->rewrite->allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
sw_fields[i]);
@@ -1178,7 +1200,7 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
}
}
- if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
return -EINVAL;
}
@@ -1190,7 +1212,7 @@ static int
dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
const __be64 *sw_action)
{
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
u8 action_type;
int ret;
@@ -1239,7 +1261,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
{
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
- struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
@@ -1249,8 +1271,8 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
*modify_ttl = false;
- action->rewrite.allow_rx = 1;
- action->rewrite.allow_tx = 1;
+ action->rewrite->allow_rx = 1;
+ action->rewrite->allow_tx = 1;
for (i = 0; i < num_sw_actions; i++) {
sw_action = &sw_actions[i];
@@ -1358,13 +1380,13 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
if (ret)
goto free_hw_actions;
- action->rewrite.chunk = chunk;
- action->rewrite.modify_ttl = modify_ttl;
- action->rewrite.data = (u8 *)hw_actions;
- action->rewrite.num_of_actions = num_hw_actions;
- action->rewrite.index = (chunk->icm_addr -
- dmn->info.caps.hdr_modify_icm_addr) /
- ACTION_CACHE_LINE_SIZE;
+ action->rewrite->chunk = chunk;
+ action->rewrite->modify_ttl = modify_ttl;
+ action->rewrite->data = (u8 *)hw_actions;
+ action->rewrite->num_of_actions = num_hw_actions;
+ action->rewrite->index = (chunk->icm_addr -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ ACTION_CACHE_LINE_SIZE;
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret)
@@ -1399,7 +1421,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
if (!action)
goto dec_ref;
- action->rewrite.dmn = dmn;
+ action->rewrite->dmn = dmn;
ret = dr_action_create_modify_action(dmn,
actions_sz,
@@ -1451,8 +1473,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
if (!action)
return NULL;
- action->vport.dmn = vport_dmn;
- action->vport.caps = vport_cap;
+ action->vport->dmn = vport_dmn;
+ action->vport->caps = vport_cap;
return action;
}
@@ -1464,44 +1486,44 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
switch (action->action_type) {
case DR_ACTION_TYP_FT:
- if (action->dest_tbl.is_fw_tbl)
- refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
+ if (action->dest_tbl->is_fw_tbl)
+ refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount);
else
- refcount_dec(&action->dest_tbl.tbl->refcount);
+ refcount_dec(&action->dest_tbl->tbl->refcount);
- if (action->dest_tbl.is_fw_tbl &&
- action->dest_tbl.fw_tbl.num_of_ref_actions) {
+ if (action->dest_tbl->is_fw_tbl &&
+ action->dest_tbl->fw_tbl.num_of_ref_actions) {
struct mlx5dr_action **ref_actions;
int i;
- ref_actions = action->dest_tbl.fw_tbl.ref_actions;
- for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
+ ref_actions = action->dest_tbl->fw_tbl.ref_actions;
+ for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++)
refcount_dec(&ref_actions[i]->refcount);
kfree(ref_actions);
- mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
- action->dest_tbl.fw_tbl.id,
- action->dest_tbl.fw_tbl.group_id);
+ mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn,
+ action->dest_tbl->fw_tbl.id,
+ action->dest_tbl->fw_tbl.group_id);
}
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- refcount_dec(&action->reformat.dmn->refcount);
+ refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- mlx5dr_icm_free_chunk(action->rewrite.chunk);
- refcount_dec(&action->reformat.dmn->refcount);
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ refcount_dec(&action->rewrite->dmn->refcount);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
- mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev,
- action->reformat.reformat_id);
- refcount_dec(&action->reformat.dmn->refcount);
+ mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
+ action->reformat->reformat_id);
+ refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_MODIFY_HDR:
- mlx5dr_icm_free_chunk(action->rewrite.chunk);
- kfree(action->rewrite.data);
- refcount_dec(&action->rewrite.dmn->refcount);
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ kfree(action->rewrite->data);
+ refcount_dec(&action->rewrite->dmn->refcount);
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 30b0136b5bc7..5970cb8fc0c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -85,15 +85,53 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
return 0;
}
+static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
+ u16 vport, bool *roce_en)
+{
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
+ int err;
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *roce_en = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.roce_en);
+ return 0;
+}
+
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps)
{
+ bool roce_en;
+ int err;
+
caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
+ if (MLX5_CAP_GEN(mdev, roce)) {
+ err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
+ if (err)
+ return err;
+
+ caps->roce_caps.roce_en = roce_en;
+ caps->roce_caps.fl_rc_qp_when_roce_disabled =
+ MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
+ caps->roce_caps.fl_rc_qp_when_roce_enabled =
+ MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
+ }
+
+ caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
@@ -106,6 +144,34 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
}
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
+
+	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
+ caps->flex_parser_id_gtpu_dw_0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
+ caps->flex_parser_id_gtpu_teid =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
+ caps->flex_parser_id_gtpu_dw_2 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
+ caps->flex_parser_id_gtpu_first_ext_dw_0 =
+ MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
+
caps->nic_rx_drop_address =
MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
caps->nic_tx_drop_address =
@@ -287,7 +353,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
u32 *in;
int err;
- in = kzalloc(inlen, GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -302,7 +368,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
*group_id = MLX5_GET(create_flow_group_out, out, group_id);
out:
- kfree(in);
+ kvfree(in);
return err;
}
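
The create_flow_group inbox grows with the match criteria, so the allocation moved from kzalloc() to kvzalloc(), which transparently falls back to vmalloc for sizes the slab allocator would rather not serve. The one rule such code must follow is to pair it with kvfree():

in = kvzalloc(inlen, GFP_KERNEL);	/* may be kmalloc- or vmalloc-backed */
if (!in)
	return -ENOMEM;
/* ... build and execute the command ... */
kvfree(in);	/* never plain kfree() on kvzalloc() memory */
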
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 15673cd10039..6f6191d1d5a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -92,15 +92,17 @@ static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
misc->gre_k_present || misc->gre_s_present);
}
-#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \
- (_misc2).outer_first_mpls_over_##gre_udp##_label || \
- (_misc2).outer_first_mpls_over_##gre_udp##_exp || \
- (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \
- (_misc2).outer_first_mpls_over_##gre_udp##_ttl)
-
-#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \
- DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
- DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
+#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
static bool
dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
@@ -133,6 +135,11 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
misc->geneve_opt_len;
}
+static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
+{
+ return misc3->geneve_tlv_option_0_data;
+}
+
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
@@ -148,6 +155,109 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
dr_matcher_supp_tnl_geneve(&dmn->info.caps);
}
+static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
+{
+ return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
+}
+
+static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
+ dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_dw_0 &&
+ dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_teid &&
+ dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_dw_2 &&
+ dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return mask->misc3.gtpu_first_ext_dw_0 &&
+ dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
+}
+
+static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+ return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
+ dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
+ (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
+ dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
+ (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
+ dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
+ (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
+ dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
+}
+
+static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+ return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
+ dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
+ (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
+ dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
+ (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
+ dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
+ (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
+ dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
+}
+
+static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
+ dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
+ dr_mask_is_tnl_gtpu(mask, dmn);
+}
+
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
@@ -199,6 +309,65 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
return (misc->source_sqn || misc->source_port);
}
+static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
+ u32 flex_parser_value)
+{
+ if (flex_parser_id)
+ return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
+
+ /* Using flex_parser 0 means that id is zero, thus value must be set. */
+ return flex_parser_value;
+}
+
+static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
+{
+ return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
+ misc4->prog_sample_field_value_0) ||
+ dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
+ misc4->prog_sample_field_value_1) ||
+ dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
+ misc4->prog_sample_field_value_2) ||
+ dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
+ misc4->prog_sample_field_value_3));
+}
+
+static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
+{
+ return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
+ flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
+}
+
+static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
+{
+ return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
+ dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
+ dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
+ dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
+}
+
+static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
+}
+
+static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
+ dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
+{
+	return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
+}
+
+static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
+ dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
+}
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
@@ -251,6 +420,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
mask.misc3 = matcher->mask.misc3;
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
+ mask.misc4 = matcher->mask.misc4;
+
ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
&matcher->mask, NULL);
if (ret)
@@ -321,9 +493,28 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
&mask, inner, rx);
- else if (dr_mask_is_tnl_geneve(&mask, dmn))
+ else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
&mask, inner, rx);
+ if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
+ mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ } else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
+ if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
+ mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+
+ if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
+ mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+
+ if (dr_mask_is_tnl_gtpu(&mask, dmn))
+ mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
+ }
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
@@ -333,17 +524,20 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
- &mask, inner, rx);
+ if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+
+ if (dr_mask_is_icmp(&mask, dmn))
+ mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
- if (dr_mask_is_icmp(&mask, dmn)) {
- ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
- &mask, &dmn->info.caps,
- inner, rx);
- if (ret)
- return ret;
- }
if (dr_mask_is_tnl_gre_set(&mask.misc))
mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -404,10 +598,26 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
- &mask, inner, rx);
+ if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
+ else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
+ mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
}
+
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+ if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
+ mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
+ &mask, false, rx);
+
+ if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
+ mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
+ &mask, false, rx);
+ }
+
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
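
The misc4 programmable fields are steered to one of two STE builders by parser id: low ids use flex_parser_0 and high ids flex_parser_1. A worked example, assuming the usual split of eight parsers into two banks (DR_STE_MAX_FLEX_0_ID == 3, DR_STE_MAX_FLEX_1_ID == 7):

/*   prog_sample_field_id_0 = 2 -> dr_mask_is_flex_parser_id_0_3_set()
 *                                 is true, flex_parser_0 builder added
 *   prog_sample_field_id_1 = 5 -> dr_mask_is_flex_parser_id_4_7_set()
 *                                 is true, flex_parser_1 builder added
 *   id 0 only counts for the 0-3 bank when its value mask is non-zero,
 *   since id 0 is otherwise indistinguishable from "field not used"
 */
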
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b337d6626bff..43356fad53de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -952,6 +952,17 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
}
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc4);
+ e_idx = min(s_idx + sizeof(param->misc4), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_err(matcher->tbl->dmn,
+ "Rule misc4 parameters contains a value not specified by mask\n");
+ return false;
+ }
+ }
return true;
}
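
The new misc4 branch enforces the same invariant as the other criteria: a rule value may only set bits that its matcher mask also covers. A sketch of the check being performed (the real dr_rule_cmp_value_to_mask() walks the [s_idx, e_idx) range; a bitwise variant is shown here for illustration):

static bool value_within_mask(const u8 *mask, const u8 *val,
			      size_t s_idx, size_t e_idx)
{
	size_t i;

	for (i = s_idx; i < e_idx; i++)
		if (val[i] & ~mask[i])
			return false;	/* value bit the mask never matches */
	return true;
}
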
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 8a6a56f9dc4e..12cf323a5943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -32,6 +32,7 @@ struct dr_qp_rtr_attr {
u8 min_rnr_timer;
u8 sgid_index;
u16 udp_src_port;
+ u8 fl:1;
};
struct dr_qp_rts_attr {
@@ -45,6 +46,7 @@ struct dr_qp_init_attr {
u32 pdn;
u32 max_send_wr;
struct mlx5_uars_page *uar;
+ u8 isolate_vl_tc:1;
};
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
@@ -157,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
MLX5_SET(qpc, qpc, pd, attr->pdn);
MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
MLX5_SET(qpc, qpc, log_page_size,
@@ -213,7 +216,7 @@ static void dr_destroy_qp(struct mlx5_core_dev *mdev,
static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
{
dma_wmb();
- *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
+ *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);
/* After wmb() the hw is aware of new work */
wmb();
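The mask change in this hunk narrows the doorbell value from 20 bits to 16. A minimal standalone sketch of the assumed semantics: the send-queue doorbell record carries only the low 16 bits of the WQE producer counter, so a wider mask would publish stale high bits once the counter wraps.

    #include <stdint.h>

    /* Low 16 bits of the producer counter go into the SQ doorbell record
     * (assumed width; matches the 0xffff mask above).
     */
    static uint32_t sq_doorbell_value(uint32_t producer_counter)
    {
        return producer_counter & 0xffff;
    }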
@@ -223,7 +226,7 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
u32 rkey, struct dr_data_seg *data_seg,
- u32 opcode, int nreq)
+ u32 opcode, bool notify_hw)
{
struct mlx5_wqe_raddr_seg *wq_raddr;
struct mlx5_wqe_ctrl_seg *wq_ctrl;
@@ -255,16 +258,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
- if (nreq)
+ if (notify_hw)
dr_cmd_notify_hw(dr_qp, wq_ctrl);
}
static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
{
dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
- &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0);
+ &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
- &send_info->read, MLX5_OPCODE_RDMA_READ, 1);
+ &send_info->read, MLX5_OPCODE_RDMA_READ, true);
}
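With the int nreq parameter replaced by a bool, dr_post_send() reads naturally: post the RDMA write without ringing the doorbell, then post the RDMA read and ring once for both. A short sketch of the general batching pattern, with stand-in types rather than the driver's:

    #include <stdbool.h>

    struct seg;
    typedef void (*post_fn)(struct seg *seg, bool notify_hw);

    /* Post a batch of segments, ringing the doorbell only on the last one. */
    static void post_batch(struct seg **segs, int n, post_fn post)
    {
        int i;

        for (i = 0; i < n; i++)
            post(segs[i], i == n - 1);
    }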
/**
@@ -406,7 +409,7 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
alloc_size = *num_stes * DR_STE_SIZE;
}
- *data = kzalloc(alloc_size, GFP_KERNEL);
+ *data = kvzalloc(alloc_size, GFP_KERNEL);
if (!*data)
return -ENOMEM;
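The copy buffer here is sized from the table (num_stes * DR_STE_SIZE), so it can grow well past what the page allocator reliably provides in one contiguous chunk; kvzalloc() falls back to vmalloc for such sizes. Kernel-style sketch of the pairing rule this hunk and the two below follow:

    /* kvzalloc()-obtained memory must be released with kvfree(), which
     * copes with both kmalloc- and vmalloc-backed buffers.
     */
    data = kvzalloc(alloc_size, GFP_KERNEL);
    if (!data)
        return -ENOMEM;
    /* ... use the buffer ... */
    kvfree(data);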
@@ -505,7 +508,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
}
out_free:
- kfree(data);
+ kvfree(data);
return ret;
}
@@ -562,7 +565,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
}
out_free:
- kfree(data);
+ kvfree(data);
return ret;
}
@@ -572,12 +575,12 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct postsend_info send_info = {};
int ret;
- send_info.write.addr = (uintptr_t)action->rewrite.data;
- send_info.write.length = action->rewrite.num_of_actions *
+ send_info.write.addr = (uintptr_t)action->rewrite->data;
+ send_info.write.length = action->rewrite->num_of_actions *
DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
- send_info.remote_addr = action->rewrite.chunk->mr_addr;
- send_info.rkey = action->rewrite.chunk->rkey;
+ send_info.remote_addr = action->rewrite->chunk->mr_addr;
+ send_info.rkey = action->rewrite->chunk->rkey;
ret = dr_postsend_icm_data(dmn, &send_info);
@@ -650,6 +653,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
attr->udp_src_port);
MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+ MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
MLX5_SET(qpc, qpc, min_rnr_nak, 1);
MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
@@ -658,6 +662,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
+static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
+{
+ /* Check whether RC RoCE QP creation with force loopback is allowed.
+ * There are two separate capability bits for this:
+ * - force loopback when RoCE is enabled
+ * - force loopback when RoCE is disabled
+ */
+ return ((caps->roce_caps.roce_en &&
+ caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
+ (!caps->roce_caps.roce_en &&
+ caps->roce_caps.fl_rc_qp_when_roce_disabled));
+}
+
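The check reduces to one conditional: with RoCE enabled the fl_rc_qp_when_roce_enabled capability governs, with RoCE disabled the fl_rc_qp_when_roce_disabled one does. An equivalent formulation, shown only for illustration (the _alt helper name is hypothetical):

    static bool dr_send_allow_fl_alt(struct mlx5dr_cmd_caps *caps)
    {
        return caps->roce_caps.roce_en ?
               caps->roce_caps.fl_rc_qp_when_roce_enabled :
               caps->roce_caps.fl_rc_qp_when_roce_disabled;
    }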
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
{
struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
@@ -676,17 +693,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
}
/* RTR */
- ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
- if (ret)
- return ret;
-
rtr_attr.mtu = mtu;
rtr_attr.qp_num = dr_qp->qpn;
rtr_attr.min_rnr_timer = 12;
rtr_attr.port_num = port;
- rtr_attr.sgid_index = gid_index;
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
+ /* If QP creation with force loopback is allowed, then there
+ * is no need for GID index when creating the QP.
+ * Otherwise we query GID attributes and use GID index.
+ */
+ rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
+ if (!rtr_attr.fl) {
+ ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
+ &rtr_attr.dgid_attr);
+ if (ret)
+ return ret;
+
+ rtr_attr.sgid_index = gid_index;
+ }
+
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
if (ret) {
mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
@@ -900,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
init_attr.pdn = dmn->pdn;
init_attr.uar = dmn->uar;
init_attr.max_send_wr = QUEUE_SIZE;
+
+ /* Isolated VL is applicable only if force loopback is supported */
+ if (dr_send_allow_fl(&dmn->info.caps))
+ init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
+
spin_lock_init(&dmn->send_ring->lock);
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index f49abc7a4b9b..9b1529137cba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -852,6 +852,35 @@ static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+ spec->geneve_tlv_option_0_data =
+ MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
+ spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
+ spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
+ spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
+ spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
+ spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
+ spec->gtpu_first_ext_dw_0 =
+ MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
+}
+
+static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
+{
+ spec->prog_sample_field_id_0 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
+ spec->prog_sample_field_value_0 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
+ spec->prog_sample_field_id_1 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
+ spec->prog_sample_field_value_1 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
+ spec->prog_sample_field_id_2 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
+ spec->prog_sample_field_value_2 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
+ spec->prog_sample_field_id_3 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
+ spec->prog_sample_field_value_3 =
+ MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
}
void mlx5dr_ste_copy_param(u8 match_criteria,
@@ -925,6 +954,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
dr_ste_copy_mask_misc3(buff, &set_param->misc3);
}
+
+ param_location += sizeof(struct mlx5dr_match_misc3);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc4)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc4(buff, &set_param->misc4);
+ }
}
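Every criteria block above follows the same guard: if the caller's mask buffer ends inside the structure being copied, the remaining bytes are staged through the zeroed tail_param bounce buffer so the copy helper always reads a full struct. A standalone sketch of the idea, with illustrative names:

    #include <stdint.h>
    #include <string.h>

    /* Return a pointer covering struct_sz bytes at data+offset, staging
     * through a zeroed bounce buffer when the source is truncated.
     * Assumes offset <= match_sz and bounce holds >= struct_sz bytes.
     */
    static const void *stage_param(const uint8_t *data, size_t match_sz,
                                   size_t offset, size_t struct_sz,
                                   uint8_t *bounce)
    {
        if (match_sz < offset + struct_sz) {
            memcpy(bounce, data + offset, match_sz - offset);
            return bounce;
        }
        return data + offset;
    }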
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1051,26 +1094,40 @@ void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_gre_init(sb, mask);
}
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->caps = caps;
+ ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
- ste_ctx->build_tnl_mpls_init(sb, mask);
+ sb->caps = caps;
+ ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx)
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
- return ste_ctx->build_icmp_init(sb, mask);
+ ste_ctx->build_icmp_init(sb, mask);
}
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1113,6 +1170,52 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_init(sb, mask);
}
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_tnl_gtpu_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
+}
+
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -1148,6 +1251,26 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_flex_parser_1_init(sb, mask);
+}
+
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 06bcb0ee8f96..992b591bf0c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -62,6 +62,13 @@
in_out##_first_mpls_ttl); \
} while (0)
+#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \
+ u8 parser_id = (caps)->flex_parser_id_##fname; \
+ u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \
+ *(__be32 *)parser_ptr = cpu_to_be32((spec)->fname); \
+ (spec)->fname = 0; \
+} while (0)
+
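For a single field, DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, caps, spec) expands roughly to the block below (manual expansion, for illustration only):

    {
        u8 parser_id = caps->flex_parser_id_gtpu_teid;
        u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

        /* write the value into the parser's dword slot, big-endian ... */
        *(__be32 *)parser_ptr = cpu_to_be32(spec->gtpu_teid);
        /* ... then consume it so later builders do not match it again */
        spec->gtpu_teid = 0;
    }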
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
(_misc)->outer_first_mpls_over_gre_label || \
(_misc)->outer_first_mpls_over_gre_exp || \
@@ -86,8 +93,22 @@ enum dr_ste_action_modify_type_l4 {
DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
};
+enum {
+ HDR_MPLS_OFFSET_LABEL = 12,
+ HDR_MPLS_OFFSET_EXP = 9,
+ HDR_MPLS_OFFSET_S_BOS = 8,
+ HDR_MPLS_OFFSET_TTL = 0,
+};
+
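These offsets mirror the MPLS label stack entry of RFC 3032: label in bits 31:12, EXP in 11:9, bottom-of-stack in bit 8, TTL in 7:0. A small sketch of the packing the tag builders below perform (caller passes in-range values):

    #include <stdint.h>

    static uint32_t mpls_hdr_pack(uint32_t label, uint32_t exp,
                                  uint32_t s_bos, uint32_t ttl)
    {
        return (label << 12) |  /* HDR_MPLS_OFFSET_LABEL */
               (exp << 9)    |  /* HDR_MPLS_OFFSET_EXP */
               (s_bos << 8)  |  /* HDR_MPLS_OFFSET_S_BOS */
               (ttl << 0);      /* HDR_MPLS_OFFSET_TTL */
    }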
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
+static inline u8 *
+dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id)
+{
+ /* Calculate tag byte offset based on flex parser id */
+ return tag + 4 * (3 - (parser_id % 4));
+}
+
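The helper maps a parser id to one of four dword slots laid out in reverse order inside the 16-byte tag: id % 4 == 0 lands at byte 12, id % 4 == 3 at byte 0, and ids 4-7 reuse the same slots in the FLEX_PARSER_1 STE. A standalone demonstration of the arithmetic:

    #include <stdio.h>

    static long flex_parser_offset(unsigned int parser_id)
    {
        return 4 * (3 - (parser_id % 4));
    }

    int main(void)
    {
        unsigned int id;

        for (id = 0; id < 8; id++)
            printf("parser %u -> tag byte %ld\n", id,
                   flex_parser_offset(id));
        return 0;
    }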
#define DR_STE_CTX_BUILDER(fname) \
((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
struct mlx5dr_match_param *mask))
@@ -106,14 +127,22 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(mpls);
void DR_STE_CTX_BUILDER(tnl_gre);
void DR_STE_CTX_BUILDER(tnl_mpls);
- int DR_STE_CTX_BUILDER(icmp);
+ void DR_STE_CTX_BUILDER(tnl_mpls_over_gre);
+ void DR_STE_CTX_BUILDER(tnl_mpls_over_udp);
+ void DR_STE_CTX_BUILDER(icmp);
void DR_STE_CTX_BUILDER(general_purpose);
void DR_STE_CTX_BUILDER(eth_l4_misc);
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve);
+ void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+ void DR_STE_CTX_BUILDER(flex_parser_0);
+ void DR_STE_CTX_BUILDER(flex_parser_1);
+ void DR_STE_CTX_BUILDER(tnl_gtpu);
+ void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
+ void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
/* Getters and Setters */
void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 9ec079247c4b..0757a4e8540e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -331,7 +331,7 @@ static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
DR_STE_ACTION_TYPE_PUSH_VLAN);
MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
- /* Due to HW limitation we need to set this bit, otherwise reforamt +
+ /* Due to HW limitation we need to set this bit, otherwise reformat +
* push vlan will not work.
*/
if (go_back)
@@ -1248,32 +1248,29 @@ dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+ u32 mpls_hdr;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2, outer_first_mpls_over_gre_ttl);
+ mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+ misc_2->outer_first_mpls_over_gre_label = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+ misc_2->outer_first_mpls_over_gre_exp = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc_2->outer_first_mpls_over_gre_s_bos = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+ misc_2->outer_first_mpls_over_gre_ttl = 0;
} else {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2, outer_first_mpls_over_udp_ttl);
+ mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+ misc_2->outer_first_mpls_over_udp_label = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+ misc_2->outer_first_mpls_over_udp_exp = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc_2->outer_first_mpls_over_udp_s_bos = 0;
+ mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+ misc_2->outer_first_mpls_over_udp_ttl = 0;
}
+
+ MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
return 0;
}
@@ -1288,6 +1285,91 @@ dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
}
+static int
+dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_udp_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_udp_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_udp_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_udp_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_udp;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_gre_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_gre_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_gre_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_gre_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_gre;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
+}
+
#define ICMP_TYPE_OFFSET_FIRST_DW 24
#define ICMP_CODE_OFFSET_FIRST_DW 16
@@ -1300,9 +1382,11 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
u32 *icmp_header_data;
int dw0_location;
int dw1_location;
+ u8 *parser_ptr;
u8 *icmp_type;
u8 *icmp_code;
bool is_ipv4;
+ u32 icmp_hdr;
is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) {
@@ -1319,47 +1403,40 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
}
- switch (dw0_location) {
- case 4:
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
- (*icmp_code << ICMP_TYPE_OFFSET_FIRST_DW));
-
- *icmp_type = 0;
- *icmp_code = 0;
- break;
- default:
- return -EINVAL;
- }
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
+ icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+ (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
+ *(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
+ *icmp_code = 0;
+ *icmp_type = 0;
- switch (dw1_location) {
- case 5:
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
- *icmp_header_data);
- *icmp_header_data = 0;
- break;
- default:
- return -EINVAL;
- }
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
+ *(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
+ *icmp_header_data = 0;
return 0;
}
-static int
+static void
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
- int ret;
+ u8 parser_id;
+ bool is_ipv4;
- ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
- if (ret)
- return ret;
+ dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
- sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
+ parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
+ sb->caps->flex_parser_id_icmpv6_dw0;
+ sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
-
- return 0;
}
static int
@@ -1595,6 +1672,185 @@ dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
}
+static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
+ u32 *misc4_field_value,
+ bool *parser_is_used,
+ u8 *tag)
+{
+ u32 id = *misc4_field_id;
+ u8 *parser_ptr;
+
+ if (parser_is_used[id])
+ return;
+
+ parser_is_used[id] = true;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
+
+ *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
+ *misc4_field_id = 0;
+ *misc4_field_value = 0;
+}
+
+static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
+ bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
+ &misc_4_mask->prog_sample_field_value_0,
+ parser_is_used, tag);
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
+ &misc_4_mask->prog_sample_field_value_1,
+ parser_is_used, tag);
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
+ &misc_4_mask->prog_sample_field_value_2,
+ parser_is_used, tag);
+
+ dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
+ &misc_4_mask->prog_sample_field_value_3,
+ parser_is_used, tag);
+
+ return 0;
+}
+
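dr_ste_v0_set_flex_parser() writes each of the four id/value pairs at most once: if two pairs name the same parser id, parser_is_used[] makes the first write win and the duplicate is skipped. Standalone sketch with simplified types:

    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_PARSERS 8

    /* First writer to a slot wins; later duplicates are ignored. */
    static void set_once(uint32_t id, uint32_t value,
                         bool used[NUM_PARSERS], uint32_t slots[NUM_PARSERS])
    {
        if (id >= NUM_PARSERS || used[id])
            return;
        used[id] = true;
        slots[id] = value;
    }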
+static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
+}
+
+static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+ u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+
+ MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
+ misc3->geneve_tlv_option_0_data);
+ misc3->geneve_tlv_option_0_data = 0;
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
+}
+
+static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+ gtpu_msg_flags, misc3,
+ gtpu_msg_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+ gtpu_msg_type, misc3,
+ gtpu_msg_type);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+ gtpu_teid, misc3,
+ gtpu_teid);
+
+ return 0;
+}
+
+static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
+}
+
struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
@@ -1609,14 +1865,22 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_mpls_init = &dr_ste_v0_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v0_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v0_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v0_build_icmp_init,
.build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_register_0_init = &dr_ste_v0_build_register_0_init,
.build_register_1_init = &dr_ste_v0_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
/* Getters and Setters */
.ste_init = &dr_ste_v0_init,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 9143ec326ebf..054c2e2b6554 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -437,21 +437,6 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
- u8 *s_action,
- u16 decap_actions,
- u32 decap_index)
-{
- MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
- DR_STE_V1_ACTION_ID_MODIFY_LIST);
- MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
- decap_actions);
- MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
- decap_index);
-
- dr_ste_v1_set_reparse(hw_ste_p);
-}
-
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
u8 *s_action,
u16 num_of_actions,
@@ -571,9 +556,6 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
bool allow_ctr = true;
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- dr_ste_v1_set_rx_decap_l3(last_ste, action,
- attr->decap_actions,
- attr->decap_index);
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->decap_actions,
attr->decap_index);
@@ -1324,6 +1306,88 @@ static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
}
+static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_udp_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_udp_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_udp_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_udp_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_udp;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
+}
+
+static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+ u8 *parser_ptr;
+ u8 parser_id;
+ u32 mpls_hdr;
+
+ mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+ misc2->outer_first_mpls_over_gre_label = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+ misc2->outer_first_mpls_over_gre_exp = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+ misc2->outer_first_mpls_over_gre_s_bos = 0;
+ mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+ misc2->outer_first_mpls_over_gre_ttl = 0;
+
+ parser_id = sb->caps->flex_parser_id_mpls_over_gre;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+ *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
+}
+
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@@ -1355,16 +1419,14 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
return 0;
}
-static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
-
- return 0;
}
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
@@ -1532,6 +1594,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
+ misc_mask->source_eswitch_owner_vhca_id = 0;
}
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
@@ -1588,6 +1651,179 @@ static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
}
+static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
+ u32 *misc4_field_value,
+ bool *parser_is_used,
+ u8 *tag)
+{
+ u32 id = *misc4_field_id;
+ u8 *parser_ptr;
+
+ if (parser_is_used[id])
+ return;
+
+ parser_is_used[id] = true;
+ parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
+
+ *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
+ *misc4_field_id = 0;
+ *misc4_field_value = 0;
+}
+
+static int dr_ste_v1_build_flex_parser_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
+ bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
+ &misc_4_mask->prog_sample_field_value_0,
+ parser_is_used, tag);
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
+ &misc_4_mask->prog_sample_field_value_1,
+ parser_is_used, tag);
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
+ &misc_4_mask->prog_sample_field_value_2,
+ parser_is_used, tag);
+
+ dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
+ &misc_4_mask->prog_sample_field_value_3,
+ parser_is_used, tag);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+ dr_ste_v1_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tag;
+}
+
+static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
+ dr_ste_v1_build_flex_parser_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tag;
+}
+
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+ u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+
+ MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
+ misc3->geneve_tlv_option_0_data);
+ misc3->geneve_tlv_option_0_data = 0;
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
+
+ /* STEs with lookup type FLEX_PARSER_{0/1} include
+ * flex parsers_{0-3}/{4-7} respectively.
+ */
+ sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > DR_STE_MAX_FLEX_0_ID ?
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
+}
+
+static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
+ DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
+}
+
+static int
+dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
+}
+
+static int
+dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+ if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+ DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+ return 0;
+}
+
+static void
+dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
+}
+
struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
@@ -1602,14 +1838,23 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_mpls_init = &dr_ste_v1_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v1_build_icmp_init,
.build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
/* Getters and Setters */
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index b599b6beb5b9..30ae3cda6d2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -29,7 +29,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
last_htbl = tbl->rx.s_anchor;
tbl->rx.default_icm_addr = action ?
- action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+ action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
tbl->rx.nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
@@ -53,7 +53,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
last_htbl = tbl->tx.s_anchor;
tbl->tx.default_icm_addr = action ?
- action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
+ action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr :
tbl->tx.nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 4af0e4e6a13c..67460c42a99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -12,17 +12,30 @@
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"
-#define DR_RULE_MAX_STES 17
+#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+#define DR_NUM_OF_FLEX_PARSERS 8
+#define DR_STE_MAX_FLEX_0_ID 3
+#define DR_STE_MAX_FLEX_1_ID 7
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+static inline bool dr_is_flex_parser_0_id(u8 parser_id)
+{
+ return parser_id <= DR_STE_MAX_FLEX_0_ID;
+}
+
+static inline bool dr_is_flex_parser_1_id(u8 parser_id)
+{
+ return parser_id > DR_STE_MAX_FLEX_0_ID;
+}
+
enum mlx5dr_icm_chunk_size {
DR_CHUNK_SIZE_1,
DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
@@ -87,7 +100,8 @@ enum mlx5dr_matcher_criteria {
DR_MATCHER_CRITERIA_INNER = 1 << 2,
DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
- DR_MATCHER_CRITERIA_MAX = 1 << 5,
+ DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
+ DR_MATCHER_CRITERIA_MAX = 1 << 6,
};
enum mlx5dr_action_type {
@@ -389,11 +403,21 @@ void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -402,6 +426,25 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -419,6 +462,14 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
/* Actions utils */
@@ -646,7 +697,24 @@ struct mlx5dr_match_misc3 {
u8 icmpv6_type;
u8 icmpv4_code;
u8 icmpv4_type;
- u8 reserved_auto3[0x1c];
+ u32 geneve_tlv_option_0_data;
+ u8 gtpu_msg_flags;
+ u8 gtpu_msg_type;
+ u32 gtpu_teid;
+ u32 gtpu_dw_2;
+ u32 gtpu_first_ext_dw_0;
+ u32 gtpu_dw_0;
+};
+
+struct mlx5dr_match_misc4 {
+ u32 prog_sample_field_value_0;
+ u32 prog_sample_field_id_0;
+ u32 prog_sample_field_value_1;
+ u32 prog_sample_field_id_1;
+ u32 prog_sample_field_value_2;
+ u32 prog_sample_field_id_2;
+ u32 prog_sample_field_value_3;
+ u32 prog_sample_field_id_3;
};
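Each misc4 pair names a programmable flex parser (the id) and the 32-bit sample to match (the value). An illustrative fill, assuming the firmware allocated parser id 2 for the protocol of interest:

    struct mlx5dr_match_misc4 m4 = {
        .prog_sample_field_id_0 = 2,             /* flex parser number */
        .prog_sample_field_value_0 = 0xdeadbeef, /* dword to match */
    };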
struct mlx5dr_match_param {
@@ -655,6 +723,7 @@ struct mlx5dr_match_param {
struct mlx5dr_match_spec inner;
struct mlx5dr_match_misc2 misc2;
struct mlx5dr_match_misc3 misc3;
+ struct mlx5dr_match_misc4 misc4;
};
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
@@ -678,6 +747,12 @@ struct mlx5dr_cmd_vport_cap {
u32 num;
};
+struct mlx5dr_roce_cap {
+ u8 roce_en:1;
+ u8 fl_rc_qp_when_roce_disabled:1;
+ u8 fl_rc_qp_when_roce_enabled:1;
+};
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@ -692,6 +767,13 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_icmp_dw1;
u8 flex_parser_id_icmpv6_dw0;
u8 flex_parser_id_icmpv6_dw1;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+ u8 flex_parser_id_gtpu_dw_0;
+ u8 flex_parser_id_gtpu_teid;
+ u8 flex_parser_id_gtpu_dw_2;
+ u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
u8 num_esw_ports;
@@ -707,6 +789,8 @@ struct mlx5dr_cmd_caps {
struct mlx5dr_esw_caps esw_caps;
struct mlx5dr_cmd_vport_cap *vports_caps;
bool prio_tag_required;
+ struct mlx5dr_roce_cap roce_caps;
+ u8 isolate_vl_tc:1;
};
struct mlx5dr_domain_rx_tx {
@@ -806,53 +890,71 @@ struct mlx5dr_ste_action_modify_field {
u8 l4_type;
};
+struct mlx5dr_action_rewrite {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_icm_chunk *chunk;
+ u8 *data;
+ u16 num_of_actions;
+ u32 index;
+ u8 allow_rx:1;
+ u8 allow_tx:1;
+ u8 modify_ttl:1;
+};
+
+struct mlx5dr_action_reformat {
+ struct mlx5dr_domain *dmn;
+ u32 reformat_id;
+ u32 reformat_size;
+};
+
+struct mlx5dr_action_dest_tbl {
+ u8 is_fw_tbl:1;
+ union {
+ struct mlx5dr_table *tbl;
+ struct {
+ struct mlx5dr_domain *dmn;
+ u32 id;
+ u32 group_id;
+ enum fs_flow_table_type type;
+ u64 rx_icm_addr;
+ u64 tx_icm_addr;
+ struct mlx5dr_action **ref_actions;
+ u32 num_of_ref_actions;
+ } fw_tbl;
+ };
+};
+
+struct mlx5dr_action_ctr {
+ u32 ctr_id;
+ u32 offeset;
+};
+
+struct mlx5dr_action_vport {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_cmd_vport_cap *caps;
+};
+
+struct mlx5dr_action_push_vlan {
+ u32 vlan_hdr; /* tpid_pcp_dei_vid */
+};
+
+struct mlx5dr_action_flow_tag {
+ u32 flow_tag;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
+
union {
- struct {
- struct mlx5dr_domain *dmn;
- struct mlx5dr_icm_chunk *chunk;
- u8 *data;
- u16 num_of_actions;
- u32 index;
- u8 allow_rx:1;
- u8 allow_tx:1;
- u8 modify_ttl:1;
- } rewrite;
- struct {
- struct mlx5dr_domain *dmn;
- u32 reformat_id;
- u32 reformat_size;
- } reformat;
- struct {
- u8 is_fw_tbl:1;
- union {
- struct mlx5dr_table *tbl;
- struct {
- struct mlx5dr_domain *dmn;
- u32 id;
- u32 group_id;
- enum fs_flow_table_type type;
- u64 rx_icm_addr;
- u64 tx_icm_addr;
- struct mlx5dr_action **ref_actions;
- u32 num_of_ref_actions;
- } fw_tbl;
- };
- } dest_tbl;
- struct {
- u32 ctr_id;
- u32 offeset;
- } ctr;
- struct {
- struct mlx5dr_domain *dmn;
- struct mlx5dr_cmd_vport_cap *caps;
- } vport;
- struct {
- u32 vlan_hdr; /* tpid_pcp_dei_vid */
- } push_vlan;
- u32 flow_tag;
+ void *data;
+ struct mlx5dr_action_rewrite *rewrite;
+ struct mlx5dr_action_reformat *reformat;
+ struct mlx5dr_action_dest_tbl *dest_tbl;
+ struct mlx5dr_action_ctr *ctr;
+ struct mlx5dr_action_vport *vport;
+ struct mlx5dr_action_push_vlan *push_vlan;
+ struct mlx5dr_action_flow_tag *flow_tag;
};
};
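Replacing the large inline union with a union of pointers shrinks every action to the shared fields plus one pointer; the type-specific payload can be co-allocated and reached through the matching member (the void *data member covers the generic case). A hedged sketch of that allocation pattern; the helper name is hypothetical:

    static struct mlx5dr_action *dr_action_alloc(size_t extra_size)
    {
        struct mlx5dr_action *action;

        action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL);
        if (!action)
            return NULL;

        if (extra_size)
            action->data = action + 1; /* payload follows the header */
        return action;
    }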
@@ -1063,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr {
u32 sq_wqe_cnt;
u32 rq_wqe_cnt;
u32 rq_wqe_shift;
+ u8 isolate_vl_tc:1;
};
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 83df6df6b459..9643ee647f57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -434,10 +434,7 @@ struct mlx5_ifc_ste_gre_bits {
};
struct mlx5_ifc_ste_flex_parser_0_bits {
- u8 parser_3_label[0x14];
- u8 parser_3_exp[0x3];
- u8 parser_3_s_bos[0x1];
- u8 parser_3_ttl[0x8];
+ u8 flex_parser_3[0x20];
u8 flex_parser_2[0x20];
@@ -488,6 +485,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
+ u8 reserved_at_0[0x5];
+ u8 gtpu_msg_flags[0x3];
+ u8 gtpu_msg_type[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 gtpu_teid[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index e05c5c0f3ae1..457ad42eaa2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
-/**
- * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
- *
- * @dev: Pointer to core device
- *
- * mlx5_eswitch_get_total_vports returns total number of vports for
- * the eswitch.
- */
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
-{
- return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
-}
-EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
-
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
{
u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 01f075fac276..3091dd014650 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -34,11 +34,6 @@
#include "wq.h"
#include "mlx5_core.h"
-static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
-{
- return ((u32)1 << log_sz) << log_stride;
-}
-
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 52fdc34251ba..7e9a7cb31720 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1728,7 +1728,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
return err;
event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id);
+ err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
if (err)
return err;
switch (event_id) {
@@ -1806,6 +1806,10 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
if (err)
return err;
+ val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
+ err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
+ if (err)
+ return err;
} else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8af7d9d03475..80712dc803d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -58,6 +58,25 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_rx_md_info {
+ u32 cookie_index;
+ u32 latency;
+ u32 tx_congestion;
+ union {
+ /* Valid when 'tx_port_valid' is set. */
+ u16 tx_sys_port;
+ u16 tx_lag_id;
+ };
+ u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
+ u8 tx_tc;
+ u8 latency_valid:1,
+ tx_congestion_valid:1,
+ tx_tc_valid:1,
+ tx_port_valid:1,
+ tx_port_is_lag:1,
+ unused:3;
+};
+
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
@@ -515,7 +534,7 @@ enum mlxsw_devlink_param_id {
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
- u32 cookie_index; /* Only used during receive */
+ struct mlxsw_rx_md_info rx_md_info;
};
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 4d699fe98cb6..78d9c0196f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -2007,3 +2007,134 @@ int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport,
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_l4port);
+
+/* Mirror Sampler Action
+ * ---------------------
+ * The SAMPLER_ACTION is used to mirror packets with a probability (sampling).
+ */
+
+#define MLXSW_AFA_SAMPLER_CODE 0x13
+#define MLXSW_AFA_SAMPLER_SIZE 1
+
+/* afa_sampler_mirror_agent
+ * Mirror (SPAN) agent.
+ */
+MLXSW_ITEM32(afa, sampler, mirror_agent, 0x04, 0, 3);
+
+#define MLXSW_AFA_SAMPLER_RATE_MAX (BIT(24) - 1)
+
+/* afa_sampler_mirror_probability_rate
+ * Mirroring probability.
+ * Valid values are 1 to 2^24 - 1
+ */
+MLXSW_ITEM32(afa, sampler, mirror_probability_rate, 0x08, 0, 24);
+
+static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate)
+{
+ mlxsw_afa_sampler_mirror_agent_set(payload, mirror_agent);
+ mlxsw_afa_sampler_mirror_probability_rate_set(payload, rate);
+}
+
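Since mirror_probability_rate is a 24-bit field, the sampling rate is capped at BIT(24) - 1 and a packet is mirrored with probability 1/rate. A standalone check of the bound (the constant is recomputed here rather than taken from the driver header):

#include <assert.h>
#include <stdint.h>

#define SAMPLER_RATE_MAX ((1u << 24) - 1)	/* BIT(24) - 1 = 16777215 */

int main(void)
{
	uint32_t rate = 4096;	/* mirror 1 of every 4096 packets on average */

	assert(rate >= 1 && rate <= SAMPLER_RATE_MAX);
	return 0;
}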
+struct mlxsw_afa_sampler {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u8 local_port;
+ bool ingress;
+};
+
+static void mlxsw_afa_sampler_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_sampler *sampler)
+{
+ mlxsw_afa_resource_del(&sampler->resource);
+ block->afa->ops->sampler_del(block->afa->ops_priv, sampler->local_port,
+ sampler->span_id, sampler->ingress);
+ kfree(sampler);
+}
+
+static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_sampler *sampler;
+
+ sampler = container_of(resource, struct mlxsw_afa_sampler, resource);
+ mlxsw_afa_sampler_destroy(block, sampler);
+}
+
+static struct mlxsw_afa_sampler *
+mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate, bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_sampler *sampler;
+ int err;
+
+ sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
+ if (!sampler)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->sampler_add(block->afa->ops_priv, local_port,
+ psample_group, rate, trunc_size,
+ truncate, ingress, &sampler->span_id,
+ extack);
+ if (err)
+ goto err_sampler_add;
+
+ sampler->ingress = ingress;
+ sampler->local_port = local_port;
+ sampler->resource.destructor = mlxsw_afa_sampler_destructor;
+ mlxsw_afa_resource_add(block, &sampler->resource);
+ return sampler;
+
+err_sampler_add:
+ kfree(sampler);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block,
+ u8 mirror_agent, u32 rate)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_SAMPLER_CODE,
+ MLXSW_AFA_SAMPLER_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_sampler_pack(act, mirror_agent, rate);
+ return 0;
+}
+
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_sampler *sampler;
+ int err;
+
+ if (rate > MLXSW_AFA_SAMPLER_RATE_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling rate is too high");
+ return -EINVAL;
+ }
+
+ sampler = mlxsw_afa_sampler_create(block, local_port, psample_group,
+ rate, trunc_size, truncate, ingress,
+ extack);
+ if (IS_ERR(sampler))
+ return PTR_ERR(sampler);
+
+ err = mlxsw_afa_block_append_allocated_sampler(block, sampler->span_id,
+ rate);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append sampler action");
+ goto err_append_allocated_sampler;
+ }
+
+ return 0;
+
+err_append_allocated_sampler:
+ mlxsw_afa_sampler_destroy(block, sampler);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_sampler);
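A hedged sketch of how a caller might consume the exported helper; everything except mlxsw_afa_block_append_sampler() itself (the block, port, psample group and extack) is assumed context. Note the helper already unwinds the sampler resource on an append failure, so the caller only propagates the error:

/* Sketch: append a 1-in-1000 ingress sampler, truncating to 128 bytes. */
err = mlxsw_afa_block_append_sampler(block, mlxsw_sp_port->local_port,
				     psample_group, 1000, 128, true, true,
				     extack);
if (err)
	return err;	/* sampler already destroyed internally */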
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b652497b1002..b65bf98eb5ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -30,6 +30,12 @@ struct mlxsw_afa_ops {
u16 *p_policer_index,
struct netlink_ext_ack *extack);
void (*policer_del)(void *priv, u16 policer_index);
+ int (*sampler_add)(void *priv, u8 local_port,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate, bool ingress,
+ int *p_span_id, struct netlink_ext_ack *extack);
+ void (*sampler_del)(void *priv, u8 local_port, int span_id,
+ bool ingress);
bool dummy_first_set;
};
@@ -92,5 +98,10 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
u32 fa_index, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index bf85ce9835d7..dfea14399607 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -19,7 +19,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
@@ -45,7 +44,6 @@ enum mlxsw_thermal_trips {
MLXSW_THERMAL_TEMP_TRIP_NORM,
MLXSW_THERMAL_TEMP_TRIP_HIGH,
MLXSW_THERMAL_TEMP_TRIP_HOT,
- MLXSW_THERMAL_TEMP_TRIP_CRIT,
};
struct mlxsw_thermal_trip {
@@ -75,16 +73,9 @@ static const struct mlxsw_thermal_trip default_thermal_trips[] = {
{ /* Warning */
.type = THERMAL_TRIP_HOT,
.temp = MLXSW_THERMAL_ASIC_TEMP_HOT,
- .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
.min_state = MLXSW_THERMAL_MAX_STATE,
.max_state = MLXSW_THERMAL_MAX_STATE,
},
- { /* Critical - soft poweroff */
- .type = THERMAL_TRIP_CRITICAL,
- .temp = MLXSW_THERMAL_ASIC_TEMP_CRIT,
- .min_state = MLXSW_THERMAL_MAX_STATE,
- .max_state = MLXSW_THERMAL_MAX_STATE,
- }
};
#define MLXSW_THERMAL_NUM_TRIPS ARRAY_SIZE(default_thermal_trips)
@@ -141,7 +132,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- sizeof(cdev->type)))
+ strlen(cdev->type)))
return 0;
}
@@ -154,7 +145,6 @@ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = 0;
}
static int
@@ -183,11 +173,10 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
}
/* According to the system thermal requirements, the thermal zones are
- * defined with four trip points. The critical and emergency
+ * defined with three trip points. The critical and emergency
* temperature thresholds, provided by QSFP module are set as "active"
- * and "hot" trip points, "normal" and "critical" trip points are
- * derived from "active" and "hot" by subtracting or adding double
- * hysteresis value.
+ * and "hot" trip points, "normal" trip point is derived from "active"
+ * by subtracting double hysteresis value.
*/
if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
@@ -196,8 +185,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
- MLXSW_THERMAL_MODULE_TEMP_SHIFT;
return 0;
}
@@ -210,7 +197,7 @@ static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_trip *trip = trips;
unsigned int score, delta, i, shift = 1;
- /* Calculate thermal zone score, if temperature is above the critical
+ /* Calculate thermal zone score, if temperature is above the hot
* threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX.
*/
score = MLXSW_THERMAL_TEMP_SCORE_MAX;
@@ -333,8 +320,7 @@ static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
{
struct mlxsw_thermal *thermal = tzdev->devdata;
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
- temp > MLXSW_THERMAL_ASIC_TEMP_CRIT)
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
thermal->trips[trip].temp = temp;
@@ -502,8 +488,7 @@ mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
{
struct mlxsw_thermal_module *tz = tzdev->devdata;
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
- temp > tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp)
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
tz->trips[trip].temp = temp;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d0052537e627..8e8456811384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -540,6 +540,55 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_unlock(&q->lock);
}
+static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
+ const char *cqe)
+{
+ struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
+
+ if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
+ cb->rx_md_info.tx_port_is_lag = true;
+ cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
+ cb->rx_md_info.tx_lag_port_index =
+ mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
+ } else {
+ cb->rx_md_info.tx_port_is_lag = false;
+ cb->rx_md_info.tx_sys_port =
+ mlxsw_pci_cqe2_tx_system_port_get(cqe);
+ }
+
+ if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
+ cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
+ cb->rx_md_info.tx_port_valid = 1;
+ else
+ cb->rx_md_info.tx_port_valid = 0;
+}
+
+static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
+{
+ struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
+
+ cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
+ if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
+ cb->rx_md_info.tx_congestion_valid = 1;
+ else
+ cb->rx_md_info.tx_congestion_valid = 0;
+ cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
+
+ cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
+ if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
+ cb->rx_md_info.latency_valid = 1;
+ else
+ cb->rx_md_info.latency_valid = 0;
+
+ cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
+ if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
+ cb->rx_md_info.tx_tc_valid = 1;
+ else
+ cb->rx_md_info.tx_tc_valid = 0;
+
+ mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
+}
+
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
@@ -581,11 +630,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
- mlxsw_skb_cb(skb)->cookie_index = cookie_index;
+ mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
+ mlxsw_pci_cqe_rdq_md_init(skb, cqe);
+ } else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
+ mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
+ mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a2c1fbd3e0d1..7b531228d6c0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -173,6 +173,15 @@ MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
*/
MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+#define MLXSW_PCI_CQE2_MIRROR_CONG_INVALID 0xFFFF
+
+/* pci_cqe_mirror_cong_high
+ * Congestion level in units of 8KB of the egress traffic class of the original
+ * packet that is mirrored to the CPU. Value of 0xFFFF means that the
+ * congestion level is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_cong_high, 0x08, 16, 4);
+
/* pci_cqe_trap_id
* Trap ID that captured the packet.
*/
@@ -208,6 +217,59 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
+
+/* pci_cqe_mirror_tclass
+ * The egress traffic class of the original packet that is mirrored to the
+ * CPU. Value of 0x1F means that the traffic class is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_tclass, 0x10, 27, 5);
+
+/* pci_cqe_tx_lag
+ * The Tx port of a packet that is mirrored / sampled to the CPU is a LAG.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag, 0x10, 24, 1);
+
+/* pci_cqe_tx_lag_subport
+ * The port index within the LAG of a packet that is mirrored / sampled to the
+ * CPU. Reserved when tx_lag is 0.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag_subport, 0x10, 16, 8);
+
+#define MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT 0xFFFE
+#define MLXSW_PCI_CQE2_TX_PORT_INVALID 0xFFFF
+
+/* pci_cqe_tx_lag_id
+ * The Tx LAG ID of the original packet that is mirrored / sampled to the CPU.
+ * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx LAG ID
+ * is invalid. Reserved when tx_lag is 0.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag_id, 0x10, 0, 16);
+
+/* pci_cqe_tx_system_port
+ * The Tx port of the original packet that is mirrored / sampled to the CPU.
+ * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx port is
+ * invalid. Reserved when tx_lag is 1.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_system_port, 0x10, 0, 16);
+
+/* pci_cqe_mirror_cong_low
+ * Congestion level in units of 8KB of the egress traffic class of the original
+ * packet that is mirrored to the CPU. Value of 0xFFFF means that the
+ * congestion level is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_cong_low, 0x14, 20, 12);
+
+#define MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT 13 /* Units of 8KB. */
+
+static inline u16 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe)
+{
+ u16 cong_high = mlxsw_pci_cqe2_mirror_cong_high_get(cqe);
+ u16 cong_low = mlxsw_pci_cqe2_mirror_cong_low_get(cqe);
+
+ return cong_high << 12 | cong_low;
+}
+
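Worked example of the split-field arithmetic: the 16-bit congestion value is reassembled from the 4-bit high part and the 12-bit low part, and a left shift by 13 converts 8KB units to bytes. A standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t cong_high = 0x3, cong_low = 0xABC;	/* example field values */
	uint16_t cong = cong_high << 12 | cong_low;	/* as in ..._cong_get() */
	uint32_t bytes = (uint32_t)cong << 13;		/* 8KB units -> bytes */

	assert(cong == 0x3ABC);
	assert(bytes == 0x3ABCu * 8192);
	return 0;
}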
/* pci_cqe_user_def_val_orig_pkt_len
* When trap_id is an ACL: User defined value from policy engine action.
*/
@@ -218,6 +280,15 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
*/
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+
+/* pci_cqe_mirror_latency
+ * End-to-end latency of the original packet that is mirrored to the CPU.
+ * Value of 0xFFFFFF means that the latency is invalid. Units are according to
+ * MOGCR.mirror_latency_units.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_latency, 0x1C, 8, 24);
+
/* pci_cqe_owner
* Ownership bit.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index c4adc7f740d3..900b4bf5bb5b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -842,6 +842,14 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+/* reg_spvid_egr_et_set
+ * When VLAN is pushed at ingress (for untagged packets or for
+ * QinQ push mode) then the EtherType is decided at the egress port.
+ * Reserved when Spectrum-1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, egr_et_set, 0x04, 24, 1);
+
/* reg_spvid_et_vlan
 * EtherType used when VLAN is pushed at ingress (for untagged
* packets or for QinQ push mode).
@@ -849,6 +857,7 @@ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
* 1: ether_type1
* 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2
* Ethertype IDs are configured by SVER.
+ * Reserved when egr_et_set = 1.
* Access: RW
*/
MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
@@ -2079,6 +2088,41 @@ static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
mlxsw_reg_spvc_et0_set(payload, et0);
}
+/* SPEVET - Switch Port Egress VLAN EtherType
+ * ------------------------------------------
+ * The switch port egress VLAN EtherType configures which EtherType to push at
+ * egress for packets incoming through a local port for which 'SPVID.egr_et_set'
+ * is set.
+ */
+#define MLXSW_REG_SPEVET_ID 0x202A
+#define MLXSW_REG_SPEVET_LEN 0x08
+
+MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN);
+
+/* reg_spevet_local_port
+ * Egress Local port number.
+ * Not supported to CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
+
+/* reg_spevet_et_vlan
+ * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet:
+ * 0: ether_type0 - (default)
+ * 1: ether_type1
+ * 2: ether_type2
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2);
+
+static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
+ u8 et_vlan)
+{
+ MLXSW_REG_ZERO(spevet, payload);
+ mlxsw_reg_spevet_local_port_set(payload, local_port);
+ mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
+}
+
/* CWTP - Congestion WRED ECN TClass Profile
* ----------------------------------------
* Configures the profiles for queues of egress port and traffic class
@@ -5637,7 +5681,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
-/* reg_slot_index
+/* reg_pmaos_slot_index
* Slot index.
* Access: Index
*/
@@ -8086,6 +8130,60 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+/* RATRAD - Router Adjacency Table Activity Dump Register
+ * ------------------------------------------------------
+ * The RATRAD register is used to dump and optionally clear activity bits of
+ * router adjacency table entries.
+ */
+#define MLXSW_REG_RATRAD_ID 0x8022
+#define MLXSW_REG_RATRAD_LEN 0x210
+
+MLXSW_REG_DEFINE(ratrad, MLXSW_REG_RATRAD_ID, MLXSW_REG_RATRAD_LEN);
+
+enum {
+ /* Read activity */
+ MLXSW_REG_RATRAD_OP_READ_ACTIVITY,
+ /* Read and clear activity */
+ MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY,
+};
+
+/* reg_ratrad_op
+ * Access: Operation
+ */
+MLXSW_ITEM32(reg, ratrad, op, 0x00, 30, 2);
+
+/* reg_ratrad_ecmp_size
+ * ecmp_size is the number of sequential entries from adjacency_index. Valid
+ * ranges:
+ * Spectrum-1: 32-64, 512, 1024, 2048, 4096
+ * Spectrum-2/3: 32-128, 256, 512, 1024, 2048, 4096
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratrad, ecmp_size, 0x00, 0, 13);
+
+/* reg_ratrad_adjacency_index
+ * Index into the adjacency table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratrad, adjacency_index, 0x04, 0, 24);
+
+/* reg_ratrad_activity_vector
+ * Activity bit per adjacency index.
+ * Bits higher than ecmp_size are reserved.
+ * Access: RO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ratrad, activity_vector, 0x10, 0x200, 1);
+
+static inline void mlxsw_reg_ratrad_pack(char *payload, u32 adjacency_index,
+ u16 ecmp_size)
+{
+ MLXSW_REG_ZERO(ratrad, payload);
+ mlxsw_reg_ratrad_op_set(payload,
+ MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY);
+ mlxsw_reg_ratrad_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_ratrad_adjacency_index_set(payload, adjacency_index);
+}
+
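A sketch of the expected query loop; mlxsw_reg_query() and the generated bit-array getter follow the conventions visible elsewhere in the driver, while the surrounding variables and nexthop_mark_active() are assumed context:

/* Sketch: dump-and-clear activity bits for one ECMP group. */
char ratrad_pl[MLXSW_REG_RATRAD_LEN];
u16 i;

mlxsw_reg_ratrad_pack(ratrad_pl, adj_index, ecmp_size);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
if (err)
	return err;
for (i = 0; i < ecmp_size; i++)	/* bits above ecmp_size are reserved */
	if (mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
		nexthop_mark_active(adj_index + i);	/* hypothetical helper */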
/* RIGR-V2 - Router Interface Group Register Version 2
* ---------------------------------------------------
* The RIGR_V2 register is used to add, remove and query egress interface list
@@ -9925,15 +10023,28 @@ MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
*/
MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
+#define MLXSW_REG_MPAR_RATE_MAX 3500000000UL
+
+/* reg_mpar_probability_rate
+ * Sampling rate.
+ * Valid values are: 1 to 3.5*10^9
+ * Value of 1 means "sample all". Default is 1.
+ * Reserved when Spectrum-1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32);
+
static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
enum mlxsw_reg_mpar_i_e i_e,
- bool enable, u8 pa_id)
+ bool enable, u8 pa_id,
+ u32 probability_rate)
{
MLXSW_REG_ZERO(mpar, payload);
mlxsw_reg_mpar_local_port_set(payload, local_port);
mlxsw_reg_mpar_enable_set(payload, enable);
mlxsw_reg_mpar_i_e_set(payload, i_e);
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpar_probability_rate_set(payload, probability_rate);
}
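With the new parameter, enabling a mirroring agent while sampling every packet becomes a six-argument pack. A hedged sketch; the ingress enumerator name and the write path are assumed from the driver's usual conventions:

/* Sketch: enable ingress mirroring to agent pa_id, sampling all packets. */
char mpar_pl[MLXSW_REG_MPAR_LEN];

mlxsw_reg_mpar_pack(mpar_pl, local_port, MLXSW_REG_MPAR_TYPE_INGRESS,
		    true, pa_id, 1);	/* probability_rate 1 = sample all */
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);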
/* MGIR - Management General Information Register
@@ -10577,6 +10688,8 @@ MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4);
+#define MLXSW_REG_MPAGR_RATE_MAX 3500000000UL
+
/* reg_mpagr_probability_rate
* Sampling rate.
* Valid values are: 1 to 3.5*10^9
@@ -10919,7 +11032,7 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
* Which irisc triggered the event
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4);
+MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8);
enum mlxsw_reg_mfde_event_id {
MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
@@ -10930,7 +11043,7 @@ enum mlxsw_reg_mfde_event_id {
/* reg_mfde_event_id
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8);
+MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 16);
enum mlxsw_reg_mfde_method {
MLXSW_REG_MFDE_METHOD_QUERY,
@@ -10979,6 +11092,13 @@ MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
*/
MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+/* reg_mfde_log_ip
+ * IP (instruction pointer) that triggered the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64);
+
/* reg_mfde_pipes_mask
* Bit per kvh pipe.
* Access: RO
@@ -11995,6 +12115,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
+ MLXSW_REG(spevet),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -12047,6 +12168,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(ratrad),
MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1650d9852b5b..bca0354482cb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -23,6 +23,8 @@
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
@@ -45,7 +47,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 2018
+#define MLXSW_SP1_FWREV_SUBMINOR 2406
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 2018
+#define MLXSW_SP2_FWREV_SUBMINOR 2406
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 2018
+#define MLXSW_SP3_FWREV_SUBMINOR 2406
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -400,6 +402,22 @@ int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
return 0;
}
+int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ethtype)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spevet_pl[MLXSW_REG_SPEVET_LEN];
+ u8 sver_type;
+ int err;
+
+ err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
+}
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, u16 ethtype)
{
@@ -2212,32 +2230,6 @@ void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
-void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct mlxsw_sp_port_sample *sample;
- u32 size;
-
- if (unlikely(!mlxsw_sp_port)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
- local_port);
- goto out;
- }
-
- rcu_read_lock();
- sample = rcu_dereference(mlxsw_sp_port->sample);
- if (!sample)
- goto out_unlock;
- size = sample->truncate ? sample->trunc_size : skb->len;
- psample_sample_packet(sample->psample_group, skb, size,
- mlxsw_sp_port->dev->ifindex, 0, sample->rate);
-out_unlock:
- rcu_read_unlock();
-out:
- consume_skb(skb);
-}
-
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
@@ -2576,6 +2568,147 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats = mlxsw_sp2_get_stats,
};
+struct mlxsw_sp_sample_trigger_node {
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params params;
+ struct rhash_head ht_node;
+ struct rcu_head rcu;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
+ .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_sample_trigger),
+ .automatic_shrinking = true,
+};
+
+static void
+mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ memset(key, 0, sizeof(*key));
+ key->type = trigger->type;
+ key->local_port = trigger->local_port;
+}
+
+/* RCU read lock must be held */
+struct mlxsw_sp_sample_params *
+mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+ trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return NULL;
+
+ return &trigger_node->params;
+}
+
+static int
+mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ int err;
+
+ trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
+ if (!trigger_node)
+ return -ENOMEM;
+
+ trigger_node->trigger = *trigger;
+ trigger_node->params = *params;
+ refcount_set(&trigger_node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
+ &trigger_node->ht_node,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(trigger_node);
+ return err;
+}
+
+static void
+mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_sample_trigger_node *trigger_node)
+{
+ rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
+ &trigger_node->ht_node,
+ mlxsw_sp_sample_trigger_ht_params);
+ kfree_rcu(trigger_node, rcu);
+}
+
+int
+mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ ASSERT_RTNL();
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+
+ trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
+ &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
+ params);
+
+ if (trigger_node->trigger.local_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
+ return -EINVAL;
+ }
+
+ if (trigger_node->params.psample_group != params->psample_group ||
+ trigger_node->params.truncate != params->truncate ||
+ trigger_node->params.rate != params->rate ||
+ trigger_node->params.trunc_size != params->trunc_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
+ return -EINVAL;
+ }
+
+ refcount_inc(&trigger_node->refcount);
+
+ return 0;
+}
+
+void
+mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ ASSERT_RTNL();
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+
+ trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
+ &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return;
+
+ if (!refcount_dec_and_test(&trigger_node->refcount))
+ return;
+
+ mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
+}
+
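The set/unset pair is refcounted per trigger key: identical policy-engine triggers share one node as long as their parameters match, while a second set on a per-port (ingress/egress) trigger is rejected. A hypothetical caller sketch, with the port, params and extack assumed context:

/* Sketch: publish ingress sampling parameters for a port, then release. */
struct mlxsw_sp_sample_trigger trigger = {
	.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS,
	.local_port = mlxsw_sp_port->local_port,
};

err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, &params, extack);
if (err)
	return err;
/* ... datapath may now look the params up under rcu_read_lock() ... */
mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);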
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
@@ -2730,6 +2863,13 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_port_module_info_init;
}
+ err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
+ &mlxsw_sp_sample_trigger_ht_params);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
+ goto err_sample_trigger_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -2739,6 +2879,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
+err_sample_trigger_init:
mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -2788,6 +2930,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
@@ -2796,7 +2939,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
- mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
@@ -2804,6 +2946,8 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
@@ -2817,6 +2961,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -2825,7 +2970,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
- mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
@@ -2833,6 +2977,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -2844,6 +2990,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -2852,7 +2999,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
- mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
@@ -2860,6 +3006,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -2870,6 +3018,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
@@ -4283,7 +4432,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
if (br_vlan_enabled(br_dev)) {
br_vlan_get_proto(br_dev, &proto);
if (proto == ETH_P_8021AD) {
- NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge");
+ NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ba28ac7e79bc..f99db88ee884 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -16,6 +16,7 @@
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/net_namespace.h>
+#include <linux/spinlock.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
@@ -87,10 +88,15 @@ enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_MAX,
};
-struct mlxsw_sp_rif_ops;
+struct mlxsw_sp_router_ops;
-extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[];
-extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[];
+extern const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops;
+extern const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops;
+
+struct mlxsw_sp_switchdev_ops;
+
+extern const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops;
+extern const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops;
enum mlxsw_sp_fid_type {
MLXSW_SP_FID_TYPE_8021Q,
@@ -134,6 +140,7 @@ struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
+struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -149,6 +156,7 @@ struct mlxsw_sp {
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
struct mlxsw_sp_port_mapping **port_mapping;
+ struct rhashtable sample_trigger_ht;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
@@ -165,6 +173,7 @@ struct mlxsw_sp {
struct mlxsw_sp_counter_pool *counter_pool;
struct mlxsw_sp_span *span;
struct mlxsw_sp_trap *trap;
+ const struct mlxsw_sp_switchdev_ops *switchdev_ops;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
const struct mlxsw_afk_ops *afk_ops;
@@ -172,7 +181,6 @@ struct mlxsw_sp {
const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
- const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_sb_ops *sb_ops;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
@@ -180,6 +188,8 @@ struct mlxsw_sp {
const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_sp_policer_core_ops *policer_core_ops;
const struct mlxsw_sp_trap_ops *trap_ops;
+ const struct mlxsw_sp_mall_ops *mall_ops;
+ const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
@@ -233,7 +243,18 @@ struct mlxsw_sp_port_pcpu_stats {
u32 tx_dropped;
};
-struct mlxsw_sp_port_sample {
+enum mlxsw_sp_sample_trigger_type {
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS,
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS,
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+};
+
+struct mlxsw_sp_sample_trigger {
+ enum mlxsw_sp_sample_trigger_type type;
+ u8 local_port; /* Reserved when trigger type is not ingress / egress. */
+};
+
+struct mlxsw_sp_sample_params {
struct psample_group *psample_group;
u32 trunc_size;
u32 rate;
@@ -303,7 +324,6 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
} periodic_hw_stats;
- struct mlxsw_sp_port_sample __rcu *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc_state *qdisc;
@@ -546,6 +566,17 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_hdroom *hdroom);
int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_hdroom *hdroom);
+struct mlxsw_sp_sample_params *
+mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger);
+int
+mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params,
+ struct netlink_ext_ack *extack);
+void
+mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
@@ -583,8 +614,6 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
u8 local_port);
-void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -601,6 +630,8 @@ int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
+int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ethtype);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u16 ethtype);
struct mlxsw_sp_port_vlan *
@@ -939,6 +970,12 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u16 fid, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate,
+ struct netlink_ext_ack *extack);
struct mlxsw_sp_acl_rule;
@@ -1048,6 +1085,19 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_matchall.c */
+struct mlxsw_sp_mall_ops {
+ int (*sample_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack);
+ void (*sample_del)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry);
+};
+
+extern const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops;
+extern const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops;
+
enum mlxsw_sp_mall_action_type {
MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
@@ -1063,6 +1113,11 @@ struct mlxsw_sp_mall_trap_entry {
int span_id;
};
+struct mlxsw_sp_mall_sample_entry {
+ struct mlxsw_sp_sample_params params;
+ int span_id; /* Relevant for Spectrum-2 onwards. */
+};
+
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
@@ -1072,7 +1127,7 @@ struct mlxsw_sp_mall_entry {
union {
struct mlxsw_sp_mall_mirror_entry mirror;
struct mlxsw_sp_mall_trap_entry trap;
- struct mlxsw_sp_port_sample sample;
+ struct mlxsw_sp_mall_sample_entry sample;
};
struct rcu_head rcu;
};
@@ -1083,7 +1138,8 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 8cfa03a75374..67cedfa76f78 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -688,6 +688,31 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
+int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
+ return -EOPNOTSUPP;
+ }
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_flow_block_binding, list);
+ mlxsw_sp_port = binding->mlxsw_sp_port;
+
+ return mlxsw_afa_block_append_sampler(rulei->act_block,
+ mlxsw_sp_port->local_port,
+ psample_group, rate, trunc_size,
+ truncate, binding->ingress,
+ extack);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 90372d1c28d4..c72aa38424dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -192,6 +192,22 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
policer_index);
}
+static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress, int *p_span_id,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Sampling action is not supported on Spectrum-1");
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id,
+ bool ingress)
+{
+ WARN_ON_ONCE(1);
+}
+
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
@@ -204,8 +220,73 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.mirror_del = mlxsw_sp_act_mirror_del,
.policer_add = mlxsw_sp_act_policer_add,
.policer_del = mlxsw_sp_act_policer_del,
+ .sampler_add = mlxsw_sp1_act_sampler_add,
+ .sampler_del = mlxsw_sp1_act_sampler_del,
};
+static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress, int *p_span_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+ };
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_sample_params params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ params.psample_group = psample_group;
+ params.trunc_size = trunc_size;
+ params.rate = rate;
+ params.truncate = truncate;
+ err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, &params,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get SPAN agent");
+ goto err_span_agent_get;
+ }
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+err_span_agent_get:
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+ return err;
+}
+
+static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id,
+ bool ingress)
+{
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+}
+
const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
.kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
@@ -218,6 +299,8 @@ const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
.mirror_del = mlxsw_sp_act_mirror_del,
.policer_add = mlxsw_sp_act_policer_add,
.policer_del = mlxsw_sp_act_policer_del,
+ .sampler_add = mlxsw_sp2_act_sampler_add,
+ .sampler_del = mlxsw_sp2_act_sampler_del,
.dummy_first_set = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ed81d4fa48ac..1a2fef2a5379 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -912,9 +912,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
u64 size = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
- if (mlxsw_sp_nexthop_offload(nh) &&
- !mlxsw_sp_nexthop_group_has_ipip(nh) &&
- !mlxsw_sp_nexthop_is_discard(nh))
+ if (mlxsw_sp_nexthop_is_forward(nh) &&
+ !mlxsw_sp_nexthop_group_has_ipip(nh))
size++;
return size;
}
@@ -1105,9 +1104,8 @@ start_again:
nh_skip = nh_count;
nh_count = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
- if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh) ||
- mlxsw_sp_nexthop_is_discard(nh))
+ if (!mlxsw_sp_nexthop_is_forward(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
continue;
if (nh_count < nh_skip)
@@ -1180,6 +1178,7 @@ out:
static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_nexthop *nh;
u32 adj_hash_index = 0;
@@ -1187,9 +1186,8 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
u32 adj_size = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
- if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh) ||
- mlxsw_sp_nexthop_is_discard(nh))
+ if (!mlxsw_sp_nexthop_is_forward(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
continue;
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
@@ -1198,8 +1196,9 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
else
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
- mlxsw_sp_nexthop_update(mlxsw_sp,
- adj_index + adj_hash_index, nh);
+ mlxsw_sp_nexthop_eth_update(mlxsw_sp,
+ adj_index + adj_hash_index, nh,
+ true, ratr_pl);
}
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 078601d31cde..c8061beed6db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1059,6 +1059,131 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
}
+static void
+mlxsw_sp_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ phy_stats->SymbolErrorDuringCarrier =
+ mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get(ppcnt_pl);
+}
+
+static void
+mlxsw_sp_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ mac_stats->FramesTransmittedOK =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ mac_stats->FramesReceivedOK =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ mac_stats->FrameCheckSequenceErrors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ mac_stats->AlignmentErrors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+ mac_stats->OctetsTransmittedOK =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ mac_stats->OctetsReceivedOK =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ mac_stats->MulticastFramesXmittedOK =
+ mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get(ppcnt_pl);
+ mac_stats->BroadcastFramesXmittedOK =
+ mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get(ppcnt_pl);
+ mac_stats->MulticastFramesReceivedOK =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+ mac_stats->BroadcastFramesReceivedOK =
+ mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get(ppcnt_pl);
+ mac_stats->InRangeLengthErrors =
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl);
+ mac_stats->OutOfRangeLengthField =
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl);
+ mac_stats->FrameTooLongErrors =
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl);
+}
+
+static void
+mlxsw_sp_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ ctrl_stats->MACControlFramesTransmitted =
+ mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get(ppcnt_pl);
+ ctrl_stats->MACControlFramesReceived =
+ mlxsw_reg_ppcnt_a_mac_control_frames_received_get(ppcnt_pl);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get(ppcnt_pl);
+}
+
+static const struct ethtool_rmon_hist_range mlxsw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 10239 },
+ {}
+};
+
+static void
+mlxsw_sp_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_RFC_2819_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ rmon->undersize_pkts =
+ mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get(ppcnt_pl);
+ rmon->oversize_pkts =
+ mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get(ppcnt_pl);
+ rmon->fragments =
+ mlxsw_reg_ppcnt_ether_stats_fragments_get(ppcnt_pl);
+
+ rmon->hist[0] = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get(ppcnt_pl);
+ rmon->hist[1] =
+ mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get(ppcnt_pl);
+ rmon->hist[2] =
+ mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get(ppcnt_pl);
+ rmon->hist[3] =
+ mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get(ppcnt_pl);
+ rmon->hist[4] =
+ mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get(ppcnt_pl);
+ rmon->hist[5] =
+ mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get(ppcnt_pl);
+ rmon->hist[6] =
+ mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get(ppcnt_pl);
+ rmon->hist[7] =
+ mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get(ppcnt_pl);
+ rmon->hist[8] =
+ mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get(ppcnt_pl);
+ rmon->hist[9] =
+ mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get(ppcnt_pl);
+
+ *ranges = mlxsw_rmon_ranges;
+}
+
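The hist[] slots filled above are positional against mlxsw_rmon_ranges, whose empty final entry acts as a terminator. A standalone sketch of how a consumer would walk the two parallel arrays (counter values are made up):

#include <stdint.h>
#include <stdio.h>

struct range { uint32_t low, high; };

int main(void)
{
	const struct range ranges[] = {		/* copy of the layout above */
		{ 0, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 },
		{ 512, 1023 }, { 1024, 1518 }, { 1519, 2047 },
		{ 2048, 4095 }, { 4096, 8191 }, { 8192, 10239 }, { 0, 0 },
	};
	uint64_t hist[10] = { 5, 4, 3, 2, 1 };	/* example counters */
	int i;

	for (i = 0; ranges[i].low || ranges[i].high; i++)	/* {0,0} ends */
		printf("%u-%u: %llu pkts\n", (unsigned)ranges[i].low,
		       (unsigned)ranges[i].high,
		       (unsigned long long)hist[i]);
	return 0;
}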
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.cap_link_lanes_supported = true,
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
@@ -1075,6 +1200,10 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
.get_ts_info = mlxsw_sp_get_ts_info,
+ .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
+ .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
+ .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
+ .get_rmon_stats = mlxsw_sp_get_rmon_stats,
};
struct mlxsw_sp1_port_link_mode {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
index 0456cda33808..9e50c823a354 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -71,7 +71,7 @@ static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
- err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 41855e58564b..be3791ca6069 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -24,6 +24,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
const struct flow_action_entry *act;
int mirror_act_count = 0;
int police_act_count = 0;
+ int sample_act_count = 0;
int err, i;
if (!flow_action_has_entries(flow_action))
@@ -190,6 +191,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
/* The kernel might adjust the requested burst size so
* that it is not exactly a power of two. Re-adjust it
* here since the hardware only supports burst sizes
@@ -204,6 +210,23 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return err;
break;
}
+ case FLOW_ACTION_SAMPLE: {
+ if (sample_act_count++) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
+ block,
+ act->sample.psample_group,
+ act->sample.rate,
+ act->sample.trunc_size,
+ act->sample.truncate,
+ extack);
+ if (err)
+ return err;
+ break;
+ }
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 64a8f838eb53..5facabd86882 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -127,14 +127,16 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
static int
mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_ipip_entry *ipip_entry)
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
__be32 daddr4 = mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev);
- char ratr_pl[MLXSW_REG_RATR_LEN];
+ enum mlxsw_reg_ratr_op op;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
- true, MLXSW_REG_RATR_TYPE_IPIP,
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
adj_index, rif_index);
mlxsw_reg_ratr_ipip4_entry_pack(ratr_pl, be32_to_cpu(daddr4));
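The new force/ratr_pl arguments let callers choose between an unconditional adjacency write and one that, as the register op name suggests, only takes effect while the entry shows no recent activity; the precise hardware semantics are an assumption here. A toy model of the distinction:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of an adjacency entry with an activity bit. entry_write()
 * mimics the difference between a forced write and the on-activity op,
 * under the assumption that the latter skips entries carrying traffic. */
struct adj_entry {
	int dest;
	bool active;	/* set by "hardware" when traffic hits the entry */
};

static bool entry_write(struct adj_entry *e, int dest, bool force)
{
	if (!force && e->active)
		return false;	/* leave a live entry untouched */
	e->dest = dest;
	return true;
}

int main(void)
{
	struct adj_entry e = { .dest = 1, .active = true };

	printf("on-activity write: %s\n",
	       entry_write(&e, 2, false) ? "applied" : "skipped");
	printf("forced write:      %s\n",
	       entry_write(&e, 2, true) ? "applied" : "skipped");
	return 0;
}
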
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 87bef9880e5e..f0837b42d1d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -40,7 +40,8 @@ struct mlxsw_sp_ipip_ops {
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_ipip_entry *ipip_entry);
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl);
bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
index f30599ad6019..07b371cd9818 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -24,7 +24,8 @@ mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mall_entry *mall_entry)
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_agent_parms agent_parms = {};
@@ -33,28 +34,35 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
if (!mall_entry->mirror.to_dev) {
- netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ NL_SET_ERR_MSG(extack, "Could not find requested device");
return -EINVAL;
}
agent_parms.to_dev = mall_entry->mirror.to_dev;
err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
&agent_parms);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
return err;
+ }
err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
mall_entry->ingress);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
goto err_analyzed_port_get;
+ }
trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
MLXSW_SP_SPAN_TRIGGER_EGRESS;
parms.span_id = mall_entry->mirror.span_id;
+ parms.probability_rate = 1;
err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
&parms);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
goto err_agent_bind;
+ }
return 0;
@@ -93,46 +101,64 @@ static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mall_entry *mall_entry)
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_sample_trigger trigger;
int err;
- if (rtnl_dereference(mlxsw_sp_port->sample)) {
- netdev_err(mlxsw_sp_port->dev, "sample already active\n");
- return -EEXIST;
- }
- rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
+ if (mall_entry->ingress)
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ else
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port->local_port;
+ err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger,
+ &mall_entry->sample.params,
+ extack);
+ if (err)
+ return err;
- err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
- mall_entry->sample.rate);
+ err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port,
+ mall_entry, extack);
if (err)
goto err_port_sample_set;
return 0;
err_port_sample_set:
- RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
return err;
}
static void
-mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
+mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
{
- if (!mlxsw_sp_port->sample)
- return;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_sample_trigger trigger;
- mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
- RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ if (mall_entry->ingress)
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ else
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
}
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mall_entry *mall_entry)
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
{
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
- return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
+ return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry,
+ extack);
case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
- return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
+ return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry,
+ extack);
default:
WARN_ON(1);
return -EINVAL;
@@ -148,7 +174,7 @@ mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
break;
case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
- mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
+ mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry);
break;
default:
WARN_ON(1);
@@ -212,6 +238,11 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
flower_prio_valid = true;
}
+ if (protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG(f->common.extack, "matchall rules only supported with 'all' protocol");
+ return -EOPNOTSUPP;
+ }
+
mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
if (!mall_entry)
return -ENOMEM;
@@ -219,54 +250,41 @@ int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
mall_entry->priority = f->common.prio;
mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
act = &f->rule->action.entries[0];
- if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
- if (flower_prio_valid && mall_entry->ingress &&
- mall_entry->priority >= flower_min_prio) {
- NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
- err = -EOPNOTSUPP;
- goto errout;
- }
- if (flower_prio_valid && !mall_entry->ingress &&
- mall_entry->priority <= flower_max_prio) {
- NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
- err = -EOPNOTSUPP;
- goto errout;
- }
+ switch (act->id) {
+ case FLOW_ACTION_MIRRED:
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
mall_entry->mirror.to_dev = act->dev;
- } else if (act->id == FLOW_ACTION_SAMPLE &&
- protocol == htons(ETH_P_ALL)) {
- if (!mall_entry->ingress) {
- NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
- err = -EOPNOTSUPP;
- goto errout;
- }
- if (flower_prio_valid &&
- mall_entry->priority >= flower_min_prio) {
- NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
- err = -EOPNOTSUPP;
- goto errout;
- }
- if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
- NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
- err = -EOPNOTSUPP;
- goto errout;
- }
+ break;
+ case FLOW_ACTION_SAMPLE:
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
- mall_entry->sample.psample_group = act->sample.psample_group;
- mall_entry->sample.truncate = act->sample.truncate;
- mall_entry->sample.trunc_size = act->sample.trunc_size;
- mall_entry->sample.rate = act->sample.rate;
- } else {
+ mall_entry->sample.params.psample_group = act->sample.psample_group;
+ mall_entry->sample.params.truncate = act->sample.truncate;
+ mall_entry->sample.params.trunc_size = act->sample.trunc_size;
+ mall_entry->sample.params.rate = act->sample.rate;
+ break;
+ default:
err = -EOPNOTSUPP;
goto errout;
}
list_for_each_entry(binding, &block->binding_list, list) {
err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
- mall_entry);
+ mall_entry, f->common.extack);
if (err)
goto rollback;
}
@@ -314,13 +332,15 @@ void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
}
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &block->mall.list, list) {
- err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry,
+ extack);
if (err)
goto rollback;
}
@@ -355,3 +375,104 @@ int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
*p_max_prio = block->mall.max_prio;
return 0;
}
+
+static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ u32 rate = mall_entry->sample.params.rate;
+
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(extack, "Sampling is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (rate > MLXSW_REG_MPSC_RATE_MAX) {
+ NL_SET_ERR_MSG(extack, "Unsupported sampling rate");
+ return -EOPNOTSUPP;
+ }
+
+ return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate);
+}
+
+static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
+}
+
+const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = {
+ .sample_add = mlxsw_sp1_mall_sample_add,
+ .sample_del = mlxsw_sp1_mall_sample_del,
+};
+
+static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .to_dev = NULL, /* Mirror to CPU. */
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+ };
+ u32 rate = mall_entry->sample.params.rate;
+ enum mlxsw_sp_span_trigger span_trigger;
+ int err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id,
+ &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
+ return err;
+ }
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ trigger_parms.span_id = mall_entry->sample.span_id;
+ trigger_parms.probability_rate = rate;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
+ goto err_agent_bind;
+ }
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
+ return err;
+}
+
+static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ enum mlxsw_sp_span_trigger span_trigger;
+
+ span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ trigger_parms.span_id = mall_entry->sample.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
+}
+
+const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = {
+ .sample_add = mlxsw_sp2_mall_sample_add,
+ .sample_del = mlxsw_sp2_mall_sample_del,
+};
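Sampling is thus implemented twice: Spectrum-1 keeps its dedicated per-port sampler, while Spectrum-2 mirrors sampled packets to the CPU through a SPAN agent, and the right variant is bound once through a per-ASIC ops table. A minimal, standalone illustration of that dispatch pattern (all names invented for the sketch):

#include <stdio.h>

/* Per-generation ops table, modeled on mlxsw_sp1/sp2_mall_ops above. */
struct mall_ops {
	int (*sample_add)(int port, unsigned int rate);
	void (*sample_del)(int port);
};

static int sp1_sample_add(int port, unsigned int rate)
{
	printf("sp1: dedicated sampler, port %d, rate 1/%u\n", port, rate);
	return 0;
}

static void sp1_sample_del(int port) { printf("sp1: del port %d\n", port); }

static int sp2_sample_add(int port, unsigned int rate)
{
	printf("sp2: mirror-to-CPU SPAN agent, port %d, rate 1/%u\n",
	       port, rate);
	return 0;
}

static void sp2_sample_del(int port) { printf("sp2: del port %d\n", port); }

static const struct mall_ops sp1_ops = { sp1_sample_add, sp1_sample_del };
static const struct mall_ops sp2_ops = { sp2_sample_add, sp2_sample_del };

int main(void)
{
	const struct mall_ops *ops = &sp2_ops;	/* chosen once, per ASIC */

	ops->sample_add(1, 100);
	ops->sample_del(1);
	return 0;
}
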
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 2796d3659979..d8104fc6c900 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -18,7 +18,6 @@ struct mlxsw_sp_nve_config {
u32 ul_tb_id;
enum mlxsw_sp_l3proto ul_proto;
union mlxsw_sp_l3addr ul_sip;
- u16 ethertype;
};
struct mlxsw_sp_nve {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 3e2bb22e9ca6..b84bb4b65098 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -113,7 +113,6 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
config->udp_dport = cfg->dst_port;
- config->ethertype = params->ethertype;
}
static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
@@ -318,20 +317,14 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype)
+mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp)
{
char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
- u8 sver_type;
- int err;
mlxsw_reg_spvid_tport_set(spvid_pl, true);
mlxsw_reg_spvid_local_port_set(spvid_pl,
MLXSW_REG_TUNNEL_PORT_NVE);
- err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type);
- if (err)
- return err;
-
- mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type);
+ mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
@@ -367,7 +360,7 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_spvtr_write;
- err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype);
+ err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp);
if (err)
goto err_decap_ethertype_set;
@@ -392,8 +385,6 @@ static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
char spvtr_pl[MLXSW_REG_SPVTR_LEN];
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
- /* Set default EtherType */
- mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q);
mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index fd672c6c9133..04672eb5c7f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -29,7 +29,6 @@ struct mlxsw_sp_qdisc;
struct mlxsw_sp_qdisc_ops {
enum mlxsw_sp_qdisc_type type;
int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params);
int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
@@ -48,11 +47,14 @@ struct mlxsw_sp_qdisc_ops {
*/
void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent);
+ unsigned int num_classes;
};
struct mlxsw_sp_qdisc {
u32 handle;
- u8 tclass_num;
+ int tclass_num;
u8 prio_bitmap;
union {
struct red_stats red;
@@ -66,11 +68,13 @@ struct mlxsw_sp_qdisc {
} stats_base;
struct mlxsw_sp_qdisc_ops *ops;
+ struct mlxsw_sp_qdisc *parent;
+ struct mlxsw_sp_qdisc *qdiscs;
+ unsigned int num_classes;
};
struct mlxsw_sp_qdisc_state {
struct mlxsw_sp_qdisc root_qdisc;
- struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
* created first. When notifications for these FIFOs arrive, it is not
@@ -85,15 +89,55 @@ struct mlxsw_sp_qdisc_state {
*/
u32 future_handle;
bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+ struct mutex lock; /* Protects qdisc state. */
};
static bool
-mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
- enum mlxsw_sp_qdisc_type type)
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
+{
+ return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
+ struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
+ void *),
+ void *data)
+{
+ struct mlxsw_sp_qdisc *tmp;
+ unsigned int i;
+
+ if (pre) {
+ tmp = pre(qdisc, data);
+ if (tmp)
+ return tmp;
+ }
+
+ if (qdisc->ops) {
+ for (i = 0; i < qdisc->num_classes; i++) {
+ tmp = &qdisc->qdiscs[i];
+ if (tmp->ops) {
+ tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
+ if (tmp)
+ return tmp;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
{
- return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
- mlxsw_sp_qdisc->ops->type == type &&
- mlxsw_sp_qdisc->handle == handle;
+ u32 parent = *(u32 *)data;
+
+ if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
+ if (qdisc->ops->find_class)
+ return qdisc->ops->find_class(qdisc, parent);
+ }
+
+ return NULL;
}
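The qdisc state is now a tree: each offloaded qdisc carries an array of child qdiscs, and lookups are pre-order walks that stop at the first node a callback accepts. A compact, runnable analogue of the traversal:

#include <stddef.h>
#include <stdio.h>

/* Simplified analogue of mlxsw_sp_qdisc_walk(): pre-order traversal of a
 * tree whose children live in a per-node array, stopping at the first
 * node the callback accepts. */
struct node {
	int handle;
	struct node *children;
	unsigned int num_children;
};

static struct node *walk(struct node *n,
			 struct node *(*pre)(struct node *, void *),
			 void *data)
{
	struct node *tmp;
	unsigned int i;

	if (pre) {
		tmp = pre(n, data);
		if (tmp)
			return tmp;
	}
	for (i = 0; i < n->num_children; i++) {
		tmp = walk(&n->children[i], pre, data);
		if (tmp)
			return tmp;
	}
	return NULL;
}

static struct node *find_by_handle(struct node *n, void *data)
{
	return n->handle == *(int *)data ? n : NULL;
}

int main(void)
{
	struct node bands[2] = { { .handle = 11 }, { .handle = 12 } };
	struct node root = { .handle = 1, .children = bands,
			     .num_children = 2 };
	int wanted = 12;
	struct node *hit = walk(&root, find_by_handle, &wanted);

	printf("found handle %d\n", hit ? hit->handle : -1);
	return 0;
}
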
static struct mlxsw_sp_qdisc *
@@ -101,39 +145,46 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
bool root_only)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- int tclass, child_index;
+ if (!qdisc_state)
+ return NULL;
if (parent == TC_H_ROOT)
return &qdisc_state->root_qdisc;
-
- if (root_only || !qdisc_state ||
- !qdisc_state->root_qdisc.ops ||
- TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
- TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
+ if (root_only)
return NULL;
+ return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
+ mlxsw_sp_qdisc_walk_cb_find, &parent);
+}
- child_index = TC_H_MIN(parent);
- tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- return &qdisc_state->tclass_qdiscs[tclass];
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
+{
+ u32 handle = *(u32 *)data;
+
+ if (qdisc->ops && qdisc->handle == handle)
+ return qdisc;
+ return NULL;
}
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- int i;
-
- if (qdisc_state->root_qdisc.handle == handle)
- return &qdisc_state->root_qdisc;
- if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
+ if (!qdisc_state)
return NULL;
+ return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
+ mlxsw_sp_qdisc_walk_cb_find_by_handle,
+ &handle);
+}
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (qdisc_state->tclass_qdiscs[i].handle == handle)
- return &qdisc_state->tclass_qdiscs[i];
+static void
+mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *tmp;
- return NULL;
+ for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
+ tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
}
static int
@@ -157,32 +208,48 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
- if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
+ if (mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
mlxsw_sp_qdisc);
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
mlxsw_sp_qdisc->ops = NULL;
-
+ mlxsw_sp_qdisc->num_classes = 0;
+ kfree(mlxsw_sp_qdisc->qdiscs);
+ mlxsw_sp_qdisc->qdiscs = NULL;
return err_hdroom ?: err;
}
-static int
-mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct mlxsw_sp_qdisc_ops *ops, void *params)
+static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
{
struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
struct mlxsw_sp_hdroom orig_hdroom;
+ unsigned int i;
int err;
- if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
- /* In case this location contained a different qdisc of the
- * same type we can override the old qdisc configuration.
- * Otherwise, we need to remove the old qdisc before setting the
- * new one.
- */
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, params);
+ if (err)
+ return err;
+
+ if (ops->num_classes) {
+ mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
+ sizeof(*mlxsw_sp_qdisc->qdiscs),
+ GFP_KERNEL);
+ if (!mlxsw_sp_qdisc->qdiscs)
+ return -ENOMEM;
+
+ for (i = 0; i < ops->num_classes; i++)
+ mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
+ }
orig_hdroom = *mlxsw_sp_port->hdroom;
if (root_qdisc == mlxsw_sp_qdisc) {
@@ -198,20 +265,46 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
goto err_hdroom_configure;
}
- err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ mlxsw_sp_qdisc->num_classes = ops->num_classes;
+ mlxsw_sp_qdisc->ops = ops;
+ mlxsw_sp_qdisc->handle = handle;
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
- goto err_bad_param;
+ goto err_replace;
+
+ return 0;
+
+err_replace:
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ mlxsw_sp_qdisc->num_classes = 0;
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
+ kfree(mlxsw_sp_qdisc->qdiscs);
+ mlxsw_sp_qdisc->qdiscs = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
+{
+ struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
+ int err;
+
+ err = ops->check_params(mlxsw_sp_port, params);
+ if (err)
+ goto unoffload;
err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
- goto err_config;
+ goto unoffload;
/* Check if the Qdisc changed. That includes a situation where an
* invisible Qdisc replaces another one, or is being added for the
* first time.
*/
- if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
- mlxsw_sp_qdisc->ops = ops;
+ if (mlxsw_sp_qdisc->handle != handle) {
if (ops->clean_stats)
ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
}
@@ -219,11 +312,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
mlxsw_sp_qdisc->handle = handle;
return 0;
-err_bad_param:
-err_config:
- mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
-err_hdroom_configure:
- if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+unoffload:
+ if (ops->unoffload)
ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
@@ -231,6 +321,27 @@ err_hdroom_configure:
}
static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+ /* If this location already holds a qdisc of a different type, it
+ * must be removed before the new one is set; a qdisc of the same
+ * type can simply be overridden in place.
+ */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ if (!mlxsw_sp_qdisc->ops)
+ return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
+ mlxsw_sp_qdisc, ops, params);
+ else
+ return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
+ mlxsw_sp_qdisc, params);
+}
+
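mlxsw_sp_qdisc_replace() is thereby split into two paths: destroy on type mismatch, then either create the offload from scratch or change the existing one in place. A skeleton of that control flow (types and names simplified for the sketch):

#include <stdio.h>

/* Skeleton of the destroy-on-mismatch + create-or-change flow above;
 * types and names are simplified stand-ins for the driver's. */
struct qdisc_ops { int type; };

struct qdisc {
	const struct qdisc_ops *ops;
	unsigned int handle;
};

static void destroy(struct qdisc *q)
{
	printf("destroyed qdisc %x\n", q->handle);
	q->ops = NULL;
	q->handle = 0;
}

static int create(struct qdisc *q, unsigned int handle,
		  const struct qdisc_ops *ops)
{
	q->ops = ops;
	q->handle = handle;
	printf("created qdisc %x (type %d)\n", handle, ops->type);
	return 0;
}

static int change(struct qdisc *q, unsigned int handle)
{
	q->handle = handle;
	printf("changed qdisc, handle now %x\n", handle);
	return 0;
}

static int replace(struct qdisc *q, unsigned int handle,
		   const struct qdisc_ops *ops)
{
	if (q->ops && q->ops->type != ops->type)
		destroy(q);		/* different type at this location */

	return q->ops ? change(q, handle) : create(q, handle, ops);
}

int main(void)
{
	static const struct qdisc_ops red = { 1 }, tbf = { 2 };
	struct qdisc q = { 0 };

	replace(&q, 0x10000, &red);	/* first time: create */
	replace(&q, 0x10000, &red);	/* same type: change */
	replace(&q, 0x20000, &tbf);	/* type change: destroy + create */
	return 0;
}
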
+static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
@@ -295,7 +406,7 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *p_tx_bytes, u64 *p_tx_packets,
u64 *p_drops, u64 *p_backlog)
{
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
u64 tx_bytes, tx_packets;
@@ -395,7 +506,7 @@ static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *red_base;
@@ -421,20 +532,12 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
-
- if (root_qdisc != mlxsw_sp_qdisc)
- root_qdisc->stats_base.backlog -=
- mlxsw_sp_qdisc->stats_base.backlog;
-
return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
mlxsw_sp_qdisc->tclass_num);
}
static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -467,7 +570,7 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct tc_red_qopt_offload_params *p = params;
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
u32 min, max;
u64 prob;
@@ -512,7 +615,7 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
void *xstats_ptr)
{
struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
int early_drops, pdrops;
@@ -536,7 +639,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
u64 overlimits;
@@ -553,6 +656,13 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent)
+{
+ return NULL;
+}
+
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
@@ -564,10 +674,11 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.get_stats = mlxsw_sp_qdisc_get_red_stats,
.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+ .find_class = mlxsw_sp_qdisc_leaf_find_class,
};
-int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_red_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -581,8 +692,7 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_red,
&p->set);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_RED))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -599,6 +709,18 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
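Each TC entry point now becomes a thin wrapper that takes the new per-port qdisc mutex around a __ helper, rather than relying on RTNL for serialization. The pattern in miniature, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

/* The wrapper pattern used for the TC entry points above: state that
 * used to be protected by RTNL gets its own mutex, and every exported
 * function is a lock/unlock shell around a __ helper. */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

static int __setup(int cmd)
{
	state = cmd;	/* touch protected state */
	return 0;
}

int setup(int cmd)
{
	int err;

	pthread_mutex_lock(&state_lock);
	err = __setup(cmd);
	pthread_mutex_unlock(&state_lock);
	return err;
}

int main(void)
{
	printf("setup -> %d, state = %d\n", setup(5), state);
	return 0;
}
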
static void
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -622,13 +744,6 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
-
- if (root_qdisc != mlxsw_sp_qdisc)
- root_qdisc->stats_base.backlog -=
- mlxsw_sp_qdisc->stats_base.backlog;
-
return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
mlxsw_sp_qdisc->tclass_num, 0,
@@ -678,7 +793,6 @@ mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
static int
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_tbf_qopt_offload_replace_params *p = params;
@@ -766,10 +880,11 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
.destroy = mlxsw_sp_qdisc_tbf_destroy,
.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+ .find_class = mlxsw_sp_qdisc_leaf_find_class,
};
-int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_tbf_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -783,8 +898,7 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_tbf,
&p->replace_params);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_TBF))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -798,22 +912,20 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int
-mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+ int err;
- if (root_qdisc != mlxsw_sp_qdisc)
- root_qdisc->stats_base.backlog -=
- mlxsw_sp_qdisc->stats_base.backlog;
- return 0;
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
}
static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
return 0;
@@ -841,25 +953,18 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
.type = MLXSW_SP_QDISC_FIFO,
.check_params = mlxsw_sp_qdisc_fifo_check_params,
.replace = mlxsw_sp_qdisc_fifo_replace,
- .destroy = mlxsw_sp_qdisc_fifo_destroy,
.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
-int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_fifo_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass, child_index;
+ unsigned int band;
u32 parent_handle;
- /* Invisible FIFOs are tracked in future_handle and future_fifos. Make
- * sure that not more than one qdisc is created for a port at a time.
- * RTNL is a simple proxy for that.
- */
- ASSERT_RTNL();
-
mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
parent_handle = TC_H_MAJ(p->parent);
@@ -872,13 +977,12 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
qdisc_state->future_handle = parent_handle;
}
- child_index = TC_H_MIN(p->parent);
- tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- if (tclass < IEEE_8021QAZ_MAX_TCS) {
+ band = TC_H_MIN(p->parent) - 1;
+ if (band < IEEE_8021QAZ_MAX_TCS) {
if (p->command == TC_FIFO_REPLACE)
- qdisc_state->future_fifos[tclass] = true;
+ qdisc_state->future_fifos[band] = true;
else if (p->command == TC_FIFO_DESTROY)
- qdisc_state->future_fifos[tclass] = false;
+ qdisc_state->future_fifos[band] = false;
}
}
if (!mlxsw_sp_qdisc)
@@ -890,16 +994,12 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_fifo, NULL);
}
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_FIFO))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
case TC_FIFO_DESTROY:
- if (p->handle == mlxsw_sp_qdisc->handle)
- return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- mlxsw_sp_qdisc);
- return 0;
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_FIFO_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
@@ -910,21 +1010,32 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static int
-__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &qdisc_state->tclass_qdiscs[i]);
- qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
+ &mlxsw_sp_qdisc->qdiscs[i]);
+ mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
}
return 0;
@@ -934,7 +1045,7 @@ static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
static int
@@ -948,7 +1059,6 @@ __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
@@ -957,8 +1067,9 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- unsigned int nbands,
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 handle, unsigned int nbands,
const unsigned int *quanta,
const unsigned int *weights,
const u8 *priomap)
@@ -971,7 +1082,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
+ child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
@@ -993,6 +1104,9 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
return err;
}
}
+
+ child_qdisc->tclass_num = tclass;
+
if (old_priomap != child_qdisc->prio_bitmap &&
child_qdisc->ops && child_qdisc->ops->clean_stats) {
backlog = child_qdisc->stats_base.backlog;
@@ -1002,7 +1116,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
}
if (handle == qdisc_state->future_handle &&
- qdisc_state->future_fifos[tclass]) {
+ qdisc_state->future_fifos[band]) {
err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
child_qdisc,
&mlxsw_sp_qdisc_ops_fifo,
@@ -1013,7 +1127,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
+ child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
@@ -1034,8 +1148,9 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct tc_prio_qopt_offload_params *p = params;
unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
- zeroes, zeroes, p->priomap);
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
+ handle, p->bands, zeroes,
+ zeroes, p->priomap);
}
static void
@@ -1066,7 +1181,6 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *tc_qdisc;
u64 tx_packets = 0;
u64 tx_bytes = 0;
@@ -1074,8 +1188,8 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 drops = 0;
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tc_qdisc = &qdisc_state->tclass_qdiscs[i];
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
&tx_bytes, &tx_packets,
&drops, &backlog);
@@ -1112,6 +1226,18 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent)
+{
+ int child_index = TC_H_MIN(parent);
+ int band = child_index - 1;
+
+ if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
+ return NULL;
+ return &mlxsw_sp_qdisc->qdiscs[band];
+}
+
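find_class maps a TC classid back to a band: prio/ets children use minor numbers 1..n within the qdisc's major, so band = TC_H_MIN(parent) - 1. A small demonstration using the handle macros as defined by the kernel UAPI (linux/pkt_sched.h):

#include <stdio.h>

/* TC handle helpers, as defined by the kernel UAPI (linux/pkt_sched.h). */
#define TC_H_MAJ(h)	((h) & 0xFFFF0000U)
#define TC_H_MIN(h)	((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min) (((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))

int main(void)
{
	unsigned int qdisc_handle = 0x00010000;		   /* "1:"  */
	unsigned int classid = TC_H_MAKE(qdisc_handle, 3); /* "1:3" */
	int band = TC_H_MIN(classid) - 1; /* minors 1-based, bands 0-based */

	printf("classid %x:%x -> band %d\n",
	       TC_H_MAJ(classid) >> 16, TC_H_MIN(classid), band);
	return 0;
}
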
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.type = MLXSW_SP_QDISC_PRIO,
.check_params = mlxsw_sp_qdisc_prio_check_params,
@@ -1120,11 +1246,12 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.destroy = mlxsw_sp_qdisc_prio_destroy,
.get_stats = mlxsw_sp_qdisc_get_prio_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+ .find_class = mlxsw_sp_qdisc_prio_find_class,
+ .num_classes = IEEE_8021QAZ_MAX_TCS,
};
static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_ets_qopt_offload_replace_params *p = params;
@@ -1139,8 +1266,9 @@ mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
{
struct tc_ets_qopt_offload_replace_params *p = params;
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
- p->quanta, p->weights, p->priomap);
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
+ handle, p->bands, p->quanta,
+ p->weights, p->priomap);
}
static void
@@ -1158,7 +1286,7 @@ static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
@@ -1169,6 +1297,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
.destroy = mlxsw_sp_qdisc_ets_destroy,
.get_stats = mlxsw_sp_qdisc_get_prio_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+ .find_class = mlxsw_sp_qdisc_prio_find_class,
+ .num_classes = IEEE_8021QAZ_MAX_TCS,
};
/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
@@ -1201,12 +1331,10 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u8 band, u32 child_handle)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
- int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
- if (band < IEEE_8021QAZ_MAX_TCS &&
- qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
+ if (band < mlxsw_sp_qdisc->num_classes &&
+ mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
return 0;
if (!child_handle) {
@@ -1224,8 +1352,10 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &qdisc_state->tclass_qdiscs[tclass_num]);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band + 1);
+ if (!WARN_ON(!mlxsw_sp_qdisc))
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
return -EOPNOTSUPP;
}
@@ -1238,8 +1368,8 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
p->band, p->child_handle);
}
-int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_prio_qopt_offload *p)
+static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -1253,8 +1383,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_prio,
&p->replace_params);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_PRIO))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -1271,8 +1400,20 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_ets_qopt_offload *p)
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
@@ -1286,8 +1427,7 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_qdisc_ops_ets,
&p->replace_params);
- if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
- MLXSW_SP_QDISC_ETS))
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
return -EOPNOTSUPP;
switch (p->command) {
@@ -1305,6 +1445,18 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
struct mlxsw_sp_qevent_block {
struct list_head binding_list;
struct list_head mall_entry_list;
@@ -1341,6 +1493,7 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
goto err_analyzed_port_get;
trigger_parms.span_id = span_id;
+ trigger_parms.probability_rate = 1;
err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
&trigger_parms);
if (err)
@@ -1404,7 +1557,9 @@ static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mall_entry *mall_entry,
struct mlxsw_sp_qevent_binding *qevent_binding)
{
- struct mlxsw_sp_span_agent_parms agent_parms = {};
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
+ };
int err;
err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
@@ -1831,22 +1986,20 @@ int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_por
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_qdisc_state *qdisc_state;
- int i;
qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
if (!qdisc_state)
return -ENOMEM;
+ mutex_init(&qdisc_state->lock);
qdisc_state->root_qdisc.prio_bitmap = 0xff;
qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qdisc_state->tclass_qdiscs[i].tclass_num = i;
-
mlxsw_sp_port->qdisc = qdisc_state;
return 0;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ mutex_destroy(&mlxsw_sp_port->qdisc->lock);
kfree(mlxsw_sp_port->qdisc);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index eda99d82766a..41259c0004d1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -113,6 +113,10 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+struct mlxsw_sp_router_ops {
+ int (*init)(struct mlxsw_sp *mlxsw_sp);
+};
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
@@ -2662,6 +2666,10 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
goto out;
}
+ if (neigh_entry->connected && entry_connected &&
+ !memcmp(neigh_entry->ha, ha, ETH_ALEN))
+ goto out;
+
memcpy(neigh_entry->ha, ha, ETH_ALEN);
mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
@@ -2842,6 +2850,15 @@ enum mlxsw_sp_nexthop_type {
MLXSW_SP_NEXTHOP_TYPE_IPIP,
};
+enum mlxsw_sp_nexthop_action {
+ /* Nexthop forwards packets to an egress RIF */
+ MLXSW_SP_NEXTHOP_ACTION_FORWARD,
+ /* Nexthop discards packets */
+ MLXSW_SP_NEXTHOP_ACTION_DISCARD,
+ /* Nexthop traps packets */
+ MLXSW_SP_NEXTHOP_ACTION_TRAP,
+};
+
struct mlxsw_sp_nexthop_key {
struct fib_nh *fib_nh;
};
@@ -2862,16 +2879,16 @@ struct mlxsw_sp_nexthop {
int norm_nh_weight;
int num_adj_entries;
struct mlxsw_sp_rif *rif;
- u8 should_offload:1, /* set indicates this neigh is connected and
- * should be put to KVD linear area of this group.
+ u8 should_offload:1, /* set indicates this nexthop should be written
+ * to the adjacency table.
*/
- offloaded:1, /* set in case the neigh is actually put into
- * KVD linear area of this group.
+ offloaded:1, /* set indicates this nexthop was written to the
+ * adjacency table.
*/
- update:1, /* set indicates that MAC of this neigh should be
- * updated in HW
+ update:1; /* set indicates this nexthop should be updated in the
+ * adjacency table (e.g., its MAC changed).
*/
- discard:1; /* nexthop is programmed to discard packets */
+ enum mlxsw_sp_nexthop_action action;
enum mlxsw_sp_nexthop_type type;
union {
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -2894,7 +2911,9 @@ struct mlxsw_sp_nexthop_group_info {
u16 count;
int sum_norm_weight;
u8 adj_index_valid:1,
- gateway:1; /* routes using the group use a gateway */
+ gateway:1, /* routes using the group use a gateway */
+ is_resilient:1;
+ struct list_head list; /* member in nh_res_grp_list */
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
@@ -2979,14 +2998,15 @@ struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
return list_next_entry(nh, router_list_node);
}
-bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
+bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
{
- return nh->offloaded;
+ return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
}
unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
{
- if (!nh->offloaded)
+ if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
+ !mlxsw_sp_nexthop_is_forward(nh))
return NULL;
return nh->neigh_entry->ha;
}
@@ -3036,11 +3056,6 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
return false;
}
-bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh)
-{
- return nh->discard;
-}
-
static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
@@ -3403,20 +3418,38 @@ err_mass_update_vr:
return err;
}
-static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh,
+ bool force, char *ratr_pl)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
- char ratr_pl[MLXSW_REG_RATR_LEN];
+ enum mlxsw_reg_ratr_op op;
+ u16 rif_index;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
- true, MLXSW_REG_RATR_TYPE_ETHERNET,
- adj_index, nh->rif->rif_index);
- if (nh->discard)
+ rif_index = nh->rif ? nh->rif->rif_index :
+ mlxsw_sp->router->lb_rif_index;
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
+ adj_index, rif_index);
+ switch (nh->action) {
+ case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ break;
+ case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
mlxsw_reg_ratr_trap_action_set(ratr_pl,
MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
- else
- mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ break;
+ case MLXSW_SP_NEXTHOP_ACTION_TRAP:
+ mlxsw_reg_ratr_trap_action_set(ratr_pl,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
if (nh->counter_valid)
mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
else
@@ -3425,15 +3458,17 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
-int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
{
int i;
for (i = 0; i < nh->num_adj_entries; i++) {
int err;
- err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
+ err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
+ nh, force, ratr_pl);
if (err)
return err;
}
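A nexthop with weight w occupies w consecutive entries (num_adj_entries) in the group's adjacency block, which is why the update above loops over adj_index + i. A toy model of that layout:

#include <stdio.h>

/* How weighted nexthops map onto a group's adjacency block: each nexthop
 * owns num_adj_entries consecutive slots, so updating one nexthop means
 * rewriting every slot it owns. Values are invented for the sketch. */
struct nexthop { int id; int num_adj_entries; };

int main(void)
{
	struct nexthop nhs[] = { { 1, 3 }, { 2, 1 } };	/* weights 3:1 */
	int adj_index = 100;	/* base of the group's block */

	for (unsigned int i = 0; i < sizeof(nhs) / sizeof(nhs[0]); i++) {
		for (int j = 0; j < nhs[i].num_adj_entries; j++)
			printf("adj entry %d -> nexthop %d\n",
			       adj_index + j, nhs[i].id);
		adj_index += nhs[i].num_adj_entries;
	}
	return 0;
}
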
@@ -3443,17 +3478,20 @@ int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+ struct mlxsw_sp_nexthop *nh,
+ bool force, char *ratr_pl)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
- return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
+ return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
+ force, ratr_pl);
}
static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
{
int i;
@@ -3461,7 +3499,7 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
int err;
err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
- nh);
+ nh, force, ratr_pl);
if (err)
return err;
}
@@ -3469,11 +3507,29 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
+{
+ /* When action is discard or trap, the nexthop must be
+ * programmed as an Ethernet nexthop.
+ */
+ if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
+ nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
+ nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
+ return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
+ force, ratr_pl);
+ else
+ return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
+ force, ratr_pl);
+}
+
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi,
bool reallocate)
{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
u32 adj_index = nhgi->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
@@ -3489,16 +3545,8 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
int err = 0;
- switch (nh->type) {
- case MLXSW_SP_NEXTHOP_TYPE_ETH:
- err = mlxsw_sp_nexthop_update
- (mlxsw_sp, adj_index, nh);
- break;
- case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- err = mlxsw_sp_nexthop_ipip_update
- (mlxsw_sp, adj_index, nh);
- break;
- }
+ err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
+ true, ratr_pl);
if (err)
return err;
nh->update = 0;
@@ -3524,34 +3572,69 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
+struct mlxsw_sp_adj_grp_size_range {
+ u16 start; /* Inclusive */
+ u16 end; /* Inclusive */
+};
+
+/* Ordered by range start value */
+static const struct mlxsw_sp_adj_grp_size_range
+mlxsw_sp1_adj_grp_size_ranges[] = {
+ { .start = 1, .end = 64 },
+ { .start = 512, .end = 512 },
+ { .start = 1024, .end = 1024 },
+ { .start = 2048, .end = 2048 },
+ { .start = 4096, .end = 4096 },
+};
+
+/* Ordered by range start value */
+static const struct mlxsw_sp_adj_grp_size_range
+mlxsw_sp2_adj_grp_size_ranges[] = {
+ { .start = 1, .end = 128 },
+ { .start = 256, .end = 256 },
+ { .start = 512, .end = 512 },
+ { .start = 1024, .end = 1024 },
+ { .start = 2048, .end = 2048 },
+ { .start = 4096, .end = 4096 },
+};
+
+static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size)
{
- /* Valid sizes for an adjacency group are:
- * 1-64, 512, 1024, 2048 and 4096.
- */
- if (*p_adj_grp_size <= 64)
- return;
- else if (*p_adj_grp_size <= 512)
- *p_adj_grp_size = 512;
- else if (*p_adj_grp_size <= 1024)
- *p_adj_grp_size = 1024;
- else if (*p_adj_grp_size <= 2048)
- *p_adj_grp_size = 2048;
- else
- *p_adj_grp_size = 4096;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (*p_adj_grp_size >= size_range->start &&
+ *p_adj_grp_size <= size_range->end)
+ return;
+
+ if (*p_adj_grp_size <= size_range->end) {
+ *p_adj_grp_size = size_range->end;
+ return;
+ }
+ }
}
-static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
+static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size,
unsigned int alloc_size)
{
- if (alloc_size >= 4096)
- *p_adj_grp_size = 4096;
- else if (alloc_size >= 2048)
- *p_adj_grp_size = 2048;
- else if (alloc_size >= 1024)
- *p_adj_grp_size = 1024;
- else if (alloc_size >= 512)
- *p_adj_grp_size = 512;
+ int i;
+
+ for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (alloc_size >= size_range->end) {
+ *p_adj_grp_size = size_range->end;
+ return;
+ }
+ }
}
static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
@@ -3563,7 +3646,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
/* Round up the requested group size to the next size supported
* by the device and make sure the request can be satisfied.
*/
- mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
+ mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
*p_adj_grp_size, &alloc_size);
@@ -3573,7 +3656,7 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* entries than requested. Try to use as much of them as
* possible.
*/
- mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
+ mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
return 0;
}
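The open-coded size ladder is replaced by per-ASIC tables of valid adjacency-group size ranges, ordered by start value, so Spectrum-2's different set ({1-128, 256, 512, 1024, 2048, 4096}) needs no special-casing. A standalone model of the round-up step over that table:

#include <stdio.h>

/* Userspace model of the range-based round-up above, using the
 * Spectrum-2 table from this patch. */
struct range { unsigned short start, end; };

static const struct range ranges[] = {
	{ 1, 128 }, { 256, 256 }, { 512, 512 },
	{ 1024, 1024 }, { 2048, 2048 }, { 4096, 4096 },
};
#define N (sizeof(ranges) / sizeof(ranges[0]))

static unsigned short round_up_size(unsigned short size)
{
	for (unsigned int i = 0; i < N; i++) {
		if (size >= ranges[i].start && size <= ranges[i].end)
			return size;		/* already a valid size */
		if (size <= ranges[i].end)
			return ranges[i].end;	/* next supported size */
	}
	return size;
}

int main(void)
{
	unsigned short sizes[] = { 5, 129, 300, 4096 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u -> %u\n", sizes[i], round_up_size(sizes[i]));
	return 0;
}
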
@@ -3681,9 +3764,29 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
}
static void
+mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop *nh,
+ u16 bucket_index)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ bool offload = false, trap = false;
+
+ if (nh->offloaded) {
+ if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
+ trap = true;
+ else
+ offload = true;
+ }
+ nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ bucket_index, offload, trap);
+}
+
+static void
mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ int i;
+
/* Do not update the flags if the nexthop group is being destroyed
* since:
* 1. The nexthop object is being deleted, in which case the flags are
@@ -3697,6 +3800,18 @@ mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
nh_grp->nhgi->adj_index_valid, false);
+
+ /* Update flags of individual nexthop buckets in case of a resilient
+ * nexthop group.
+ */
+ if (!nh_grp->nhgi->is_resilient)
+ return;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
+ }
}
static void
@@ -3750,6 +3865,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
}
+ /* Flags of individual nexthop buckets might need to be
+ * updated.
+ */
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
return 0;
}
mlxsw_sp_nexthop_group_normalize(nhgi);
@@ -3832,10 +3951,15 @@ set_trap:
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
bool removing)
{
- if (!removing)
+ if (!removing) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
nh->should_offload = 1;
- else
+ } else if (nh->nhgi->is_resilient) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
+ nh->should_offload = 1;
+ } else {
nh->should_offload = 0;
+ }
nh->update = 1;
}
@@ -4250,6 +4374,85 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop_group *nh_grp,
+ unsigned long *activity)
+{
+ char *ratrad_pl;
+ int i, err;
+
+ ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
+ if (!ratrad_pl)
+ return;
+
+ mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
+ nh_grp->nhgi->count);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
+ if (err)
+ goto out;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
+ continue;
+ bitmap_set(activity, i, 1);
+ }
+
+out:
+ kfree(ratrad_pl);
+}
+
+#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
+
+static void
+mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned long *activity;
+
+ activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
+ if (!activity)
+ return;
+
+ mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
+ nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ nh_grp->nhgi->count, activity);
+
+ bitmap_free(activity);
+}
+
+static void
+mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_router *router;
+ bool reschedule = false;
+
+ router = container_of(work, struct mlxsw_sp_router,
+ nh_grp_activity_dw.work);
+
+ mutex_lock(&router->lock);
+
+ list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
+ mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
+ reschedule = true;
+ }
+
+ mutex_unlock(&router->lock);
+
+ if (!reschedule)
+ return;
+ mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
+}
+
static int
mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
const struct nh_notifier_single_info *nh,
@@ -4268,6 +4471,29 @@ mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_single_info *nh,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
+ if (err)
+ return err;
+
+ /* Device-only nexthops with an IPIP device are programmed as
+ * encapsulating adjacency entries.
+ */
+ if (!nh->gw_family && !nh->is_reject &&
+ !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
const struct nh_notifier_grp_info *nh_grp,
struct netlink_ext_ack *extack)
@@ -4284,21 +4510,83 @@ mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
int err;
nh = &nh_grp->nh_entries[i].nh;
- err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh,
- extack);
+ err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ extack);
if (err)
return err;
+ }
- /* Device only nexthops with an IPIP device are programmed as
- * encapsulating adjacency entries.
- */
- if (!nh->gw_family && !nh->is_reject &&
- !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
- NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
- return -EINVAL;
+ return 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_res_table_info *nh_res_table,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int alloc_size;
+ bool valid_size = false;
+ int err, i;
+
+ if (nh_res_table->num_nh_buckets < 32) {
+ NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (nh_res_table->num_nh_buckets >= size_range->start &&
+ nh_res_table->num_nh_buckets <= size_range->end) {
+ valid_size = true;
+ break;
}
}
+ if (!valid_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_res_table->num_nh_buckets,
+ &alloc_size);
+ if (err || nh_res_table->num_nh_buckets != alloc_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_res_table_info *nh_res_table,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+ u16 i;
+
+ err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
+ nh_res_table,
+ extack);
+ if (err)
+ return err;
+
+ for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
+ const struct nh_notifier_single_info *nh;
+ int err;
+
+ nh = &nh_res_table->nhs[i];
+ err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -4306,7 +4594,11 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
unsigned long event,
struct nh_notifier_info *info)
{
- if (event != NEXTHOP_EVENT_REPLACE)
+ struct nh_notifier_single_info *nh;
+
+ if (event != NEXTHOP_EVENT_REPLACE &&
+ event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
+ event != NEXTHOP_EVENT_BUCKET_REPLACE)
return 0;
switch (info->type) {
@@ -4317,6 +4609,14 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
info->nh_grp,
info->extack);
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
+ info->nh_res_table,
+ info->extack);
+ case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
+ nh = &info->nh_res_bucket->new_nh;
+ return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ info->extack);
default:
NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
return -EOPNOTSUPP;
@@ -4334,6 +4634,7 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
return info->nh->gw_family || info->nh->is_reject ||
mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
case NH_NOTIFIER_INFO_TYPE_GRP:
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
/* Already validated earlier. */
return true;
default:
@@ -4346,7 +4647,7 @@ static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
{
u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
- nh->discard = 1;
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
nh->should_offload = 1;
/* While nexthops that discard packets do not forward packets
* via an egress RIF, they still need to be programmed using a
@@ -4398,6 +4699,15 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
if (nh_obj->is_reject)
mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
+ /* In a resilient nexthop group, all the nexthops must be written to
+ * the adjacency table, even if they do not have a valid neighbour or
+ * RIF.
+ */
+ if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
+ nh->should_offload = 1;
+ }
+
return 0;
err_type_init:
@@ -4409,11 +4719,12 @@ err_type_init:
static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- if (nh->discard)
+ if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ nh->should_offload = 0;
}
static int
@@ -4423,6 +4734,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
+ bool is_resilient = false;
unsigned int nhs;
int err, i;
@@ -4433,6 +4745,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
case NH_NOTIFIER_INFO_TYPE_GRP:
nhs = info->nh_grp->num_nh;
break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ nhs = info->nh_res_table->num_nh_buckets;
+ is_resilient = true;
+ break;
default:
return -EINVAL;
}
@@ -4443,6 +4759,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nh_grp->nhgi = nhgi;
nhgi->nh_grp = nh_grp;
nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
+ nhgi->is_resilient = is_resilient;
nhgi->count = nhs;
for (i = 0; i < nhgi->count; i++) {
struct nh_notifier_single_info *nh_obj;
@@ -4458,6 +4775,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nh_obj = &info->nh_grp->nh_entries[i].nh;
weight = info->nh_grp->nh_entries[i].weight;
break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ nh_obj = &info->nh_res_table->nhs[i];
+ weight = 1;
+ break;
default:
err = -EINVAL;
goto err_nexthop_obj_init;
@@ -4473,6 +4794,15 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
goto err_group_refresh;
}
+ /* Add resilient nexthop groups to a list so that the activity of their
+ * nexthop buckets will be periodically queried and cleared.
+ */
+ if (nhgi->is_resilient) {
+ if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
+ mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
+ list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
+ }
+
return 0;
err_group_refresh:
@@ -4491,8 +4821,15 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
int i;
+ if (nhgi->is_resilient) {
+ list_del(&nhgi->list);
+ if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
+ cancel_delayed_work(&router->nh_grp_activity_dw);
+ }
+
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -4685,6 +5022,136 @@ static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}
+static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index, char *ratr_pl)
+{
+ MLXSW_REG_ZERO(ratr, ratr_pl);
+ mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
+
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
+{
+ /* Clear the opcode and activity on both the old and new payload as
+ * they are irrelevant for the comparison.
+ */
+ mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_a_set(ratr_pl, 0);
+ mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
+
+ /* If the contents of the adjacency entry are consistent with the
+ * replacement request, then replacement was successful.
+ */
+ if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ u16 bucket_index = info->nh_res_bucket->bucket_index;
+ struct netlink_ext_ack *extack = info->extack;
+ bool force = info->nh_res_bucket->force;
+ char ratr_pl_new[MLXSW_REG_RATR_LEN];
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ u32 adj_index;
+ int err;
+
+ /* No point in trying an atomic replacement if the idle timer interval
+ * is smaller than the interval in which we query and clear activity.
+ */
+ if (!force && info->nh_res_bucket->idle_timer_ms <
+ MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
+ force = true;
+
+ adj_index = nh->nhgi->adj_index + bucket_index;
+ err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
+ return err;
+ }
+
+ if (!force) {
+ err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
+ ratr_pl_new);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
+ return err;
+ }
+
+ err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
+ return err;
+ }
+ }
+
+ nh->update = 0;
+ nh->offloaded = 1;
+ mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
+
+ return 0;
+}
+
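
A worked example of the force decision above (editor's addition): with MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL at 1000 ms, a replacement request with idle_timer_ms = 500 is forced, since activity is queried and cleared only once per second and a bucket can therefore never be proven idle for a shorter window; a request with idle_timer_ms = 2000 keeps the atomic path.
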
+static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ u16 bucket_index = info->nh_res_bucket->bucket_index;
+ struct netlink_ext_ack *extack = info->extack;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct nh_notifier_single_info *nh_obj;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop *nh;
+ int err;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
+ return -EINVAL;
+ }
+
+ nhgi = nh_grp->nhgi;
+
+ if (bucket_index >= nhgi->count) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
+ return -EINVAL;
+ }
+
+ nh = &nhgi->nexthops[bucket_index];
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+
+ nh_obj = &info->nh_res_bucket->new_nh;
+ err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
+ goto err_nexthop_obj_init;
+ }
+
+ err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
+ if (err)
+ goto err_nexthop_obj_bucket_adj_update;
+
+ return 0;
+
+err_nexthop_obj_bucket_adj_update:
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+err_nexthop_obj_init:
+ nh_obj = &info->nh_res_bucket->old_nh;
+ mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
+ /* The old adjacency entry was not overwritten */
+ nh->update = 0;
+ nh->offloaded = 1;
+ return err;
+}
+
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -4699,8 +5166,6 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
mutex_lock(&router->lock);
- ASSERT_RTNL();
-
switch (event) {
case NEXTHOP_EVENT_REPLACE:
err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
@@ -4708,6 +5173,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
case NEXTHOP_EVENT_DEL:
mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
break;
+ case NEXTHOP_EVENT_BUCKET_REPLACE:
+ err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
+ info);
+ break;
default:
break;
}
@@ -7667,7 +8136,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
- ops = mlxsw_sp->rif_ops_arr[type];
+ ops = mlxsw_sp->router->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
if (IS_ERR(vr))
@@ -8865,7 +9334,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
.deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};
-const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
+static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
@@ -9050,7 +9519,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
.deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};
-const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
+static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
@@ -9302,6 +9771,36 @@ static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}
+static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
+ mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
+ mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
+
+ return 0;
+}
+
+const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
+ .init = mlxsw_sp1_router_init,
+};
+
+static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
+ mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
+
+ return 0;
+}
+
+const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
+ .init = mlxsw_sp2_router_init,
+};
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
@@ -9315,6 +9814,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ err = mlxsw_sp->router_ops->init(mlxsw_sp);
+ if (err)
+ goto err_router_ops_init;
+
err = mlxsw_sp_router_xm_init(mlxsw_sp);
if (err)
goto err_xm_init;
@@ -9328,6 +9831,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ll_op_ctx_init;
+ INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
+ mlxsw_sp_nh_grp_activity_work);
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -9451,10 +9958,12 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
mlxsw_sp_router_xm_fini(mlxsw_sp);
err_xm_init:
+err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -9481,6 +9990,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 2875ee8ec537..be7708a375e1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -78,6 +78,10 @@ struct mlxsw_sp_router {
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
u16 lb_rif_index;
struct mlxsw_sp_router_xm *xm;
+ const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
+ size_t adj_grp_size_ranges_count;
+ struct delayed_work nh_grp_activity_dw;
+ struct list_head nh_res_grp_list;
};
struct mlxsw_sp_fib_entry_priv {
@@ -195,20 +199,20 @@ mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *except);
struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
struct mlxsw_sp_nexthop *nh);
-bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh);
unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh);
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index);
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
-bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh);
#define mlxsw_sp_nexthop_for_each(nh, router) \
for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
nh = mlxsw_sp_nexthop_next(router, nh))
int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh, u64 *p_counter);
-int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh);
+int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl);
void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh);
void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 1892cea05ee7..3398cc01e5ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -186,6 +186,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
/* Create a new port analyzer entry for local_port. */
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+ mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
@@ -203,6 +204,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
int pa_id = span_entry->id;
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
+ mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
@@ -938,7 +940,8 @@ mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
curr->parms.policer_enable == sparms->policer_enable &&
- curr->parms.policer_id == sparms->policer_id)
+ curr->parms.policer_id == sparms->policer_id &&
+ curr->parms.session_id == sparms->session_id)
return curr;
}
return NULL;
@@ -1085,6 +1088,7 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
sparms.policer_id = parms->policer_id;
sparms.policer_enable = parms->policer_enable;
+ sparms.session_id = parms->session_id;
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
if (!span_entry)
return -ENOBUFS;
@@ -1227,8 +1231,12 @@ __mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
return -EINVAL;
}
+ if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
+ return -EINVAL;
+
mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
- trigger_entry->parms.span_id);
+ trigger_entry->parms.span_id,
+ trigger_entry->parms.probability_rate);
return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
@@ -1362,8 +1370,11 @@ mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
return -EINVAL;
}
+ if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
+ return -EINVAL;
+
mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
- 1);
+ trigger_entry->parms.probability_rate);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}
@@ -1561,7 +1572,9 @@ int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
trigger,
mlxsw_sp_port);
if (trigger_entry) {
- if (trigger_entry->parms.span_id != parms->span_id)
+ if (trigger_entry->parms.span_id != parms->span_id ||
+ trigger_entry->parms.probability_rate !=
+ parms->probability_rate)
return -EINVAL;
refcount_inc(&trigger_entry->ref_count);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index aa1cd409c0e2..efaefd1ae863 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -13,6 +13,19 @@
struct mlxsw_sp;
struct mlxsw_sp_port;
+/* SPAN session identifiers that correspond to MLXSW_TRAP_ID_MIRROR_SESSION<i>
+ * trap identifiers. The session identifier is an attribute of the SPAN agent,
+ * which determines the trap identifier of packets that are mirrored to the
+ * CPU. Packets that are trapped to the CPU for the same logical reason (e.g.,
+ * buffer drops) should use the same session identifier.
+ */
+enum mlxsw_sp_span_session_id {
+ MLXSW_SP_SPAN_SESSION_ID_BUFFER,
+ MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+
+ __MLXSW_SP_SPAN_SESSION_ID_MAX = 8,
+};
+
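
A hedged usage sketch (editor's addition) of the new session identifier; mlxsw_sp_span_agent_get() and the agent parms structure are the ones extended by this patch, and the surrounding variables are hypothetical:

	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = to_dev,	/* hypothetical mirror target netdev */
		.session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
	};
	int span_id, err;

	/* Packets mirrored to the CPU by this agent arrive with the
	 * MLXSW_TRAP_ID_MIRROR_SESSION<i> trap identifier of the
	 * SAMPLING session.
	 */
	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
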
struct mlxsw_sp_span_parms {
struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
unsigned int ttl;
@@ -23,6 +36,7 @@ struct mlxsw_sp_span_parms {
u16 vid;
u16 policer_id;
bool policer_enable;
+ enum mlxsw_sp_span_session_id session_id;
};
enum mlxsw_sp_span_trigger {
@@ -35,12 +49,14 @@ enum mlxsw_sp_span_trigger {
struct mlxsw_sp_span_trigger_parms {
int span_id;
+ u32 probability_rate;
};
struct mlxsw_sp_span_agent_parms {
const struct net_device *to_dev;
u16 policer_id;
bool policer_enable;
+ enum mlxsw_sp_span_session_id session_id;
};
struct mlxsw_sp_span_entry_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 23b7e8d6386b..eeccd586e781 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -98,6 +98,10 @@ struct mlxsw_sp_bridge_ops {
const struct mlxsw_sp_fid *fid);
};
+struct mlxsw_sp_switchdev_ops {
+ void (*init)(struct mlxsw_sp *mlxsw_sp);
+};
+
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -2296,7 +2300,7 @@ mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
vid, ETH_P_8021AD, extack);
}
-static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
+static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
.port_join = mlxsw_sp_bridge_8021ad_port_join,
.port_leave = mlxsw_sp_bridge_8021ad_port_leave,
.vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
@@ -2305,6 +2309,53 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
.fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
};
+static int
+mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* The EtherType of decapsulated packets is determined at the egress
+ * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
+ * co-exist.
+ */
+ err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
+ mlxsw_sp_port, extack);
+ if (err)
+ goto err_bridge_8021ad_port_join;
+
+ return 0;
+
+err_bridge_8021ad_port_join:
+ mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
+ return err;
+}
+
+static void
+mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
+ .port_join = mlxsw_sp2_bridge_8021ad_port_join,
+ .port_leave = mlxsw_sp2_bridge_8021ad_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev,
@@ -2865,7 +2916,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
return;
if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
- !switchdev_work->fdb_info.added_by_user)
+ (!switchdev_work->fdb_info.added_by_user ||
+ switchdev_work->fdb_info.is_local))
return;
if (!netif_running(dev))
@@ -2920,7 +2972,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
- if (!fdb_info->added_by_user)
+ if (!fdb_info->added_by_user || fdb_info->is_local)
break;
err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
if (err)
@@ -3535,6 +3587,24 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
+static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
+}
+
+const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
+ .init = mlxsw_sp1_switchdev_init,
+};
+
+static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
+}
+
+const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
+ .init = mlxsw_sp2_switchdev_init,
+};
+
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -3549,7 +3619,8 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
- bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops;
+
+ mlxsw_sp->switchdev_ops->init(mlxsw_sp);
return mlxsw_sp_fdb_init(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 4ef12e3e021a..26d01adbedad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -49,8 +49,14 @@ enum {
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
enum {
+ /* Packet was mirrored from ingress. */
+ MLXSW_SP_MIRROR_REASON_INGRESS = 1,
+ /* Packet was mirrored from policy engine. */
+ MLXSW_SP_MIRROR_REASON_POLICY_ENGINE = 2,
/* Packet was early dropped. */
MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9,
+ /* Packet was mirrored from egress. */
+ MLXSW_SP_MIRROR_REASON_EGRESS = 14,
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -106,7 +112,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
void *trap_ctx)
{
- u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index;
+ u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index;
const struct flow_action_cookie *fa_cookie;
struct devlink_port *in_devlink_port;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -202,21 +208,175 @@ static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port);
}
+static struct mlxsw_sp_port *
+mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_rx_md_info *rx_md_info)
+{
+ u8 local_port;
+
+ if (!rx_md_info->tx_port_valid)
+ return NULL;
+
+ if (rx_md_info->tx_port_is_lag)
+ local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
+ rx_md_info->tx_lag_id,
+ rx_md_info->tx_lag_port_index);
+ else
+ local_port = rx_md_info->tx_sys_port;
+
+ if (local_port >= mlxsw_core_max_ports(mlxsw_sp->core))
+ return NULL;
+
+ return mlxsw_sp->ports[local_port];
+}
+
+/* The latency units are determined according to MOGCR.mirror_latency_units. It
+ * defaults to 64 nanoseconds.
+ */
+#define MLXSW_SP_MIRROR_LATENCY_SHIFT 6
+
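
A worked example of the conversion this shift performs (editor's addition; the helper name is hypothetical):

static inline u64 mlxsw_sp_mirror_latency_units_to_ns(u64 latency_units)
{
	/* With the default 64 ns unit, a reported latency of 100 units
	 * corresponds to 100 << 6 = 6400 ns.
	 */
	return latency_units << MLXSW_SP_MIRROR_LATENCY_SHIFT;
}
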
+static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp,
+ struct psample_metadata *md,
+ struct sk_buff *skb, int in_ifindex,
+ bool truncate, u32 trunc_size)
+{
+ struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ md->trunc_size = truncate ? trunc_size : skb->len;
+ md->in_ifindex = in_ifindex;
+ mlxsw_sp_port = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info);
+ md->out_ifindex = mlxsw_sp_port && mlxsw_sp_port->dev ?
+ mlxsw_sp_port->dev->ifindex : 0;
+ md->out_tc_valid = rx_md_info->tx_tc_valid;
+ md->out_tc = rx_md_info->tx_tc;
+ md->out_tc_occ_valid = rx_md_info->tx_congestion_valid;
+ md->out_tc_occ = rx_md_info->tx_congestion;
+ md->latency_valid = rx_md_info->latency_valid;
+ md->latency = rx_md_info->latency;
+ md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT;
+}
+
static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params *params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct psample_metadata md = {};
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ trigger.local_port = local_port;
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_port *mlxsw_sp_port, *mlxsw_sp_port_tx;
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params *params;
+ struct psample_metadata md = {};
+ int err;
+
+ /* Locally generated packets are not reported from the policy engine
+ * trigger, so do not report them from the egress trigger either.
+ */
+ if (local_port == MLXSW_PORT_CPU_PORT)
+ goto out;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ /* Packet was sampled from Tx, so we need to retrieve the sample
+ * parameters based on the Tx port and not the Rx port.
+ */
+ mlxsw_sp_port_tx = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info);
+ if (!mlxsw_sp_port_tx)
+ goto out;
+
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port_tx->local_port;
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_sample_params *params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct psample_metadata md = {};
int err;
err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
if (err)
return;
- /* The sample handler expects skb->data to point to the start of the
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
* Ethernet header.
*/
skb_push(skb, ETH_HLEN);
- mlxsw_sp_sample_receive(mlxsw_sp, skb, local_port);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
}
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
@@ -464,11 +624,6 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.priority = 2,
},
{
- .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
- .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
- .priority = 0,
- },
- {
.group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18),
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
.priority = 4,
@@ -993,14 +1148,6 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
},
{
- .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
- MIRROR),
- .listeners_arr = {
- MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
- MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
- },
- },
- {
.trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP),
.listeners_arr = {
MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU,
@@ -1709,10 +1856,23 @@ int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
static const struct mlxsw_sp_trap_group_item
mlxsw_sp1_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ },
};
static const struct mlxsw_sp_trap_item
mlxsw_sp1_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
+ MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
+ },
+ },
};
static int
@@ -1749,6 +1909,12 @@ mlxsw_sp2_trap_group_items_arr[] = {
.priority = 0,
.fixed_policer = true,
},
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ .fixed_policer = true,
+ },
};
static const struct mlxsw_sp_trap_item
@@ -1760,6 +1926,21 @@ mlxsw_sp2_trap_items_arr[] = {
},
.is_source = true,
},
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_INGRESS),
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_tx_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_EGRESS),
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_acl_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_POLICY_ENGINE),
+ },
+ },
};
static int
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 2feed6ce19d3..13eef6e9bd2d 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -193,11 +193,10 @@ static void ks8851_read_mac_addr(struct net_device *dev)
static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
{
struct net_device *dev = ks->netdev;
- const u8 *mac_addr;
+ int ret;
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->dev_addr, mac_addr);
+ ret = of_get_mac_address(np, dev->dev_addr);
+ if (!ret) {
ks8851_write_mac_addr(dev);
return;
}
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 2c0dcd7acf3f..3658c4ae3c37 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -222,7 +222,6 @@ static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
unsigned long timeout = jiffies + msecs_to_jiffies(2000);
u16 phstat1;
u16 estat;
- int ret = 0;
phstat1 = encx24j600_read_phy(priv, PHSTAT1);
while ((phstat1 & ANDONE) == 0) {
@@ -258,7 +257,7 @@ static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
encx24j600_write_reg(priv, MACLCON, 0x370f);
}
- return ret;
+ return 0;
}
/* Access the PHY to determine link status */
@@ -1118,17 +1117,7 @@ static struct spi_driver encx24j600_spi_net_driver = {
.id_table = encx24j600_spi_id_table,
};
-static int __init encx24j600_init(void)
-{
- return spi_register_driver(&encx24j600_spi_net_driver);
-}
-module_init(encx24j600_init);
-
-static void encx24j600_exit(void)
-{
- spi_unregister_driver(&encx24j600_spi_net_driver);
-}
-module_exit(encx24j600_exit);
+module_spi_driver(encx24j600_spi_net_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index c5de8f46cdd3..91a755efe2e6 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -730,8 +730,8 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_eee *eee)
{
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = NULL;
+ struct lan743x_adapter *adapter;
+ struct phy_device *phydev;
u32 buf = 0;
int ret = 0;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 7b6794aa8ea9..dae10328c6cf 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2771,7 +2771,6 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
{
struct lan743x_adapter *adapter = NULL;
struct net_device *netdev = NULL;
- const void *mac_addr;
int ret = -ENODEV;
netdev = devm_alloc_etherdev(&pdev->dev,
@@ -2788,9 +2787,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
- mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(adapter->mac_address, mac_addr);
+ of_get_mac_address(pdev->dev.of_node, adapter->mac_address);
ret = lan743x_pci_init(adapter, pdev);
if (ret)
@@ -3004,7 +3001,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_pm_set_wol(adapter);
/* Host sets PME_En, put D3hot */
- return pci_prepare_to_sleep(pdev);;
+ return pci_prepare_to_sleep(pdev);
}
static int lan743x_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
new file mode 100644
index 000000000000..fe4e7a7d9c0b
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -0,0 +1,29 @@
+#
+# Microsoft Azure network device configuration
+#
+
+config NET_VENDOR_MICROSOFT
+ bool "Microsoft Network Devices"
+ default y
+ help
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip the
+ question about Microsoft network devices. If you say Y, you will be
+ asked for your specific device in the following question.
+
+if NET_VENDOR_MICROSOFT
+
+config MICROSOFT_MANA
+ tristate "Microsoft Azure Network Adapter (MANA) support"
+ depends on PCI_MSI && X86_64
+ depends on PCI_HYPERV
+ help
+ This driver supports the Microsoft Azure Network Adapter (MANA).
+ So far, the driver is only supported on X86_64.
+
+ To compile this driver as a module, choose M here.
+ The module will be called mana.
+
+endif #NET_VENDOR_MICROSOFT
diff --git a/drivers/net/ethernet/microsoft/Makefile b/drivers/net/ethernet/microsoft/Makefile
new file mode 100644
index 000000000000..d2ddc218135f
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Microsoft Azure network device driver.
+#
+
+obj-$(CONFIG_MICROSOFT_MANA) += mana/
diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile
new file mode 100644
index 000000000000..0edd5bb685f3
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#
+# Makefile for the Microsoft Azure Network Adapter driver
+
+obj-$(CONFIG_MICROSOFT_MANA) += mana.o
+mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
new file mode 100644
index 000000000000..33e53d32e891
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -0,0 +1,673 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _GDMA_H
+#define _GDMA_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+
+#include "shm_channel.h"
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+enum gdma_request_type {
+ GDMA_VERIFY_VF_DRIVER_VERSION = 1,
+ GDMA_QUERY_MAX_RESOURCES = 2,
+ GDMA_LIST_DEVICES = 3,
+ GDMA_REGISTER_DEVICE = 4,
+ GDMA_DEREGISTER_DEVICE = 5,
+ GDMA_GENERATE_TEST_EQE = 10,
+ GDMA_CREATE_QUEUE = 12,
+ GDMA_DISABLE_QUEUE = 13,
+ GDMA_CREATE_DMA_REGION = 25,
+ GDMA_DMA_REGION_ADD_PAGES = 26,
+ GDMA_DESTROY_DMA_REGION = 27,
+};
+
+enum gdma_queue_type {
+ GDMA_INVALID_QUEUE,
+ GDMA_SQ,
+ GDMA_RQ,
+ GDMA_CQ,
+ GDMA_EQ,
+};
+
+enum gdma_work_request_flags {
+ GDMA_WR_NONE = 0,
+ GDMA_WR_OOB_IN_SGL = BIT(0),
+ GDMA_WR_PAD_BY_SGE0 = BIT(1),
+};
+
+enum gdma_eqe_type {
+ GDMA_EQE_COMPLETION = 3,
+ GDMA_EQE_TEST_EVENT = 64,
+ GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
+ GDMA_EQE_HWC_INIT_DATA = 130,
+ GDMA_EQE_HWC_INIT_DONE = 131,
+};
+
+enum {
+ GDMA_DEVICE_NONE = 0,
+ GDMA_DEVICE_HWC = 1,
+ GDMA_DEVICE_MANA = 2,
+};
+
+struct gdma_resource {
+ /* Protect the bitmap */
+ spinlock_t lock;
+
+ /* The bitmap size in bits. */
+ u32 size;
+
+ /* The bitmap tracks the resources. */
+ unsigned long *map;
+};
+
+union gdma_doorbell_entry {
+ u64 as_uint64;
+
+ struct {
+ u64 id : 24;
+ u64 reserved : 8;
+ u64 tail_ptr : 31;
+ u64 arm : 1;
+ } cq;
+
+ struct {
+ u64 id : 24;
+ u64 wqe_cnt : 8;
+ u64 tail_ptr : 32;
+ } rq;
+
+ struct {
+ u64 id : 24;
+ u64 reserved : 8;
+ u64 tail_ptr : 32;
+ } sq;
+
+ struct {
+ u64 id : 16;
+ u64 reserved : 16;
+ u64 tail_ptr : 31;
+ u64 arm : 1;
+ } eq;
+}; /* HW DATA */
+
+struct gdma_msg_hdr {
+ u32 hdr_type;
+ u32 msg_type;
+ u16 msg_version;
+ u16 hwc_msg_id;
+ u32 msg_size;
+}; /* HW DATA */
+
+struct gdma_dev_id {
+ union {
+ struct {
+ u16 type;
+ u16 instance;
+ };
+
+ u32 as_uint32;
+ };
+}; /* HW DATA */
+
+struct gdma_req_hdr {
+ struct gdma_msg_hdr req;
+ struct gdma_msg_hdr resp; /* The expected response */
+ struct gdma_dev_id dev_id;
+ u32 activity_id;
+}; /* HW DATA */
+
+struct gdma_resp_hdr {
+ struct gdma_msg_hdr response;
+ struct gdma_dev_id dev_id;
+ u32 activity_id;
+ u32 status;
+ u32 reserved;
+}; /* HW DATA */
+
+struct gdma_general_req {
+ struct gdma_req_hdr hdr;
+}; /* HW DATA */
+
+#define GDMA_MESSAGE_V1 1
+
+struct gdma_general_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+#define GDMA_STANDARD_HEADER_TYPE 0
+
+static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
+ u32 req_size, u32 resp_size)
+{
+ hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
+ hdr->req.msg_type = code;
+ hdr->req.msg_version = GDMA_MESSAGE_V1;
+ hdr->req.msg_size = req_size;
+
+ hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
+ hdr->resp.msg_type = code;
+ hdr->resp.msg_version = GDMA_MESSAGE_V1;
+ hdr->resp.msg_size = resp_size;
+}
+
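
A hedged usage sketch (editor's addition) of the helper above, preparing a GDMA_QUERY_MAX_RESOURCES exchange; the request/response structures are defined later in this header, and the local variables (including the gdma_context pointer 'gc') are hypothetical:

	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	/* The header now carries the message type and the expected
	 * request/response sizes in both directions.
	 */
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
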
+/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
+struct gdma_sge {
+ u64 address;
+ u32 mem_key;
+ u32 size;
+}; /* HW DATA */
+
+struct gdma_wqe_request {
+ struct gdma_sge *sgl;
+ u32 num_sge;
+
+ u32 inline_oob_size;
+ const void *inline_oob_data;
+
+ u32 flags;
+ u32 client_data_unit;
+};
+
+enum gdma_page_type {
+ GDMA_PAGE_TYPE_4K,
+};
+
+#define GDMA_INVALID_DMA_REGION 0
+
+struct gdma_mem_info {
+ struct device *dev;
+
+ dma_addr_t dma_handle;
+ void *virt_addr;
+ u64 length;
+
+ /* Allocated by the PF driver */
+ u64 gdma_region;
+};
+
+#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
+
+struct gdma_dev {
+ struct gdma_context *gdma_context;
+
+ struct gdma_dev_id dev_id;
+
+ u32 pdid;
+ u32 doorbell;
+ u32 gpa_mkey;
+
+ /* GDMA driver specific pointer */
+ void *driver_data;
+};
+
+#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+
+#define GDMA_CQE_SIZE 64
+#define GDMA_EQE_SIZE 16
+#define GDMA_MAX_SQE_SIZE 512
+#define GDMA_MAX_RQE_SIZE 256
+
+#define GDMA_COMP_DATA_SIZE 0x3C
+
+#define GDMA_EVENT_DATA_SIZE 0xC
+
+/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
+#define GDMA_WQE_BU_SIZE 32
+
+#define INVALID_PDID UINT_MAX
+#define INVALID_DOORBELL UINT_MAX
+#define INVALID_MEM_KEY UINT_MAX
+#define INVALID_QUEUE_ID UINT_MAX
+#define INVALID_PCI_MSIX_INDEX UINT_MAX
+
+struct gdma_comp {
+ u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
+ u32 wq_num;
+ bool is_sq;
+};
+
+struct gdma_event {
+ u32 details[GDMA_EVENT_DATA_SIZE / 4];
+ u8 type;
+};
+
+struct gdma_queue;
+
+#define CQE_POLLING_BUFFER 512
+struct mana_eq {
+ struct gdma_queue *eq;
+ struct gdma_comp cqe_poll[CQE_POLLING_BUFFER];
+};
+
+typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
+ struct gdma_event *e);
+
+typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
+
+/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
+ * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
+ * driver increases the 'head' in BUs rather than in bytes, and notifies
+ * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
+ * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
+ *
+ * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
+ * processed, the driver increases the 'tail' to indicate that WQEs have
+ * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
+ *
+ * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
+ * that the EQ/CQ is big enough that it cannot overflow, and the driver uses
+ * the owner bits mechanism to detect if the queue has become empty.
+ */
+struct gdma_queue {
+ struct gdma_dev *gdma_dev;
+
+ enum gdma_queue_type type;
+ u32 id;
+
+ struct gdma_mem_info mem_info;
+
+ void *queue_mem_ptr;
+ u32 queue_size;
+
+ bool monitor_avl_buf;
+
+ u32 head;
+ u32 tail;
+
+ /* Extra fields specific to EQ/CQ. */
+ union {
+ struct {
+ bool disable_needed;
+
+ gdma_eq_callback *callback;
+ void *context;
+
+ unsigned int msix_index;
+
+ u32 log2_throttle_limit;
+
+ /* NAPI data */
+ struct napi_struct napi;
+ int work_done;
+ int budget;
+ } eq;
+
+ struct {
+ gdma_cq_callback *callback;
+ void *context;
+
+ struct gdma_queue *parent; /* For CQ/EQ relationship */
+ } cq;
+ };
+};
+
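
An illustrative sketch (editor's addition) of the producer-index arithmetic described in the comment above: for SQ/RQ the head advances in 32-byte Basic Units rather than bytes. The helper is hypothetical; the names mirror this header:

static inline void example_advance_sq_head(struct gdma_queue *wq,
					   u32 wqe_size_in_bytes)
{
	/* e.g. a 96-byte WQE advances the head by 96 / 32 = 3 BUs */
	wq->head += wqe_size_in_bytes / GDMA_WQE_BU_SIZE;
}
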
+struct gdma_queue_spec {
+ enum gdma_queue_type type;
+ bool monitor_avl_buf;
+ unsigned int queue_size;
+
+ /* Extra fields specific to EQ/CQ. */
+ union {
+ struct {
+ gdma_eq_callback *callback;
+ void *context;
+
+ unsigned long log2_throttle_limit;
+
+ /* Only used by the MANA device. */
+ struct net_device *ndev;
+ } eq;
+
+ struct {
+ gdma_cq_callback *callback;
+ void *context;
+
+ struct gdma_queue *parent_eq;
+
+ } cq;
+ };
+};
+
+struct gdma_irq_context {
+ void (*handler)(void *arg);
+ void *arg;
+};
+
+struct gdma_context {
+ struct device *dev;
+
+ /* Per-vPort max number of queues */
+ unsigned int max_num_queues;
+ unsigned int max_num_msix;
+ unsigned int num_msix_usable;
+ struct gdma_resource msix_resource;
+ struct gdma_irq_context *irq_contexts;
+
+ /* This maps a CQ index to the queue structure. */
+ unsigned int max_num_cqs;
+ struct gdma_queue **cq_table;
+
+ /* Protect eq_test_event and test_event_eq_id */
+ struct mutex eq_test_event_mutex;
+ struct completion eq_test_event;
+ u32 test_event_eq_id;
+
+ void __iomem *bar0_va;
+ void __iomem *shm_base;
+ void __iomem *db_page_base;
+ u32 db_page_size;
+
+ /* Shared memory channel (used to bootstrap the HWC) */
+ struct shm_channel shm_channel;
+
+ /* Hardware communication channel (HWC) */
+ struct gdma_dev hwc;
+
+ /* Azure network adapter */
+ struct gdma_dev mana;
+};
+
+#define MAX_NUM_GDMA_DEVICES 4
+
+static inline bool mana_gd_is_mana(struct gdma_dev *gd)
+{
+ return gd->dev_id.type == GDMA_DEVICE_MANA;
+}
+
+static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
+{
+ return gd->dev_id.type == GDMA_DEVICE_HWC;
+}
+
+u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
+u32 mana_gd_wq_avail_space(struct gdma_queue *wq);
+
+int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);
+
+int mana_gd_create_hwc_queue(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+int mana_gd_create_mana_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
+
+int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
+
+void mana_gd_arm_cq(struct gdma_queue *cq);
+
+struct gdma_wqe {
+ u32 reserved :24;
+ u32 last_vbytes :8;
+
+ union {
+ u32 flags;
+
+ struct {
+ u32 num_sge :8;
+ u32 inline_oob_size_div4:3;
+ u32 client_oob_in_sgl :1;
+ u32 reserved1 :4;
+ u32 client_data_unit :14;
+ u32 reserved2 :2;
+ };
+ };
+}; /* HW DATA */
+
+#define INLINE_OOB_SMALL_SIZE 8
+#define INLINE_OOB_LARGE_SIZE 24
+
+#define MAX_TX_WQE_SIZE 512
+#define MAX_RX_WQE_SIZE 256
+
+struct gdma_cqe {
+ u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
+
+ union {
+ u32 as_uint32;
+
+ struct {
+ u32 wq_num : 24;
+ u32 is_sq : 1;
+ u32 reserved : 4;
+ u32 owner_bits : 3;
+ };
+ } cqe_info;
+}; /* HW DATA */
+
+#define GDMA_CQE_OWNER_BITS 3
+
+#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
+
+#define SET_ARM_BIT 1
+
+#define GDMA_EQE_OWNER_BITS 3
+
+union gdma_eqe_info {
+ u32 as_uint32;
+
+ struct {
+ u32 type : 8;
+ u32 reserved1 : 8;
+ u32 client_id : 2;
+ u32 reserved2 : 11;
+ u32 owner_bits : 3;
+ };
+}; /* HW DATA */
+
+#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
+#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
+
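
An illustrative sketch (editor's addition) of the owner-bits mechanism that the gdma_queue comment refers to: with 2^log2_num_entries entries, the owner bits expected at 'head' advance by one on every wrap of the ring, so a stale EQE can be told apart from a freshly written one without a consumer index. The helper is hypothetical:

static inline bool example_eqe_is_new(u32 head, u32 log2_num_entries,
				      u32 eqe_owner_bits)
{
	u32 expected = (head >> log2_num_entries) & GDMA_EQE_OWNER_MASK;

	return eqe_owner_bits == expected;
}
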
+struct gdma_eqe {
+ u32 details[GDMA_EVENT_DATA_SIZE / 4];
+ u32 eqe_info;
+}; /* HW DATA */
+
+#define GDMA_REG_DB_PAGE_OFFSET 8
+#define GDMA_REG_DB_PAGE_SIZE 0x10
+#define GDMA_REG_SHM_OFFSET 0x18
+
+struct gdma_posted_wqe_info {
+ u32 wqe_size_in_bu;
+};
+
+/* GDMA_GENERATE_TEST_EQE */
+struct gdma_generate_test_event_req {
+ struct gdma_req_hdr hdr;
+ u32 queue_index;
+}; /* HW DATA */
+
+/* GDMA_VERIFY_VF_DRIVER_VERSION */
+enum {
+ GDMA_PROTOCOL_V1 = 1,
+ GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
+ GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
+};
+
+struct gdma_verify_ver_req {
+ struct gdma_req_hdr hdr;
+
+ /* Mandatory fields required for protocol establishment */
+ u64 protocol_ver_min;
+ u64 protocol_ver_max;
+ u64 drv_cap_flags1;
+ u64 drv_cap_flags2;
+ u64 drv_cap_flags3;
+ u64 drv_cap_flags4;
+
+ /* Advisory fields */
+ u64 drv_ver;
+ u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
+ u32 reserved;
+ u32 os_ver_major;
+ u32 os_ver_minor;
+ u32 os_ver_build;
+ u32 os_ver_platform;
+ u64 reserved_2;
+ u8 os_ver_str1[128];
+ u8 os_ver_str2[128];
+ u8 os_ver_str3[128];
+ u8 os_ver_str4[128];
+}; /* HW DATA */
+
+struct gdma_verify_ver_resp {
+ struct gdma_resp_hdr hdr;
+ u64 gdma_protocol_ver;
+ u64 pf_cap_flags1;
+ u64 pf_cap_flags2;
+ u64 pf_cap_flags3;
+ u64 pf_cap_flags4;
+}; /* HW DATA */
+
+/* GDMA_QUERY_MAX_RESOURCES */
+struct gdma_query_max_resources_resp {
+ struct gdma_resp_hdr hdr;
+ u32 status;
+ u32 max_sq;
+ u32 max_rq;
+ u32 max_cq;
+ u32 max_eq;
+ u32 max_db;
+ u32 max_mst;
+ u32 max_cq_mod_ctx;
+ u32 max_mod_cq;
+ u32 max_msix;
+}; /* HW DATA */
+
+/* GDMA_LIST_DEVICES */
+struct gdma_list_devices_resp {
+ struct gdma_resp_hdr hdr;
+ u32 num_of_devs;
+ u32 reserved;
+ struct gdma_dev_id devs[64];
+}; /* HW DATA */
+
+/* GDMA_REGISTER_DEVICE */
+struct gdma_register_device_resp {
+ struct gdma_resp_hdr hdr;
+ u32 pdid;
+ u32 gpa_mkey;
+ u32 db_id;
+}; /* HW DATA */
+
+/* GDMA_CREATE_QUEUE */
+struct gdma_create_queue_req {
+ struct gdma_req_hdr hdr;
+ u32 type;
+ u32 reserved1;
+ u32 pdid;
+ u32 doorbell_id;
+ u64 gdma_region;
+ u32 reserved2;
+ u32 queue_size;
+ u32 log2_throttle_limit;
+ u32 eq_pci_msix_index;
+ u32 cq_mod_ctx_id;
+ u32 cq_parent_eq_id;
+ u8 rq_drop_on_overrun;
+ u8 rq_err_on_wqe_overflow;
+ u8 rq_chain_rec_wqes;
+ u8 sq_hw_db;
+ u32 reserved3;
+}; /* HW DATA */
+
+struct gdma_create_queue_resp {
+ struct gdma_resp_hdr hdr;
+ u32 queue_index;
+}; /* HW DATA */
+
+/* GDMA_DISABLE_QUEUE */
+struct gdma_disable_queue_req {
+ struct gdma_req_hdr hdr;
+ u32 type;
+ u32 queue_index;
+ u32 alloc_res_id_on_creation;
+}; /* HW DATA */
+
+/* GDMA_CREATE_DMA_REGION */
+struct gdma_create_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+ /* The total size of the DMA region */
+ u64 length;
+
+ /* The offset in the first page */
+ u32 offset_in_page;
+
+ /* enum gdma_page_type */
+ u32 gdma_page_type;
+
+ /* The total number of pages */
+ u32 page_count;
+
+ /* If page_addr_list_len is smaller than page_count,
+ * the remaining page addresses will be added via the
+ * message GDMA_DMA_REGION_ADD_PAGES.
+ */
+ u32 page_addr_list_len;
+ u64 page_addr_list[];
+}; /* HW DATA */
+
+struct gdma_create_dma_region_resp {
+ struct gdma_resp_hdr hdr;
+ u64 gdma_region;
+}; /* HW DATA */
+
+/* GDMA_DMA_REGION_ADD_PAGES */
+struct gdma_dma_region_add_pages_req {
+ struct gdma_req_hdr hdr;
+
+ u64 gdma_region;
+
+ u32 page_addr_list_len;
+ u32 reserved3;
+
+ u64 page_addr_list[];
+}; /* HW DATA */
+
+/* GDMA_DESTROY_DMA_REGION */
+struct gdma_destroy_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+ u64 gdma_region;
+}; /* HW DATA */
+
+int mana_gd_verify_vf_version(struct pci_dev *pdev);
+
+int mana_gd_register_device(struct gdma_dev *gd);
+int mana_gd_deregister_device(struct gdma_dev *gd);
+
+int mana_gd_post_work_request(struct gdma_queue *wq,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info);
+
+int mana_gd_post_and_ring(struct gdma_queue *queue,
+ const struct gdma_wqe_request *wqe,
+ struct gdma_posted_wqe_info *wqe_info);
+
+int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
+void mana_gd_free_res_map(struct gdma_resource *r);
+
+void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
+ struct gdma_queue *queue);
+
+int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ struct gdma_mem_info *gmi);
+
+void mana_gd_free_memory(struct gdma_mem_info *gmi);
+
+int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
+ u32 resp_len, void *resp);
+#endif /* _GDMA_H */
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
new file mode 100644
index 000000000000..2f87bf90f8ec
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mana.h"
+
+static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
+{
+ return readl(g->bar0_va + offset);
+}
+
+static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
+{
+ return readq(g->bar0_va + offset);
+}
+
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
+
+ gc->db_page_base = gc->bar0_va +
+ mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
+
+ gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
+}
+
+static int mana_gd_query_max_resources(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+
+ if (gc->num_msix_usable <= 1)
+ return -ENOSPC;
+
+ gc->max_num_queues = num_online_cpus();
+ if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
+ gc->max_num_queues = MANA_MAX_NUM_QUEUES;
+
+ if (gc->max_num_queues > resp.max_eq)
+ gc->max_num_queues = resp.max_eq;
+
+ if (gc->max_num_queues > resp.max_cq)
+ gc->max_num_queues = resp.max_cq;
+
+ if (gc->max_num_queues > resp.max_sq)
+ gc->max_num_queues = resp.max_sq;
+
+ if (gc->max_num_queues > resp.max_rq)
+ gc->max_num_queues = resp.max_rq;
+
+ return 0;
+}
+
+static int mana_gd_detect_devices(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_list_devices_resp resp = {};
+ struct gdma_general_req req = {};
+ struct gdma_dev_id dev;
+ u32 i, max_num_devs;
+ u16 dev_type;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
+ sizeof(resp));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
+
+ for (i = 0; i < max_num_devs; i++) {
+ dev = resp.devs[i];
+ dev_type = dev.type;
+
+ /* HWC is already detected in mana_hwc_create_channel(). */
+ if (dev_type == GDMA_DEVICE_HWC)
+ continue;
+
+ if (dev_type == GDMA_DEVICE_MANA) {
+ gc->mana.gdma_context = gc;
+ gc->mana.dev_id = dev;
+ }
+ }
+
+ return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
+}
+
+int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
+ u32 resp_len, void *resp)
+{
+ struct hw_channel_context *hwc = gc->hwc.driver_data;
+
+ return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
+}
+
+int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ struct gdma_mem_info *gmi)
+{
+ dma_addr_t dma_handle;
+ void *buf;
+
+ if (length < PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+ gmi->dev = gc->dev;
+ buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gmi->dma_handle = dma_handle;
+ gmi->virt_addr = buf;
+ gmi->length = length;
+
+ return 0;
+}
+
+void mana_gd_free_memory(struct gdma_mem_info *gmi)
+{
+ dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
+ gmi->dma_handle);
+}
+
+static int mana_gd_create_hw_eq(struct gdma_context *gc,
+ struct gdma_queue *queue)
+{
+ struct gdma_create_queue_resp resp = {};
+ struct gdma_create_queue_req req = {};
+ int err;
+
+ if (queue->type != GDMA_EQ)
+ return -EINVAL;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
+ sizeof(req), sizeof(resp));
+
+ req.hdr.dev_id = queue->gdma_dev->dev_id;
+ req.type = queue->type;
+ req.pdid = queue->gdma_dev->pdid;
+ req.doorbell_id = queue->gdma_dev->doorbell;
+ req.gdma_region = queue->mem_info.gdma_region;
+ req.queue_size = queue->queue_size;
+ req.log2_throttle_limit = queue->eq.log2_throttle_limit;
+ req.eq_pci_msix_index = queue->eq.msix_index;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ queue->id = resp.queue_index;
+ queue->eq.disable_needed = true;
+ queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ return 0;
+}
+
+static int mana_gd_disable_queue(struct gdma_queue *queue)
+{
+ struct gdma_context *gc = queue->gdma_dev->gdma_context;
+ struct gdma_disable_queue_req req = {};
+ struct gdma_general_resp resp = {};
+ int err;
+
+ WARN_ON(queue->type != GDMA_EQ);
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
+ sizeof(req), sizeof(resp));
+
+ req.hdr.dev_id = queue->gdma_dev->dev_id;
+ req.type = queue->type;
+ req.queue_index = queue->id;
+ req.alloc_res_id_on_creation = 1;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ return 0;
+}
+
+#define DOORBELL_OFFSET_SQ 0x0
+#define DOORBELL_OFFSET_RQ 0x400
+#define DOORBELL_OFFSET_CQ 0x800
+#define DOORBELL_OFFSET_EQ 0xFF8
+
+static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
+ enum gdma_queue_type q_type, u32 qid,
+ u32 tail_ptr, u8 num_req)
+{
+ void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
+ union gdma_doorbell_entry e = {};
+
+ switch (q_type) {
+ case GDMA_EQ:
+ e.eq.id = qid;
+ e.eq.tail_ptr = tail_ptr;
+ e.eq.arm = num_req;
+
+ addr += DOORBELL_OFFSET_EQ;
+ break;
+
+ case GDMA_CQ:
+ e.cq.id = qid;
+ e.cq.tail_ptr = tail_ptr;
+ e.cq.arm = num_req;
+
+ addr += DOORBELL_OFFSET_CQ;
+ break;
+
+ case GDMA_RQ:
+ e.rq.id = qid;
+ e.rq.tail_ptr = tail_ptr;
+ e.rq.wqe_cnt = num_req;
+
+ addr += DOORBELL_OFFSET_RQ;
+ break;
+
+ case GDMA_SQ:
+ e.sq.id = qid;
+ e.sq.tail_ptr = tail_ptr;
+
+ addr += DOORBELL_OFFSET_SQ;
+ break;
+
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* Ensure all writes are done before ringing the doorbell */
+ wmb();
+
+ writeq(e.as_uint64, addr);
+}
+
+void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
+{
+ mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
+ queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
+}
+
+void mana_gd_arm_cq(struct gdma_queue *cq)
+{
+ struct gdma_context *gc = cq->gdma_dev->gdma_context;
+
+ u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
+
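+ /* The doorbell's tail_ptr field carries the head modulo the queue
+ * size extended by the owner bits, so the HW can track wrap-arounds.
+ */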
+ u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
+
+ mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
+ head, SET_ARM_BIT);
+}
+
+static void mana_gd_process_eqe(struct gdma_queue *eq)
+{
+ u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
+ struct gdma_context *gc = eq->gdma_dev->gdma_context;
+ struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ union gdma_eqe_info eqe_info;
+ enum gdma_eqe_type type;
+ struct gdma_event event;
+ struct gdma_queue *cq;
+ struct gdma_eqe *eqe;
+ u32 cq_id;
+
+ eqe = &eq_eqe_ptr[head];
+ eqe_info.as_uint32 = eqe->eqe_info;
+ type = eqe_info.type;
+
+ switch (type) {
+ case GDMA_EQE_COMPLETION:
+ cq_id = eqe->details[0] & 0xFFFFFF;
+ if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
+ break;
+
+ cq = gc->cq_table[cq_id];
+ if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
+ break;
+
+ if (cq->cq.callback)
+ cq->cq.callback(cq->cq.context, cq);
+
+ break;
+
+ case GDMA_EQE_TEST_EVENT:
+ gc->test_event_eq_id = eq->id;
+ complete(&gc->eq_test_event);
+ break;
+
+ case GDMA_EQE_HWC_INIT_EQ_ID_DB:
+ case GDMA_EQE_HWC_INIT_DATA:
+ case GDMA_EQE_HWC_INIT_DONE:
+ if (!eq->eq.callback)
+ break;
+
+ event.type = type;
+ memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
+ eq->eq.callback(eq->eq.context, eq, &event);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mana_gd_process_eq_events(void *arg)
+{
+ u32 owner_bits, new_bits, old_bits;
+ union gdma_eqe_info eqe_info;
+ struct gdma_eqe *eq_eqe_ptr;
+ struct gdma_queue *eq = arg;
+ struct gdma_context *gc;
+ struct gdma_eqe *eqe;
+ unsigned int arm_bit;
+ u32 head, num_eqe;
+ int i;
+
+ gc = eq->gdma_dev->gdma_context;
+
+ num_eqe = eq->queue_size / GDMA_EQE_SIZE;
+ eq_eqe_ptr = eq->queue_mem_ptr;
+
+ /* Process up to 5 EQEs at a time, and update the HW head. */
+ for (i = 0; i < 5; i++) {
+ eqe = &eq_eqe_ptr[eq->head % num_eqe];
+ eqe_info.as_uint32 = eqe->eqe_info;
+ owner_bits = eqe_info.owner_bits;
+
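+ /* The owner bits count how many times the HW has wrapped around
+ * the ring: a value equal to the previous pass means the entry has
+ * not been written yet; any value other than the current pass means
+ * the HW has lapped the driver and overwritten unread entries.
+ */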
+ old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
+ /* No more entries */
+ if (owner_bits == old_bits)
+ break;
+
+ new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
+ if (owner_bits != new_bits) {
+ dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
+ break;
+ }
+
+ mana_gd_process_eqe(eq);
+
+ eq->head++;
+ }
+
+ /* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */
+ if (mana_gd_is_hwc(eq->gdma_dev)) {
+ arm_bit = SET_ARM_BIT;
+ } else if (eq->eq.work_done < eq->eq.budget &&
+ napi_complete_done(&eq->eq.napi, eq->eq.work_done)) {
+ arm_bit = SET_ARM_BIT;
+ } else {
+ arm_bit = 0;
+ }
+
+ head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
+
+ mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
+ head, arm_bit);
+}
+
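+/* NAPI poll handler: process EQ events within the given budget. The EQ is
+ * rearmed inside mana_gd_process_eq_events() once napi_complete_done()
+ * succeeds.
+ */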
+static int mana_poll(struct napi_struct *napi, int budget)
+{
+ struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi);
+
+ eq->eq.work_done = 0;
+ eq->eq.budget = budget;
+
+ mana_gd_process_eq_events(eq);
+
+ return min(eq->eq.work_done, budget);
+}
+
+static void mana_gd_schedule_napi(void *arg)
+{
+ struct gdma_queue *eq = arg;
+ struct napi_struct *napi;
+
+ napi = &eq->eq.napi;
+ napi_schedule_irqoff(napi);
+}
+
+static int mana_gd_register_irq(struct gdma_queue *queue,
+ const struct gdma_queue_spec *spec)
+{
+ struct gdma_dev *gd = queue->gdma_dev;
+ bool is_mana = mana_gd_is_mana(gd);
+ struct gdma_irq_context *gic;
+ struct gdma_context *gc;
+ struct gdma_resource *r;
+ unsigned int msi_index;
+ unsigned long flags;
+ int err;
+
+ gc = gd->gdma_context;
+ r = &gc->msix_resource;
+
+ spin_lock_irqsave(&r->lock, flags);
+
+ msi_index = find_first_zero_bit(r->map, r->size);
+ if (msi_index >= r->size) {
+ err = -ENOSPC;
+ } else {
+ bitmap_set(r->map, msi_index, 1);
+ queue->eq.msix_index = msi_index;
+ err = 0;
+ }
+
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ if (err)
+ return err;
+
+ WARN_ON(msi_index >= gc->num_msix_usable);
+
+ gic = &gc->irq_contexts[msi_index];
+
+ if (is_mana) {
+ netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&queue->eq.napi);
+ }
+
+ WARN_ON(gic->handler || gic->arg);
+
+ gic->arg = queue;
+
+ if (is_mana)
+ gic->handler = mana_gd_schedule_napi;
+ else
+ gic->handler = mana_gd_process_eq_events;
+
+ return 0;
+}
+
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
+{
+ struct gdma_dev *gd = queue->gdma_dev;
+ struct gdma_irq_context *gic;
+ struct gdma_context *gc;
+ struct gdma_resource *r;
+ unsigned int msix_index;
+ unsigned long flags;
+
+ gc = gd->gdma_context;
+ r = &gc->msix_resource;
+
+ /* At most num_online_cpus() + 1 interrupts are used. */
+ msix_index = queue->eq.msix_index;
+ if (WARN_ON(msix_index >= gc->num_msix_usable))
+ return;
+
+ gic = &gc->irq_contexts[msix_index];
+ gic->handler = NULL;
+ gic->arg = NULL;
+
+ spin_lock_irqsave(&r->lock, flags);
+ bitmap_clear(r->map, msix_index, 1);
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+}
+
+int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
+{
+ struct gdma_generate_test_event_req req = {};
+ struct gdma_general_resp resp = {};
+ struct device *dev = gc->dev;
+ int err;
+
+ mutex_lock(&gc->eq_test_event_mutex);
+
+ init_completion(&gc->eq_test_event);
+ gc->test_event_eq_id = INVALID_QUEUE_ID;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
+ sizeof(req), sizeof(resp));
+
+ req.hdr.dev_id = eq->gdma_dev->dev_id;
+ req.queue_index = eq->id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ dev_err(dev, "test_eq failed: %d\n", err);
+ goto out;
+ }
+
+ err = -EPROTO;
+
+ if (resp.hdr.status) {
+ dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
+ dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
+ goto out;
+ }
+
+ if (eq->id != gc->test_event_eq_id) {
+ dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
+ gc->test_event_eq_id, eq->id);
+ goto out;
+ }
+
+ err = 0;
+out:
+ mutex_unlock(&gc->eq_test_event_mutex);
+ return err;
+}
+
+static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
+ struct gdma_queue *queue)
+{
+ int err;
+
+ if (flush_events) {
+ err = mana_gd_test_eq(gc, queue);
+ if (err)
+ dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
+ }
+
+ mana_gd_deregister_irq(queue);
+
+ if (mana_gd_is_mana(queue->gdma_dev)) {
+ napi_disable(&queue->eq.napi);
+ netif_napi_del(&queue->eq.napi);
+ }
+
+ if (queue->eq.disable_needed)
+ mana_gd_disable_queue(queue);
+}
+
+static int mana_gd_create_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ bool create_hwq, struct gdma_queue *queue)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct device *dev = gc->dev;
+ u32 log2_num_entries;
+ int err;
+
+ queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+
+ log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
+
+ if (spec->eq.log2_throttle_limit > log2_num_entries) {
+ dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
+ spec->eq.log2_throttle_limit, log2_num_entries);
+ return -EINVAL;
+ }
+
+ err = mana_gd_register_irq(queue, spec);
+ if (err) {
+ dev_err(dev, "Failed to register irq: %d\n", err);
+ return err;
+ }
+
+ queue->eq.callback = spec->eq.callback;
+ queue->eq.context = spec->eq.context;
+ queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
+ queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
+
+ if (create_hwq) {
+ err = mana_gd_create_hw_eq(gc, queue);
+ if (err)
+ goto out;
+
+ err = mana_gd_test_eq(gc, queue);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(dev, "Failed to create EQ: %d\n", err);
+ mana_gd_destroy_eq(gc, false, queue);
+ return err;
+}
+
+static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
+ struct gdma_queue *queue)
+{
+ u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
+
+ queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
+ queue->cq.parent = spec->cq.parent_eq;
+ queue->cq.context = spec->cq.context;
+ queue->cq.callback = spec->cq.callback;
+}
+
+static void mana_gd_destroy_cq(struct gdma_context *gc,
+ struct gdma_queue *queue)
+{
+ u32 id = queue->id;
+
+ if (id >= gc->max_num_cqs)
+ return;
+
+ if (!gc->cq_table[id])
+ return;
+
+ gc->cq_table[id] = NULL;
+}
+
+int mana_gd_create_hwc_queue(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_mem_info *gmi;
+ struct gdma_queue *queue;
+ int err;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ gmi = &queue->mem_info;
+ err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
+ if (err)
+ goto free_q;
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->queue_mem_ptr = gmi->virt_addr;
+ queue->queue_size = spec->queue_size;
+ queue->monitor_avl_buf = spec->monitor_avl_buf;
+ queue->type = spec->type;
+ queue->gdma_dev = gd;
+
+ if (spec->type == GDMA_EQ)
+ err = mana_gd_create_eq(gd, spec, false, queue);
+ else if (spec->type == GDMA_CQ)
+ mana_gd_create_cq(spec, queue);
+
+ if (err)
+ goto out;
+
+ *queue_ptr = queue;
+ return 0;
+out:
+ mana_gd_free_memory(gmi);
+free_q:
+ kfree(queue);
+ return err;
+}
+
+static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+{
+ struct gdma_destroy_dma_region_req req = {};
+ struct gdma_general_resp resp = {};
+ int err;
+
+ if (gdma_region == GDMA_INVALID_DMA_REGION)
+ return;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
+ sizeof(resp));
+ req.gdma_region = gdma_region;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status)
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ struct gdma_mem_info *gmi)
+{
+ unsigned int num_page = gmi->length / PAGE_SIZE;
+ struct gdma_create_dma_region_req *req = NULL;
+ struct gdma_create_dma_region_resp resp = {};
+ struct gdma_context *gc = gd->gdma_context;
+ struct hw_channel_context *hwc;
+ u32 length = gmi->length;
+ u32 req_msg_size;
+ int err;
+ int i;
+
+ if (length < PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+ if (offset_in_page(gmi->virt_addr) != 0)
+ return -EINVAL;
+
+ hwc = gc->hwc.driver_data;
+ req_msg_size = sizeof(*req) + num_page * sizeof(u64);
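+ /* All page addresses must fit in a single HWC request here; the
+ * GDMA_DMA_REGION_ADD_PAGES continuation message is not used.
+ */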
+ if (req_msg_size > hwc->max_req_msg_size)
+ return -EINVAL;
+
+ req = kzalloc(req_msg_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
+ req_msg_size, sizeof(resp));
+ req->length = length;
+ req->offset_in_page = 0;
+ req->gdma_page_type = GDMA_PAGE_TYPE_4K;
+ req->page_count = num_page;
+ req->page_addr_list_len = num_page;
+
+ for (i = 0; i < num_page; i++)
+ req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+
+ err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
+ if (err)
+ goto out;
+
+ if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+ dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
+ resp.hdr.status);
+ err = -EPROTO;
+ goto out;
+ }
+
+ gmi->gdma_region = resp.gdma_region;
+out:
+ kfree(req);
+ return err;
+}
+
+int mana_gd_create_mana_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_mem_info *gmi;
+ struct gdma_queue *queue;
+ int err;
+
+ if (spec->type != GDMA_EQ)
+ return -EINVAL;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ gmi = &queue->mem_info;
+ err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
+ if (err)
+ goto free_q;
+
+ err = mana_gd_create_dma_region(gd, gmi);
+ if (err)
+ goto out;
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->queue_mem_ptr = gmi->virt_addr;
+ queue->queue_size = spec->queue_size;
+ queue->monitor_avl_buf = spec->monitor_avl_buf;
+ queue->type = spec->type;
+ queue->gdma_dev = gd;
+
+ err = mana_gd_create_eq(gd, spec, true, queue);
+ if (err)
+ goto out;
+
+ *queue_ptr = queue;
+ return 0;
+out:
+ mana_gd_free_memory(gmi);
+free_q:
+ kfree(queue);
+ return err;
+}
+
+int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_mem_info *gmi;
+ struct gdma_queue *queue;
+ int err;
+
+ if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
+ spec->type != GDMA_RQ)
+ return -EINVAL;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ gmi = &queue->mem_info;
+ err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
+ if (err)
+ goto free_q;
+
+ err = mana_gd_create_dma_region(gd, gmi);
+ if (err)
+ goto out;
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->queue_mem_ptr = gmi->virt_addr;
+ queue->queue_size = spec->queue_size;
+ queue->monitor_avl_buf = spec->monitor_avl_buf;
+ queue->type = spec->type;
+ queue->gdma_dev = gd;
+
+ if (spec->type == GDMA_CQ)
+ mana_gd_create_cq(spec, queue);
+
+ *queue_ptr = queue;
+ return 0;
+out:
+ mana_gd_free_memory(gmi);
+free_q:
+ kfree(queue);
+ return err;
+}
+
+void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
+{
+ struct gdma_mem_info *gmi = &queue->mem_info;
+
+ switch (queue->type) {
+ case GDMA_EQ:
+ mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
+ break;
+
+ case GDMA_CQ:
+ mana_gd_destroy_cq(gc, queue);
+ break;
+
+ case GDMA_RQ:
+ break;
+
+ case GDMA_SQ:
+ break;
+
+ default:
+ dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
+ queue->type);
+ return;
+ }
+
+ mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+ mana_gd_free_memory(gmi);
+ kfree(queue);
+}
+
+int mana_gd_verify_vf_version(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_verify_ver_resp resp = {};
+ struct gdma_verify_ver_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
+ sizeof(req), sizeof(resp));
+
+ req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
+ req.protocol_ver_max = GDMA_PROTOCOL_LAST;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ return 0;
+}
+
+int mana_gd_register_device(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_register_device_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ gd->pdid = INVALID_PDID;
+ gd->doorbell = INVALID_DOORBELL;
+ gd->gpa_mkey = INVALID_MEM_KEY;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
+ sizeof(resp));
+
+ req.hdr.dev_id = gd->dev_id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ gd->pdid = resp.pdid;
+ gd->gpa_mkey = resp.gpa_mkey;
+ gd->doorbell = resp.db_id;
+
+ return 0;
+}
+
+int mana_gd_deregister_device(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_general_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ if (gd->pdid == INVALID_PDID)
+ return -EINVAL;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
+ sizeof(resp));
+
+ req.hdr.dev_id = gd->dev_id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ }
+
+ gd->pdid = INVALID_PDID;
+ gd->doorbell = INVALID_DOORBELL;
+ gd->gpa_mkey = INVALID_MEM_KEY;
+
+ return err;
+}
+
+u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
+{
+ u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
+ u32 wq_size = wq->queue_size;
+
+ WARN_ON_ONCE(used_space > wq_size);
+
+ return wq_size - used_space;
+}
+
+u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
+{
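+ /* queue_size is a power of 2, so the AND below implements the modulo. */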
+ u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);
+
+ WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);
+
+ return wq->queue_mem_ptr + offset;
+}
+
+static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
+ enum gdma_queue_type q_type,
+ u32 client_oob_size, u32 sgl_data_size,
+ u8 *wqe_ptr)
+{
+ bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
+ bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
+ struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
+ u8 *ptr;
+
+ memset(header, 0, sizeof(struct gdma_wqe));
+ header->num_sge = wqe_req->num_sge;
+ header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
+
+ if (oob_in_sgl) {
+ WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+
+ header->client_oob_in_sgl = 1;
+
+ if (pad_data)
+ header->last_vbytes = wqe_req->sgl[0].size;
+ }
+
+ if (q_type == GDMA_SQ)
+ header->client_data_unit = wqe_req->client_data_unit;
+
+ /* The size of gdma_wqe + client_oob_size must be less than or equal
+ * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
+ * the queue memory buffer boundary.
+ */
+ ptr = wqe_ptr + sizeof(struct gdma_wqe);
+
+ if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
+ memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);
+
+ if (client_oob_size > wqe_req->inline_oob_size)
+ memset(ptr + wqe_req->inline_oob_size, 0,
+ client_oob_size - wqe_req->inline_oob_size);
+ }
+
+ return sizeof(struct gdma_wqe) + client_oob_size;
+}
+
+static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
+ const struct gdma_wqe_request *wqe_req)
+{
+ u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
+ const u8 *address = (u8 *)wqe_req->sgl;
+ u8 *base_ptr, *end_ptr;
+ u32 size_to_end;
+
+ base_ptr = wq->queue_mem_ptr;
+ end_ptr = base_ptr + wq->queue_size;
+ size_to_end = (u32)(end_ptr - wqe_ptr);
+
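+ /* If the SGL would run past the end of the ring buffer, copy the
+ * first part up to the end and wrap the remainder to the base.
+ */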
+ if (size_to_end < sgl_size) {
+ memcpy(wqe_ptr, address, size_to_end);
+
+ wqe_ptr = base_ptr;
+ address += size_to_end;
+ sgl_size -= size_to_end;
+ }
+
+ memcpy(wqe_ptr, address, sgl_size);
+}
+
+int mana_gd_post_work_request(struct gdma_queue *wq,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info)
+{
+ u32 client_oob_size = wqe_req->inline_oob_size;
+ struct gdma_context *gc;
+ u32 sgl_data_size;
+ u32 max_wqe_size;
+ u32 wqe_size;
+ u8 *wqe_ptr;
+
+ if (wqe_req->num_sge == 0)
+ return -EINVAL;
+
+ if (wq->type == GDMA_RQ) {
+ if (client_oob_size != 0)
+ return -EINVAL;
+
+ client_oob_size = INLINE_OOB_SMALL_SIZE;
+
+ max_wqe_size = GDMA_MAX_RQE_SIZE;
+ } else {
+ if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
+ client_oob_size != INLINE_OOB_LARGE_SIZE)
+ return -EINVAL;
+
+ max_wqe_size = GDMA_MAX_SQE_SIZE;
+ }
+
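+ /* A WQE consists of the 8-byte header, the inline client OOB and
+ * the SGL, padded to a multiple of the 32-byte basic unit.
+ */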
+ sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
+ wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
+ sgl_data_size, GDMA_WQE_BU_SIZE);
+ if (wqe_size > max_wqe_size)
+ return -EINVAL;
+
+ if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
+ gc = wq->gdma_dev->gdma_context;
+ dev_err(gc->dev, "unsuccessful flow control!\n");
+ return -ENOSPC;
+ }
+
+ if (wqe_info)
+ wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
+
+ wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
+ wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
+ sgl_data_size, wqe_ptr);
+ if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
+ wqe_ptr -= wq->queue_size;
+
+ mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
+
+ wq->head += wqe_size / GDMA_WQE_BU_SIZE;
+
+ return 0;
+}
+
+int mana_gd_post_and_ring(struct gdma_queue *queue,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info)
+{
+ struct gdma_context *gc = queue->gdma_dev->gdma_context;
+ int err;
+
+ err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
+ if (err)
+ return err;
+
+ mana_gd_wq_ring_doorbell(gc, queue);
+
+ return 0;
+}
+
+static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
+{
+ unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
+ struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
+ u32 owner_bits, new_bits, old_bits;
+ struct gdma_cqe *cqe;
+
+ cqe = &cq_cqe[cq->head % num_cqe];
+ owner_bits = cqe->cqe_info.owner_bits;
+
+ old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
+ /* Return 0 if no more entries. */
+ if (owner_bits == old_bits)
+ return 0;
+
+ new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
+ /* Return -1 if overflow detected. */
+ if (owner_bits != new_bits)
+ return -1;
+
+ comp->wq_num = cqe->cqe_info.wq_num;
+ comp->is_sq = cqe->cqe_info.is_sq;
+ memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
+
+ return 1;
+}
+
+int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
+{
+ int cqe_idx;
+ int ret;
+
+ for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
+ ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);
+
+ if (ret < 0) {
+ cq->head -= cqe_idx;
+ return ret;
+ }
+
+ if (ret == 0)
+ break;
+
+ cq->head++;
+ }
+
+ return cqe_idx;
+}
+
+static irqreturn_t mana_gd_intr(int irq, void *arg)
+{
+ struct gdma_irq_context *gic = arg;
+
+ if (gic->handler)
+ gic->handler(gic->arg);
+
+ return IRQ_HANDLED;
+}
+
+int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
+{
+ r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
+ if (!r->map)
+ return -ENOMEM;
+
+ r->size = res_avail;
+ spin_lock_init(&r->lock);
+
+ return 0;
+}
+
+void mana_gd_free_res_map(struct gdma_resource *r)
+{
+ bitmap_free(r->map);
+ r->map = NULL;
+ r->size = 0;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev)
+{
+ unsigned int max_queues_per_port = num_online_cpus();
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ unsigned int max_irqs;
+ int nvec, irq;
+ int err, i, j;
+
+ if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
+ max_queues_per_port = MANA_MAX_NUM_QUEUES;
+
+ max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV;
+
+ /* Need 1 interrupt for the Hardware Communication Channel (HWC) */
+ max_irqs++;
+
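+ /* Request at least two vectors: one for the HWC and one for the
+ * data queues.
+ */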
+ nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
+ GFP_KERNEL);
+ if (!gc->irq_contexts) {
+ err = -ENOMEM;
+ goto free_irq_vector;
+ }
+
+ for (i = 0; i < nvec; i++) {
+ gic = &gc->irq_contexts[i];
+ gic->handler = NULL;
+ gic->arg = NULL;
+
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0) {
+ err = irq;
+ goto free_irq;
+ }
+
+ err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
+ if (err)
+ goto free_irq;
+ }
+
+ err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
+ if (err)
+ goto free_irq;
+
+ gc->max_num_msix = nvec;
+ gc->num_msix_usable = nvec;
+
+ return 0;
+
+free_irq:
+ for (j = i - 1; j >= 0; j--) {
+ irq = pci_irq_vector(pdev, j);
+ gic = &gc->irq_contexts[j];
+ free_irq(irq, gic);
+ }
+
+ kfree(gc->irq_contexts);
+ gc->irq_contexts = NULL;
+free_irq_vector:
+ pci_free_irq_vectors(pdev);
+ return err;
+}
+
+static void mana_gd_remove_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int irq, i;
+
+ if (gc->max_num_msix < 1)
+ return;
+
+ mana_gd_free_res_map(&gc->msix_resource);
+
+ for (i = 0; i < gc->max_num_msix; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0)
+ continue;
+
+ gic = &gc->irq_contexts[i];
+ free_irq(irq, gic);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ gc->max_num_msix = 0;
+ gc->num_msix_usable = 0;
+ kfree(gc->irq_contexts);
+ gc->irq_contexts = NULL;
+}
+
+static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct gdma_context *gc;
+ void __iomem *bar0_va;
+ int bar = 0;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return -ENXIO;
+
+ pci_set_master(pdev);
+
+ err = pci_request_regions(pdev, "mana");
+ if (err)
+ goto disable_dev;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ goto release_region;
+
+ err = -ENOMEM;
+ gc = vzalloc(sizeof(*gc));
+ if (!gc)
+ goto release_region;
+
+ bar0_va = pci_iomap(pdev, bar, 0);
+ if (!bar0_va)
+ goto free_gc;
+
+ gc->bar0_va = bar0_va;
+ gc->dev = &pdev->dev;
+
+ pci_set_drvdata(pdev, gc);
+
+ mana_gd_init_registers(pdev);
+
+ mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+
+ err = mana_gd_setup_irqs(pdev);
+ if (err)
+ goto unmap_bar;
+
+ mutex_init(&gc->eq_test_event_mutex);
+
+ err = mana_hwc_create_channel(gc);
+ if (err)
+ goto remove_irq;
+
+ err = mana_gd_verify_vf_version(pdev);
+ if (err)
+ goto remove_irq;
+
+ err = mana_gd_query_max_resources(pdev);
+ if (err)
+ goto remove_irq;
+
+ err = mana_gd_detect_devices(pdev);
+ if (err)
+ goto remove_irq;
+
+ err = mana_probe(&gc->mana);
+ if (err)
+ goto clean_up_gdma;
+
+ return 0;
+
+clean_up_gdma:
+ mana_hwc_destroy_channel(gc);
+ vfree(gc->cq_table);
+ gc->cq_table = NULL;
+remove_irq:
+ mana_gd_remove_irqs(pdev);
+unmap_bar:
+ pci_iounmap(pdev, bar0_va);
+free_gc:
+ vfree(gc);
+release_region:
+ pci_release_regions(pdev);
+disable_dev:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
+ return err;
+}
+
+static void mana_gd_remove(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_remove(&gc->mana);
+
+ mana_hwc_destroy_channel(gc);
+ vfree(gc->cq_table);
+ gc->cq_table = NULL;
+
+ mana_gd_remove_irqs(pdev);
+
+ pci_iounmap(pdev, gc->bar0_va);
+
+ vfree(gc);
+
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+#ifndef PCI_VENDOR_ID_MICROSOFT
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#endif
+
+static const struct pci_device_id mana_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+ { }
+};
+
+static struct pci_driver mana_driver = {
+ .name = "mana",
+ .id_table = mana_id_table,
+ .probe = mana_gd_probe,
+ .remove = mana_gd_remove,
+};
+
+module_pci_driver(mana_driver);
+
+MODULE_DEVICE_TABLE(pci, mana_id_table);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
new file mode 100644
index 000000000000..1a923fd99990
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include "gdma.h"
+#include "hw_channel.h"
+
+static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
+{
+ struct gdma_resource *r = &hwc->inflight_msg_res;
+ unsigned long flags;
+ u32 index;
+
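+ /* One semaphore count exists per message slot, so once down()
+ * returns, a zero bit is guaranteed to be found in the bitmap.
+ */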
+ down(&hwc->sema);
+
+ spin_lock_irqsave(&r->lock, flags);
+
+ index = find_first_zero_bit(hwc->inflight_msg_res.map,
+ hwc->inflight_msg_res.size);
+
+ bitmap_set(hwc->inflight_msg_res.map, index, 1);
+
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ *msg_id = index;
+
+ return 0;
+}
+
+static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
+{
+ struct gdma_resource *r = &hwc->inflight_msg_res;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r->lock, flags);
+ bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ up(&hwc->sema);
+}
+
+static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
+ const struct gdma_resp_hdr *resp_msg,
+ u32 resp_len)
+{
+ if (resp_len < sizeof(*resp_msg))
+ return -EPROTO;
+
+ if (resp_len > caller_ctx->output_buflen)
+ return -EPROTO;
+
+ return 0;
+}
+
+static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ const struct gdma_resp_hdr *resp_msg)
+{
+ struct hwc_caller_ctx *ctx;
+ int err;
+
+ if (!test_bit(resp_msg->response.hwc_msg_id,
+ hwc->inflight_msg_res.map)) {
+ dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+ resp_msg->response.hwc_msg_id);
+ return;
+ }
+
+ ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
+ err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
+ if (err)
+ goto out;
+
+ ctx->status_code = resp_msg->status;
+
+ memcpy(ctx->output_buf, resp_msg, resp_len);
+out:
+ ctx->error = err;
+ complete(&ctx->comp_event);
+}
+
+static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+ struct hwc_work_request *req)
+{
+ struct device *dev = hwc_rxq->hwc->dev;
+ struct gdma_sge *sge;
+ int err;
+
+ sge = &req->sge;
+ sge->address = (u64)req->buf_sge_addr;
+ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+ sge->size = req->buf_len;
+
+ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+ req->wqe_req.sgl = sge;
+ req->wqe_req.num_sge = 1;
+ req->wqe_req.client_data_unit = 0;
+
+ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+ if (err)
+ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+ return err;
+}
+
+static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
+ struct gdma_event *event)
+{
+ struct hw_channel_context *hwc = ctx;
+ struct gdma_dev *gd = hwc->gdma_dev;
+ union hwc_init_type_data type_data;
+ union hwc_init_eq_id_db eq_db;
+ u32 type, val;
+
+ switch (event->type) {
+ case GDMA_EQE_HWC_INIT_EQ_ID_DB:
+ eq_db.as_uint32 = event->details[0];
+ hwc->cq->gdma_eq->id = eq_db.eq_id;
+ gd->doorbell = eq_db.doorbell;
+ break;
+
+ case GDMA_EQE_HWC_INIT_DATA:
+ type_data.as_uint32 = event->details[0];
+ type = type_data.type;
+ val = type_data.value;
+
+ switch (type) {
+ case HWC_INIT_DATA_CQID:
+ hwc->cq->gdma_cq->id = val;
+ break;
+
+ case HWC_INIT_DATA_RQID:
+ hwc->rxq->gdma_wq->id = val;
+ break;
+
+ case HWC_INIT_DATA_SQID:
+ hwc->txq->gdma_wq->id = val;
+ break;
+
+ case HWC_INIT_DATA_QUEUE_DEPTH:
+ hwc->hwc_init_q_depth_max = (u16)val;
+ break;
+
+ case HWC_INIT_DATA_MAX_REQUEST:
+ hwc->hwc_init_max_req_msg_size = val;
+ break;
+
+ case HWC_INIT_DATA_MAX_RESPONSE:
+ hwc->hwc_init_max_resp_msg_size = val;
+ break;
+
+ case HWC_INIT_DATA_MAX_NUM_CQS:
+ gd->gdma_context->max_num_cqs = val;
+ break;
+
+ case HWC_INIT_DATA_PDID:
+ hwc->gdma_dev->pdid = val;
+ break;
+
+ case HWC_INIT_DATA_GPA_MKEY:
+ hwc->rxq->msg_buf->gpa_mkey = val;
+ hwc->txq->msg_buf->gpa_mkey = val;
+ break;
+ }
+
+ break;
+
+ case GDMA_EQE_HWC_INIT_DONE:
+ complete(&hwc->hwc_init_eqe_comp);
+ break;
+
+ default:
+ /* Ignore unknown events, which should never happen. */
+ break;
+ }
+}
+
+static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
+ const struct hwc_rx_oob *rx_oob)
+{
+ struct hw_channel_context *hwc = ctx;
+ struct hwc_wq *hwc_rxq = hwc->rxq;
+ struct hwc_work_request *rx_req;
+ struct gdma_resp_hdr *resp;
+ struct gdma_wqe *dma_oob;
+ struct gdma_queue *rq;
+ struct gdma_sge *sge;
+ u64 rq_base_addr;
+ u64 rx_req_idx;
+ u8 *wqe;
+
+ if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
+ return;
+
+ rq = hwc_rxq->gdma_wq;
+ wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
+ dma_oob = (struct gdma_wqe *)wqe;
+
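+ /* The SGL follows the 8-byte WQE header and the inline OOB data. */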
+ sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);
+
+ /* Locate the RX work request for its virtual address and for reposting. */
+ rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
+ rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;
+
+ rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
+ resp = (struct gdma_resp_hdr *)rx_req->buf_va;
+
+ if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
+ dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
+ resp->response.hwc_msg_id);
+ return;
+ }
+
+ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
+
+ /* Do not use 'resp' beyond this point, because the buffer is
+ * reposted to the HW by mana_hwc_post_rx_wqe() below.
+ */
+ resp = NULL;
+
+ mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
+}
+
+static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
+ const struct hwc_rx_oob *rx_oob)
+{
+ struct hw_channel_context *hwc = ctx;
+ struct hwc_wq *hwc_txq = hwc->txq;
+
+ WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
+}
+
+static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
+ enum gdma_queue_type type, u64 queue_size,
+ struct gdma_queue **queue)
+{
+ struct gdma_queue_spec spec = {};
+
+ if (type != GDMA_SQ && type != GDMA_RQ)
+ return -EINVAL;
+
+ spec.type = type;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = queue_size;
+
+ return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
+}
+
+static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
+ u64 queue_size,
+ void *ctx, gdma_cq_callback *cb,
+ struct gdma_queue *parent_eq,
+ struct gdma_queue **queue)
+{
+ struct gdma_queue_spec spec = {};
+
+ spec.type = GDMA_CQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = queue_size;
+ spec.cq.context = ctx;
+ spec.cq.callback = cb;
+ spec.cq.parent_eq = parent_eq;
+
+ return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
+}
+
+static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
+ u64 queue_size,
+ void *ctx, gdma_eq_callback *cb,
+ struct gdma_queue **queue)
+{
+ struct gdma_queue_spec spec = {};
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = queue_size;
+ spec.eq.context = ctx;
+ spec.eq.callback = cb;
+ spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
+
+ return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
+}
+
+static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
+{
+ struct hwc_rx_oob comp_data = {};
+ struct gdma_comp *completions;
+ struct hwc_cq *hwc_cq = ctx;
+ int comp_read, i;
+
+ WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);
+
+ completions = hwc_cq->comp_buf;
+ comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
+ WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);
+
+ for (i = 0; i < comp_read; ++i) {
+ comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;
+
+ if (completions[i].is_sq)
+ hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
+ completions[i].wq_num,
+ &comp_data);
+ else
+ hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
+ completions[i].wq_num,
+ &comp_data);
+ }
+
+ mana_gd_arm_cq(q_self);
+}
+
+static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
+{
+ if (!hwc_cq)
+ return;
+
+ kfree(hwc_cq->comp_buf);
+
+ if (hwc_cq->gdma_cq)
+ mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
+
+ if (hwc_cq->gdma_eq)
+ mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
+
+ kfree(hwc_cq);
+}
+
+static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
+ gdma_eq_callback *callback, void *ctx,
+ hwc_rx_event_handler_t *rx_ev_hdlr,
+ void *rx_ev_ctx,
+ hwc_tx_event_handler_t *tx_ev_hdlr,
+ void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
+{
+ struct gdma_queue *eq, *cq;
+ struct gdma_comp *comp_buf;
+ struct hwc_cq *hwc_cq;
+ u32 eq_size, cq_size;
+ int err;
+
+ eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
+ if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+ eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+
+ cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
+ if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+ cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+
+ hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
+ if (!hwc_cq)
+ return -ENOMEM;
+
+ err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
+ goto out;
+ }
+ hwc_cq->gdma_eq = eq;
+
+ err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
+ eq, &cq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
+ goto out;
+ }
+ hwc_cq->gdma_cq = cq;
+
+ comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
+ if (!comp_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hwc_cq->hwc = hwc;
+ hwc_cq->comp_buf = comp_buf;
+ hwc_cq->queue_depth = q_depth;
+ hwc_cq->rx_event_handler = rx_ev_hdlr;
+ hwc_cq->rx_event_ctx = rx_ev_ctx;
+ hwc_cq->tx_event_handler = tx_ev_hdlr;
+ hwc_cq->tx_event_ctx = tx_ev_ctx;
+
+ *hwc_cq_ptr = hwc_cq;
+ return 0;
+out:
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
+ return err;
+}
+
+static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
+ u32 max_msg_size,
+ struct hwc_dma_buf **dma_buf_ptr)
+{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
+ struct hwc_work_request *hwc_wr;
+ struct hwc_dma_buf *dma_buf;
+ struct gdma_mem_info *gmi;
+ void *virt_addr;
+ u32 buf_size;
+ u8 *base_pa;
+ int err;
+ u16 i;
+
+ dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+
+ dma_buf->num_reqs = q_depth;
+
+ buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+
+ gmi = &dma_buf->mem_info;
+ err = mana_gd_alloc_memory(gc, buf_size, gmi);
+ if (err) {
+ dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ goto out;
+ }
+
+ virt_addr = dma_buf->mem_info.virt_addr;
+ base_pa = (u8 *)dma_buf->mem_info.dma_handle;
+
+ for (i = 0; i < q_depth; i++) {
+ hwc_wr = &dma_buf->reqs[i];
+
+ hwc_wr->buf_va = virt_addr + i * max_msg_size;
+ hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
+
+ hwc_wr->buf_len = max_msg_size;
+ }
+
+ *dma_buf_ptr = dma_buf;
+ return 0;
+out:
+ kfree(dma_buf);
+ return err;
+}
+
+static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
+ struct hwc_dma_buf *dma_buf)
+{
+ if (!dma_buf)
+ return;
+
+ mana_gd_free_memory(&dma_buf->mem_info);
+
+ kfree(dma_buf);
+}
+
+static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
+ struct hwc_wq *hwc_wq)
+{
+ if (!hwc_wq)
+ return;
+
+ mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
+
+ if (hwc_wq->gdma_wq)
+ mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
+ hwc_wq->gdma_wq);
+
+ kfree(hwc_wq);
+}
+
+static int mana_hwc_create_wq(struct hw_channel_context *hwc,
+ enum gdma_queue_type q_type, u16 q_depth,
+ u32 max_msg_size, struct hwc_cq *hwc_cq,
+ struct hwc_wq **hwc_wq_ptr)
+{
+ struct gdma_queue *queue;
+ struct hwc_wq *hwc_wq;
+ u32 queue_size;
+ int err;
+
+ WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);
+
+ if (q_type == GDMA_RQ)
+ queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
+ else
+ queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
+
+ if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+ queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+
+ hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
+ if (!hwc_wq)
+ return -ENOMEM;
+
+ err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
+ if (err)
+ goto out;
+
+ err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+ &hwc_wq->msg_buf);
+ if (err)
+ goto out;
+
+ hwc_wq->hwc = hwc;
+ hwc_wq->gdma_wq = queue;
+ hwc_wq->queue_depth = q_depth;
+ hwc_wq->hwc_cq = hwc_cq;
+
+ *hwc_wq_ptr = hwc_wq;
+ return 0;
+out:
+ if (err)
+ mana_hwc_destroy_wq(hwc, hwc_wq);
+ return err;
+}
+
+static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
+ struct hwc_work_request *req,
+ u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
+ bool dest_pf)
+{
+ struct device *dev = hwc_txq->hwc->dev;
+ struct hwc_tx_oob *tx_oob;
+ struct gdma_sge *sge;
+ int err;
+
+ if (req->msg_size == 0 || req->msg_size > req->buf_len) {
+ dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
+ req->msg_size, req->buf_len);
+ return -EINVAL;
+ }
+
+ tx_oob = &req->tx_oob;
+
+ tx_oob->vrq_id = dest_virt_rq_id;
+ tx_oob->dest_vfid = 0;
+ tx_oob->vrcq_id = dest_virt_rcq_id;
+ tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
+ tx_oob->loopback = false;
+ tx_oob->lso_override = false;
+ tx_oob->dest_pf = dest_pf;
+ tx_oob->vsq_id = hwc_txq->gdma_wq->id;
+
+ sge = &req->sge;
+ sge->address = (u64)req->buf_sge_addr;
+ sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
+ sge->size = req->msg_size;
+
+ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+ req->wqe_req.sgl = sge;
+ req->wqe_req.num_sge = 1;
+ req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
+ req->wqe_req.inline_oob_data = tx_oob;
+ req->wqe_req.client_data_unit = 0;
+
+ err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
+ if (err)
+ dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
+ return err;
+}
+
+static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
+ u16 num_msg)
+{
+ int err;
+
+ sema_init(&hwc->sema, num_msg);
+
+ err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
+ if (err)
+ dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
+ return err;
+}
+
+static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
+ u32 max_req_msg_size, u32 max_resp_msg_size)
+{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
+ struct hwc_wq *hwc_rxq = hwc->rxq;
+ struct hwc_work_request *req;
+ struct hwc_caller_ctx *ctx;
+ int err;
+ int i;
+
+ /* Post all WQEs on the RQ */
+ for (i = 0; i < q_depth; i++) {
+ req = &hwc_rxq->msg_buf->reqs[i];
+ err = mana_hwc_post_rx_wqe(hwc_rxq, req);
+ if (err)
+ return err;
+ }
+
+ ctx = kcalloc(q_depth, sizeof(struct hwc_caller_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < q_depth; ++i)
+ init_completion(&ctx[i].comp_event);
+
+ hwc->caller_ctx = ctx;
+
+ return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
+}
+
+static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
+ u32 *max_req_msg_size,
+ u32 *max_resp_msg_size)
+{
+ struct hw_channel_context *hwc = gc->hwc.driver_data;
+ struct gdma_queue *rq = hwc->rxq->gdma_wq;
+ struct gdma_queue *sq = hwc->txq->gdma_wq;
+ struct gdma_queue *eq = hwc->cq->gdma_eq;
+ struct gdma_queue *cq = hwc->cq->gdma_cq;
+ int err;
+
+ init_completion(&hwc->hwc_init_eqe_comp);
+
+ err = mana_smc_setup_hwc(&gc->shm_channel, false,
+ eq->mem_info.dma_handle,
+ cq->mem_info.dma_handle,
+ rq->mem_info.dma_handle,
+ sq->mem_info.dma_handle,
+ eq->eq.msix_index);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
+ return -ETIMEDOUT;
+
+ *q_depth = hwc->hwc_init_q_depth_max;
+ *max_req_msg_size = hwc->hwc_init_max_req_msg_size;
+ *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
+
+ if (WARN_ON(cq->id >= gc->max_num_cqs))
+ return -EPROTO;
+
+ gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
+ if (!gc->cq_table)
+ return -ENOMEM;
+
+ gc->cq_table[cq->id] = cq;
+
+ return 0;
+}
+
+static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
+ u32 max_req_msg_size, u32 max_resp_msg_size)
+{
+ struct hwc_wq *hwc_rxq = NULL;
+ struct hwc_wq *hwc_txq = NULL;
+ struct hwc_cq *hwc_cq = NULL;
+ int err;
+
+ err = mana_hwc_init_inflight_msg(hwc, q_depth);
+ if (err)
+ return err;
+
+ /* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
+ * queue depth and RQ queue depth.
+ */
+ err = mana_hwc_create_cq(hwc, q_depth * 2,
+ mana_hwc_init_event_handler, hwc,
+ mana_hwc_rx_event_handler, hwc,
+ mana_hwc_tx_event_handler, hwc, &hwc_cq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
+ goto out;
+ }
+ hwc->cq = hwc_cq;
+
+ err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
+ hwc_cq, &hwc_rxq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
+ goto out;
+ }
+ hwc->rxq = hwc_rxq;
+
+ err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
+ hwc_cq, &hwc_txq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
+ goto out;
+ }
+ hwc->txq = hwc_txq;
+
+ hwc->num_inflight_msg = q_depth;
+ hwc->max_req_msg_size = max_req_msg_size;
+
+ return 0;
+out:
+ if (hwc_txq)
+ mana_hwc_destroy_wq(hwc, hwc_txq);
+
+ if (hwc_rxq)
+ mana_hwc_destroy_wq(hwc, hwc_rxq);
+
+ if (hwc_cq)
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
+
+ mana_gd_free_res_map(&hwc->inflight_msg_res);
+ return err;
+}
+
+int mana_hwc_create_channel(struct gdma_context *gc)
+{
+ u32 max_req_msg_size, max_resp_msg_size;
+ struct gdma_dev *gd = &gc->hwc;
+ struct hw_channel_context *hwc;
+ u16 q_depth_max;
+ int err;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return -ENOMEM;
+
+ gd->gdma_context = gc;
+ gd->driver_data = hwc;
+ hwc->gdma_dev = gd;
+ hwc->dev = gc->dev;
+
+ /* HWC's instance number is always 0. */
+ gd->dev_id.as_uint32 = 0;
+ gd->dev_id.type = GDMA_DEVICE_HWC;
+
+ gd->pdid = INVALID_PDID;
+ gd->doorbell = INVALID_DOORBELL;
+
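+ /* Create the bootstrap queues with depth 1 and the maximum message
+ * sizes; the final values are learned from the HWC INIT_DATA events.
+ */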
+ err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
+ HW_CHANNEL_MAX_REQUEST_SIZE,
+ HW_CHANNEL_MAX_RESPONSE_SIZE);
+ if (err) {
+ dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
+ goto out;
+ }
+
+ err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
+ &max_resp_msg_size);
+ if (err) {
+ dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
+ goto out;
+ }
+
+ err = mana_hwc_test_channel(gc->hwc.driver_data,
+ HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
+ max_req_msg_size, max_resp_msg_size);
+ if (err) {
+ dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
+ goto out;
+ }
+
+ return 0;
+out:
+ kfree(hwc);
+ return err;
+}
+
+void mana_hwc_destroy_channel(struct gdma_context *gc)
+{
+ struct hw_channel_context *hwc = gc->hwc.driver_data;
+ struct hwc_caller_ctx *ctx;
+
+ mana_smc_teardown_hwc(&gc->shm_channel, false);
+
+ ctx = hwc->caller_ctx;
+ kfree(ctx);
+ hwc->caller_ctx = NULL;
+
+ mana_hwc_destroy_wq(hwc, hwc->txq);
+ hwc->txq = NULL;
+
+ mana_hwc_destroy_wq(hwc, hwc->rxq);
+ hwc->rxq = NULL;
+
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
+ hwc->cq = NULL;
+
+ mana_gd_free_res_map(&hwc->inflight_msg_res);
+
+ hwc->num_inflight_msg = 0;
+
+ if (hwc->gdma_dev->pdid != INVALID_PDID) {
+ hwc->gdma_dev->doorbell = INVALID_DOORBELL;
+ hwc->gdma_dev->pdid = INVALID_PDID;
+ }
+
+ kfree(hwc);
+ gc->hwc.driver_data = NULL;
+ gc->hwc.gdma_context = NULL;
+}
+
+int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ const void *req, u32 resp_len, void *resp)
+{
+ struct hwc_work_request *tx_wr;
+ struct hwc_wq *txq = hwc->txq;
+ struct gdma_req_hdr *req_msg;
+ struct hwc_caller_ctx *ctx;
+ u16 msg_id;
+ int err;
+
+ mana_hwc_get_msg_index(hwc, &msg_id);
+
+ tx_wr = &txq->msg_buf->reqs[msg_id];
+
+ if (req_len > tx_wr->buf_len) {
+ dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
+ tx_wr->buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ ctx = hwc->caller_ctx + msg_id;
+ ctx->output_buf = resp;
+ ctx->output_buflen = resp_len;
+
+ req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
+ if (req)
+ memcpy(req_msg, req, req_len);
+
+ req_msg->req.hwc_msg_id = msg_id;
+
+ tx_wr->msg_size = req_len;
+
+ err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
+ if (err) {
+ dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
+ dev_err(hwc->dev, "HWC: Request timed out!\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ctx->error) {
+ err = ctx->error;
+ goto out;
+ }
+
+ if (ctx->status_code) {
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
+ err = -EPROTO;
+ goto out;
+ }
+out:
+ mana_hwc_put_msg_index(hwc, msg_id);
+ return err;
+}
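mana_hwc_send_request() is the synchronous request/response primitive the rest of this series builds on: post the WQE, sleep on the completion, then check both the Linux error and the protocol status code. A usage sketch (not part of the patch; it assumes the MANA_QUERY_DEV_CONFIG message pair defined later in mana.h and the mana_gd_init_req_hdr() helper from gdma.h):

	static int example_hwc_query(struct hw_channel_context *hwc)
	{
		struct mana_query_device_cfg_resp resp = {};
		struct mana_query_device_cfg_req req = {};
		int err;

		mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
				     sizeof(req), sizeof(resp));

		/* Blocks until the response arrives or the 30s timeout fires. */
		err = mana_hwc_send_request(hwc, sizeof(req), &req,
					    sizeof(resp), &resp);
		if (err)
			return err;

		return resp.hdr.status ? -EPROTO : 0;
	}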
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
new file mode 100644
index 000000000000..31c6e83c454a
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _HW_CHANNEL_H
+#define _HW_CHANNEL_H
+
+#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4
+
+#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000
+#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000
+
+#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1
+
+#define HWC_INIT_DATA_CQID 1
+#define HWC_INIT_DATA_RQID 2
+#define HWC_INIT_DATA_SQID 3
+#define HWC_INIT_DATA_QUEUE_DEPTH 4
+#define HWC_INIT_DATA_MAX_REQUEST 5
+#define HWC_INIT_DATA_MAX_RESPONSE 6
+#define HWC_INIT_DATA_MAX_NUM_CQS 7
+#define HWC_INIT_DATA_PDID 8
+#define HWC_INIT_DATA_GPA_MKEY 9
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+union hwc_init_eq_id_db {
+ u32 as_uint32;
+
+ struct {
+ u32 eq_id : 16;
+ u32 doorbell : 16;
+ };
+}; /* HW DATA */
+
+union hwc_init_type_data {
+ u32 as_uint32;
+
+ struct {
+ u32 value : 24;
+ u32 type : 8;
+ };
+}; /* HW DATA */
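The HWC init EQEs deliver (type, value) pairs packed in this union, with the HWC_INIT_DATA_* codes above naming the types. A minimal decode sketch (an assumption about the handler, which lives in hw_channel.c):

	static u32 example_decode_init_data(u32 raw, u8 *type)
	{
		union hwc_init_type_data data;

		data.as_uint32 = raw;
		*type = data.type;	/* one of the HWC_INIT_DATA_* codes */
		return data.value;	/* e.g. a queue id or a queue depth */
	}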
+
+struct hwc_rx_oob {
+ u32 type : 6;
+ u32 eom : 1;
+ u32 som : 1;
+ u32 vendor_err : 8;
+ u32 reserved1 : 16;
+
+ u32 src_virt_wq : 24;
+ u32 src_vfid : 8;
+
+ u32 reserved2;
+
+ union {
+ u32 wqe_addr_low;
+ u32 wqe_offset;
+ };
+
+ u32 wqe_addr_high;
+
+ u32 client_data_unit : 14;
+ u32 reserved3 : 18;
+
+ u32 tx_oob_data_size;
+
+ u32 chunk_offset : 21;
+ u32 reserved4 : 11;
+}; /* HW DATA */
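These layouts rely on natural alignment rather than __packed, so a compile-time size check is a cheap guard against accidental padding. For hwc_rx_oob, whose eight 32-bit words total 32 bytes, a sketch (not in the patch) would be:

	static_assert(sizeof(struct hwc_rx_oob) == 32,
		      "hwc_rx_oob must match the 32-byte layout the HW expects");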
+
+struct hwc_tx_oob {
+ u32 reserved1;
+
+ u32 reserved2;
+
+ u32 vrq_id : 24;
+ u32 dest_vfid : 8;
+
+ u32 vrcq_id : 24;
+ u32 reserved3 : 8;
+
+ u32 vscq_id : 24;
+ u32 loopback : 1;
+ u32 lso_override: 1;
+ u32 dest_pf : 1;
+ u32 reserved4 : 5;
+
+ u32 vsq_id : 24;
+ u32 reserved5 : 8;
+}; /* HW DATA */
+
+struct hwc_work_request {
+ void *buf_va;
+ void *buf_sge_addr;
+ u32 buf_len;
+ u32 msg_size;
+
+ struct gdma_wqe_request wqe_req;
+ struct hwc_tx_oob tx_oob;
+
+ struct gdma_sge sge;
+};
+
+/* hwc_dma_buf represents the array of in-flight WQEs.
+ * mem_info, i.e. the GDMA-mapped memory, is partitioned among the
+ * in-flight WQEs.
+ * The number of WQEs is determined by the number of in-flight messages.
+ */
+struct hwc_dma_buf {
+ struct gdma_mem_info mem_info;
+
+ u32 gpa_mkey;
+
+ u32 num_reqs;
+ struct hwc_work_request reqs[];
+};
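The region is carved up uniformly: each in-flight message owns a max-message-sized slice of the mapping, indexed by message id. A sketch of the partitioning (mirroring what the channel setup code in hw_channel.c does; mem_info.virt_addr is the CPU address of the mapping):

	static void example_partition(struct hwc_dma_buf *buf, u32 max_msg_size)
	{
		void *base = buf->mem_info.virt_addr;
		u32 i;

		for (i = 0; i < buf->num_reqs; i++) {
			buf->reqs[i].buf_va = base + i * max_msg_size;
			buf->reqs[i].buf_len = max_msg_size;
		}
	}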
+
+typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
+ const struct hwc_rx_oob *rx_oob);
+
+typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
+ const struct hwc_rx_oob *rx_oob);
+
+struct hwc_cq {
+ struct hw_channel_context *hwc;
+
+ struct gdma_queue *gdma_cq;
+ struct gdma_queue *gdma_eq;
+ struct gdma_comp *comp_buf;
+ u16 queue_depth;
+
+ hwc_rx_event_handler_t *rx_event_handler;
+ void *rx_event_ctx;
+
+ hwc_tx_event_handler_t *tx_event_handler;
+ void *tx_event_ctx;
+};
+
+struct hwc_wq {
+ struct hw_channel_context *hwc;
+
+ struct gdma_queue *gdma_wq;
+ struct hwc_dma_buf *msg_buf;
+ u16 queue_depth;
+
+ struct hwc_cq *hwc_cq;
+};
+
+struct hwc_caller_ctx {
+ struct completion comp_event;
+ void *output_buf;
+ u32 output_buflen;
+
+ u32 error; /* Linux error code */
+ u32 status_code;
+};
+
+struct hw_channel_context {
+ struct gdma_dev *gdma_dev;
+ struct device *dev;
+
+ u16 num_inflight_msg;
+ u32 max_req_msg_size;
+
+ u16 hwc_init_q_depth_max;
+ u32 hwc_init_max_req_msg_size;
+ u32 hwc_init_max_resp_msg_size;
+
+ struct completion hwc_init_eqe_comp;
+
+ struct hwc_wq *rxq;
+ struct hwc_wq *txq;
+ struct hwc_cq *cq;
+
+ struct semaphore sema;
+ struct gdma_resource inflight_msg_res;
+
+ struct hwc_caller_ctx *caller_ctx;
+};
+
+int mana_hwc_create_channel(struct gdma_context *gc);
+void mana_hwc_destroy_channel(struct gdma_context *gc);
+
+int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ const void *req, u32 resp_len, void *resp);
+
+#endif /* _HW_CHANNEL_H */
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
new file mode 100644
index 000000000000..a2c3f826f022
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _MANA_H
+#define _MANA_H
+
+#include "gdma.h"
+#include "hw_channel.h"
+
+/* Microsoft Azure Network Adapter (MANA)'s definitions
+ *
+ * Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+/* MANA protocol version */
+#define MANA_MAJOR_VERSION 0
+#define MANA_MINOR_VERSION 1
+#define MANA_MICRO_VERSION 1
+
+typedef u64 mana_handle_t;
+#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
+
+enum TRI_STATE {
+ TRI_STATE_UNKNOWN = -1,
+ TRI_STATE_FALSE = 0,
+ TRI_STATE_TRUE = 1
+};
+
+/* Number of entries for the hardware indirection table; must be a power of 2 */
+#define MANA_INDIRECT_TABLE_SIZE 64
+#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
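Keeping the table size a power of 2 lets a flow hash be reduced to a table slot with a mask instead of a modulo, which is what mana_get_tx_queue() in mana_en.c does. Lookup sketch:

	static u32 example_pick_queue(const u32 *indir_table, u32 flow_hash)
	{
		return indir_table[flow_hash & MANA_INDIRECT_TABLE_MASK];
	}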
+
+/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
+#define MANA_HASH_KEY_SIZE 40
+
+#define COMP_ENTRY_SIZE 64
+
+#define ADAPTER_MTU_SIZE 1500
+#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
+
+#define RX_BUFFERS_PER_QUEUE 512
+
+#define MAX_SEND_BUFFERS_PER_QUEUE 256
+
+#define EQ_SIZE (8 * PAGE_SIZE)
+#define LOG2_EQ_THROTTLE 3
+
+#define MAX_PORTS_IN_MANA_DEV 16
+
+struct mana_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
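mana_stats follows the kernel's u64_stats_sync pattern: the datapath brackets its updates with u64_stats_update_begin()/u64_stats_update_end(), and readers loop until they observe a consistent snapshot. A reader sketch (mana_get_stats64() in mana_en.c uses the same retry loop):

	static void example_read_stats(const struct mana_stats *stats,
				       u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			*packets = stats->packets;
			*bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
	}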
+
+struct mana_txq {
+ struct gdma_queue *gdma_sq;
+
+ union {
+ u32 gdma_txq_id;
+ struct {
+ u32 reserved1 : 10;
+ u32 vsq_frame : 14;
+ u32 reserved2 : 8;
+ };
+ };
+
+ u16 vp_offset;
+
+ struct net_device *ndev;
+
+	/* SKBs that have been sent to the HW; we are waiting for their CQEs. */
+ struct sk_buff_head pending_skbs;
+ struct netdev_queue *net_txq;
+
+ atomic_t pending_sends;
+
+ struct mana_stats stats;
+};
+
+/* DMA mappings of the skb's linear data and frags */
+struct mana_skb_head {
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+
+ u32 size[MAX_SKB_FRAGS + 1];
+};
+
+#define MANA_HEADROOM sizeof(struct mana_skb_head)
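This DMA bookkeeping lives in the skb's own headroom: mana_start_xmit() first guarantees the space with skb_cow_head(skb, MANA_HEADROOM), and mana_map_skb() then reaches it with a cast. Retrieval sketch:

	static struct mana_skb_head *example_skb_head(struct sk_buff *skb)
	{
		/* Only valid after skb_cow_head(skb, MANA_HEADROOM) succeeded. */
		return (struct mana_skb_head *)skb->head;
	}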
+
+enum mana_tx_pkt_format {
+ MANA_SHORT_PKT_FMT = 0,
+ MANA_LONG_PKT_FMT = 1,
+};
+
+struct mana_tx_short_oob {
+ u32 pkt_fmt : 2;
+ u32 is_outer_ipv4 : 1;
+ u32 is_outer_ipv6 : 1;
+ u32 comp_iphdr_csum : 1;
+ u32 comp_tcp_csum : 1;
+ u32 comp_udp_csum : 1;
+ u32 supress_txcqe_gen : 1;
+ u32 vcq_num : 24;
+
+ u32 trans_off : 10; /* Transport header offset */
+ u32 vsq_frame : 14;
+ u32 short_vp_offset : 8;
+}; /* HW DATA */
+
+struct mana_tx_long_oob {
+ u32 is_encap : 1;
+ u32 inner_is_ipv6 : 1;
+ u32 inner_tcp_opt : 1;
+ u32 inject_vlan_pri_tag : 1;
+ u32 reserved1 : 12;
+ u32 pcp : 3; /* 802.1Q */
+ u32 dei : 1; /* 802.1Q */
+ u32 vlan_id : 12; /* 802.1Q */
+
+ u32 inner_frame_offset : 10;
+ u32 inner_ip_rel_offset : 6;
+ u32 long_vp_offset : 12;
+ u32 reserved2 : 4;
+
+ u32 reserved3;
+ u32 reserved4;
+}; /* HW DATA */
+
+struct mana_tx_oob {
+ struct mana_tx_short_oob s_oob;
+ struct mana_tx_long_oob l_oob;
+}; /* HW DATA */
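Which format a packet uses is decided per TX queue: if the vPort offset fits the short OOB's 8-bit field, the short format suffices; otherwise the long OOB carries it in 12 bits. Selection sketch, mirroring mana_start_xmit() (MANA_SHORT_VPORT_OFFSET_MAX is defined later in this header):

	static enum mana_tx_pkt_format example_pkt_fmt(u16 vp_offset)
	{
		return vp_offset > MANA_SHORT_VPORT_OFFSET_MAX ?
		       MANA_LONG_PKT_FMT : MANA_SHORT_PKT_FMT;
	}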
+
+enum mana_cq_type {
+ MANA_CQ_TYPE_RX,
+ MANA_CQ_TYPE_TX,
+};
+
+enum mana_cqe_type {
+ CQE_INVALID = 0,
+ CQE_RX_OKAY = 1,
+ CQE_RX_COALESCED_4 = 2,
+ CQE_RX_OBJECT_FENCE = 3,
+ CQE_RX_TRUNCATED = 4,
+
+ CQE_TX_OKAY = 32,
+ CQE_TX_SA_DROP = 33,
+ CQE_TX_MTU_DROP = 34,
+ CQE_TX_INVALID_OOB = 35,
+ CQE_TX_INVALID_ETH_TYPE = 36,
+ CQE_TX_HDR_PROCESSING_ERROR = 37,
+ CQE_TX_VF_DISABLED = 38,
+ CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
+ CQE_TX_VPORT_DISABLED = 40,
+ CQE_TX_VLAN_TAGGING_VIOLATION = 41,
+};
+
+#define MANA_CQE_COMPLETION 1
+
+struct mana_cqe_header {
+ u32 cqe_type : 6;
+ u32 client_type : 2;
+ u32 vendor_err : 24;
+}; /* HW DATA */
+
+/* NDIS HASH Types */
+#define NDIS_HASH_IPV4 BIT(0)
+#define NDIS_HASH_TCP_IPV4 BIT(1)
+#define NDIS_HASH_UDP_IPV4 BIT(2)
+#define NDIS_HASH_IPV6 BIT(3)
+#define NDIS_HASH_TCP_IPV6 BIT(4)
+#define NDIS_HASH_UDP_IPV6 BIT(5)
+#define NDIS_HASH_IPV6_EX BIT(6)
+#define NDIS_HASH_TCP_IPV6_EX BIT(7)
+#define NDIS_HASH_UDP_IPV6_EX BIT(8)
+
+#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
+#define MANA_HASH_L4 \
+ (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
+ NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
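These groupings decide how much of the hardware-reported hash the stack may trust: any L4 type yields PKT_HASH_TYPE_L4, an L3-only type PKT_HASH_TYPE_L3. A mapping sketch, close to what mana_rx_skb() in mana_en.c does:

	static enum pkt_hash_types example_hash_level(u32 rx_hashtype)
	{
		if (rx_hashtype & MANA_HASH_L4)
			return PKT_HASH_TYPE_L4;
		if (rx_hashtype & MANA_HASH_L3)
			return PKT_HASH_TYPE_L3;
		return PKT_HASH_TYPE_NONE;
	}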
+
+struct mana_rxcomp_perpkt_info {
+ u32 pkt_len : 16;
+ u32 reserved1 : 16;
+ u32 reserved2;
+ u32 pkt_hash;
+}; /* HW DATA */
+
+#define MANA_RXCOMP_OOB_NUM_PPI 4
+
+/* Receive completion OOB */
+struct mana_rxcomp_oob {
+ struct mana_cqe_header cqe_hdr;
+
+ u32 rx_vlan_id : 12;
+ u32 rx_vlantag_present : 1;
+ u32 rx_outer_iphdr_csum_succeed : 1;
+ u32 rx_outer_iphdr_csum_fail : 1;
+ u32 reserved1 : 1;
+ u32 rx_hashtype : 9;
+ u32 rx_iphdr_csum_succeed : 1;
+ u32 rx_iphdr_csum_fail : 1;
+ u32 rx_tcp_csum_succeed : 1;
+ u32 rx_tcp_csum_fail : 1;
+ u32 rx_udp_csum_succeed : 1;
+ u32 rx_udp_csum_fail : 1;
+ u32 reserved2 : 1;
+
+ struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
+
+ u32 rx_wqe_offset;
+}; /* HW DATA */
+
+struct mana_tx_comp_oob {
+ struct mana_cqe_header cqe_hdr;
+
+ u32 tx_data_offset;
+
+ u32 tx_sgl_offset : 5;
+ u32 tx_wqe_offset : 27;
+
+ u32 reserved[12];
+}; /* HW DATA */
+
+struct mana_rxq;
+
+struct mana_cq {
+ struct gdma_queue *gdma_cq;
+
+	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
+ u32 gdma_id;
+
+ /* Type of the CQ: TX or RX */
+ enum mana_cq_type type;
+
+ /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
+	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
+ */
+ struct mana_rxq *rxq;
+
+ /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
+	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
+ */
+ struct mana_txq *txq;
+
+	/* Pointer to a buffer which the CQ handler can copy the CQEs into. */
+ struct gdma_comp *gdma_comp_buf;
+};
+
+#define GDMA_MAX_RQE_SGES 15
+
+struct mana_recv_buf_oob {
+ /* A valid GDMA work request representing the data buffer. */
+ struct gdma_wqe_request wqe_req;
+
+ void *buf_va;
+ dma_addr_t buf_dma_addr;
+
+	/* SGL of the buffer to be sent as part of the work request. */
+ u32 num_sge;
+ struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
+
+	/* Stores the result of mana_gd_post_work_request;
+	 * gdma_posted_wqe_info.wqe_size_in_bu is needed to advance the
+	 * work queue when the WQE is consumed.
+	 */
+ struct gdma_posted_wqe_info wqe_inf;
+};
+
+struct mana_rxq {
+ struct gdma_queue *gdma_rq;
+ /* Cache the gdma receive queue id */
+ u32 gdma_id;
+
+ /* Index of RQ in the vPort, not gdma receive queue id */
+ u32 rxq_idx;
+
+ u32 datasize;
+
+ mana_handle_t rxobj;
+
+ struct mana_cq rx_cq;
+
+ struct net_device *ndev;
+
+ /* Total number of receive buffers to be allocated */
+ u32 num_rx_buf;
+
+ u32 buf_index;
+
+ struct mana_stats stats;
+
+ /* MUST BE THE LAST MEMBER:
+ * Each receive buffer has an associated mana_recv_buf_oob.
+ */
+ struct mana_recv_buf_oob rx_oobs[];
+};
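Because rx_oobs[] is a flexible array member, a single allocation covers the queue plus all of its receive-buffer OOBs; mana_create_rxq() below sizes it as sizeof(*rxq) + RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob). The overflow-checked struct_size() helper from <linux/overflow.h> expresses the same arithmetic (a sketch, not what the patch uses):

	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
		      GFP_KERNEL);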
+
+struct mana_tx_qp {
+ struct mana_txq txq;
+
+ struct mana_cq tx_cq;
+
+ mana_handle_t tx_object;
+};
+
+struct mana_ethtool_stats {
+ u64 stop_queue;
+ u64 wake_queue;
+};
+
+struct mana_context {
+ struct gdma_dev *gdma_dev;
+
+ u16 num_ports;
+
+ struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
+};
+
+struct mana_port_context {
+ struct mana_context *ac;
+ struct net_device *ndev;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct mana_eq *eqs;
+
+ enum TRI_STATE rss_state;
+
+ mana_handle_t default_rxobj;
+ bool tx_shortform_allowed;
+ u16 tx_vp_offset;
+
+ struct mana_tx_qp *tx_qp;
+
+ /* Indirection Table for RX & TX. The values are queue indexes */
+ u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+
+ /* Indirection table containing RxObject Handles */
+ mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+
+ /* Hash key used by the NIC */
+ u8 hashkey[MANA_HASH_KEY_SIZE];
+
+ /* This points to an array of num_queues of RQ pointers. */
+ struct mana_rxq **rxqs;
+
+	/* Create num_queues each of EQs, SQs, SQ-CQs, RQs and RQ-CQs. */
+ unsigned int max_queues;
+ unsigned int num_queues;
+
+ mana_handle_t port_handle;
+
+ u16 port_idx;
+
+ bool port_is_up;
+ bool port_st_save; /* Saved port state */
+
+ struct mana_ethtool_stats eth_stats;
+};
+
+int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
+ bool update_hash, bool update_tab);
+
+int mana_alloc_queues(struct net_device *ndev);
+int mana_attach(struct net_device *ndev);
+int mana_detach(struct net_device *ndev, bool from_close);
+
+int mana_probe(struct gdma_dev *gd);
+void mana_remove(struct gdma_dev *gd);
+
+extern const struct ethtool_ops mana_ethtool_ops;
+
+struct mana_obj_spec {
+ u32 queue_index;
+ u64 gdma_region;
+ u32 queue_size;
+ u32 attached_eq;
+ u32 modr_ctx_id;
+};
+
+enum mana_command_code {
+ MANA_QUERY_DEV_CONFIG = 0x20001,
+ MANA_QUERY_GF_STAT = 0x20002,
+ MANA_CONFIG_VPORT_TX = 0x20003,
+ MANA_CREATE_WQ_OBJ = 0x20004,
+ MANA_DESTROY_WQ_OBJ = 0x20005,
+ MANA_FENCE_RQ = 0x20006,
+ MANA_CONFIG_VPORT_RX = 0x20007,
+ MANA_QUERY_VPORT_CONFIG = 0x20008,
+};
+
+/* Query Device Configuration */
+struct mana_query_device_cfg_req {
+ struct gdma_req_hdr hdr;
+
+ /* Driver Capability flags */
+ u64 drv_cap_flags1;
+ u64 drv_cap_flags2;
+ u64 drv_cap_flags3;
+ u64 drv_cap_flags4;
+
+ u32 proto_major_ver;
+ u32 proto_minor_ver;
+ u32 proto_micro_ver;
+
+ u32 reserved;
+}; /* HW DATA */
+
+struct mana_query_device_cfg_resp {
+ struct gdma_resp_hdr hdr;
+
+ u64 pf_cap_flags1;
+ u64 pf_cap_flags2;
+ u64 pf_cap_flags3;
+ u64 pf_cap_flags4;
+
+ u16 max_num_vports;
+ u16 reserved;
+ u32 max_num_eqs;
+}; /* HW DATA */
+
+/* Query vPort Configuration */
+struct mana_query_vport_cfg_req {
+ struct gdma_req_hdr hdr;
+ u32 vport_index;
+}; /* HW DATA */
+
+struct mana_query_vport_cfg_resp {
+ struct gdma_resp_hdr hdr;
+ u32 max_num_sq;
+ u32 max_num_rq;
+ u32 num_indirection_ent;
+ u32 reserved1;
+ u8 mac_addr[6];
+ u8 reserved2[2];
+ mana_handle_t vport;
+}; /* HW DATA */
+
+/* Configure vPort */
+struct mana_config_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u32 pdid;
+ u32 doorbell_pageid;
+}; /* HW DATA */
+
+struct mana_config_vport_resp {
+ struct gdma_resp_hdr hdr;
+ u16 tx_vport_offset;
+ u8 short_form_allowed;
+ u8 reserved;
+}; /* HW DATA */
+
+/* Create WQ Object */
+struct mana_create_wqobj_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u32 wq_type;
+ u32 reserved;
+ u64 wq_gdma_region;
+ u64 cq_gdma_region;
+ u32 wq_size;
+ u32 cq_size;
+ u32 cq_moderation_ctx_id;
+ u32 cq_parent_qid;
+}; /* HW DATA */
+
+struct mana_create_wqobj_resp {
+ struct gdma_resp_hdr hdr;
+ u32 wq_id;
+ u32 cq_id;
+ mana_handle_t wq_obj;
+}; /* HW DATA */
+
+/* Destroy WQ Object */
+struct mana_destroy_wqobj_req {
+ struct gdma_req_hdr hdr;
+ u32 wq_type;
+ u32 reserved;
+ mana_handle_t wq_obj_handle;
+}; /* HW DATA */
+
+struct mana_destroy_wqobj_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Fence RQ */
+struct mana_fence_rq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t wq_obj_handle;
+}; /* HW DATA */
+
+struct mana_fence_rq_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Configure vPort Rx Steering */
+struct mana_cfg_rx_steer_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u16 num_indir_entries;
+ u16 indir_tab_offset;
+ u32 rx_enable;
+ u32 rss_enable;
+ u8 update_default_rxobj;
+ u8 update_hashkey;
+ u8 update_indir_tab;
+ u8 reserved;
+ mana_handle_t default_rxobj;
+ u8 hashkey[MANA_HASH_KEY_SIZE];
+}; /* HW DATA */
+
+struct mana_cfg_rx_steer_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+#define MANA_MAX_NUM_QUEUES 16
+
+#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
+
+struct mana_tx_package {
+ struct gdma_wqe_request wqe_req;
+ struct gdma_sge sgl_array[5];
+ struct gdma_sge *sgl_ptr;
+
+ struct mana_tx_oob tx_oob;
+
+ struct gdma_posted_wqe_info wqe_info;
+};
+
+#endif /* _MANA_H */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
new file mode 100644
index 000000000000..04d067243457
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -0,0 +1,1895 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mm.h>
+
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "mana.h"
+
+/* Microsoft Azure Network Adapter (MANA) functions */
+
+static int mana_open(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_alloc_queues(ndev);
+ if (err)
+ return err;
+
+ apc->port_is_up = true;
+
+ /* Ensure port state updated before txq state */
+ smp_wmb();
+
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+
+ return 0;
+}
+
+static int mana_close(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ if (!apc->port_is_up)
+ return 0;
+
+ return mana_detach(ndev, true);
+}
+
+static bool mana_can_tx(struct gdma_queue *wq)
+{
+ return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
+}
+
+static unsigned int mana_checksum_info(struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol == IPPROTO_TCP)
+ return IPPROTO_TCP;
+
+ if (ip->protocol == IPPROTO_UDP)
+ return IPPROTO_UDP;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+ if (ip6->nexthdr == IPPROTO_TCP)
+ return IPPROTO_TCP;
+
+ if (ip6->nexthdr == IPPROTO_UDP)
+ return IPPROTO_UDP;
+ }
+
+ /* No csum offloading */
+ return 0;
+}
+
+static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
+ struct mana_tx_package *tp)
+{
+ struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct gdma_context *gc;
+ struct device *dev;
+ skb_frag_t *frag;
+ dma_addr_t da;
+ int i;
+
+ gc = gd->gdma_context;
+ dev = gc->dev;
+ da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, da))
+ return -ENOMEM;
+
+ ash->dma_handle[0] = da;
+ ash->size[0] = skb_headlen(skb);
+
+ tp->wqe_req.sgl[0].address = ash->dma_handle[0];
+ tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
+ tp->wqe_req.sgl[0].size = ash->size[0];
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, da))
+ goto frag_err;
+
+ ash->dma_handle[i + 1] = da;
+ ash->size[i + 1] = skb_frag_size(frag);
+
+ tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
+ tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
+ tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
+ }
+
+ return 0;
+
+frag_err:
+ for (i = i - 1; i >= 0; i--)
+ dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u16 txq_idx = skb_get_queue_mapping(skb);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ bool ipv4 = false, ipv6 = false;
+ struct mana_tx_package pkg = {};
+ struct netdev_queue *net_txq;
+ struct mana_stats *tx_stats;
+ struct gdma_queue *gdma_sq;
+ unsigned int csum_type;
+ struct mana_txq *txq;
+ struct mana_cq *cq;
+ int err, len;
+
+ if (unlikely(!apc->port_is_up))
+ goto tx_drop;
+
+ if (skb_cow_head(skb, MANA_HEADROOM))
+ goto tx_drop_count;
+
+ txq = &apc->tx_qp[txq_idx].txq;
+ gdma_sq = txq->gdma_sq;
+ cq = &apc->tx_qp[txq_idx].tx_cq;
+
+ pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
+ pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
+
+ if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
+ pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
+ pkt_fmt = MANA_LONG_PKT_FMT;
+ } else {
+ pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
+ }
+
+ pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
+
+ if (pkt_fmt == MANA_SHORT_PKT_FMT)
+ pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
+ else
+ pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
+
+ pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
+ pkg.wqe_req.flags = 0;
+ pkg.wqe_req.client_data_unit = 0;
+
+ pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
+ WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
+
+ if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
+ pkg.wqe_req.sgl = pkg.sgl_array;
+ } else {
+ pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
+ sizeof(struct gdma_sge),
+ GFP_ATOMIC);
+ if (!pkg.sgl_ptr)
+ goto tx_drop_count;
+
+ pkg.wqe_req.sgl = pkg.sgl_ptr;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv4 = true;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ipv6 = true;
+
+ if (skb_is_gso(skb)) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+
+ pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
+ pkg.tx_oob.s_oob.comp_tcp_csum = 1;
+ pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
+
+ pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
+ pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
+ if (ipv4) {
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ csum_type = mana_checksum_info(skb);
+
+ if (csum_type == IPPROTO_TCP) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+
+ pkg.tx_oob.s_oob.comp_tcp_csum = 1;
+ pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
+
+ } else if (csum_type == IPPROTO_UDP) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+
+ pkg.tx_oob.s_oob.comp_udp_csum = 1;
+ } else {
+ /* Can't do offload of this type of checksum */
+ if (skb_checksum_help(skb))
+ goto free_sgl_ptr;
+ }
+ }
+
+ if (mana_map_skb(skb, apc, &pkg))
+ goto free_sgl_ptr;
+
+ skb_queue_tail(&txq->pending_skbs, skb);
+
+ len = skb->len;
+ net_txq = netdev_get_tx_queue(ndev, txq_idx);
+
+ err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
+ (struct gdma_posted_wqe_info *)skb->cb);
+ if (!mana_can_tx(gdma_sq)) {
+ netif_tx_stop_queue(net_txq);
+ apc->eth_stats.stop_queue++;
+ }
+
+ if (err) {
+ (void)skb_dequeue_tail(&txq->pending_skbs);
+ netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
+ err = NETDEV_TX_BUSY;
+ goto tx_busy;
+ }
+
+ err = NETDEV_TX_OK;
+ atomic_inc(&txq->pending_sends);
+
+ mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
+
+ /* skb may be freed after mana_gd_post_work_request. Do not use it. */
+ skb = NULL;
+
+ tx_stats = &txq->stats;
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets++;
+ tx_stats->bytes += len;
+ u64_stats_update_end(&tx_stats->syncp);
+
+tx_busy:
+ if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
+ netif_tx_wake_queue(net_txq);
+ apc->eth_stats.wake_queue++;
+ }
+
+ kfree(pkg.sgl_ptr);
+ return err;
+
+free_sgl_ptr:
+ kfree(pkg.sgl_ptr);
+tx_drop_count:
+ ndev->stats.tx_dropped++;
+tx_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mana_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *st)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+ struct mana_stats *stats;
+ unsigned int start;
+ u64 packets, bytes;
+ int q;
+
+ if (!apc->port_is_up)
+ return;
+
+ netdev_stats_to_stats64(st, &ndev->stats);
+
+ for (q = 0; q < num_queues; q++) {
+ stats = &apc->rxqs[q]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ st->rx_packets += packets;
+ st->rx_bytes += bytes;
+ }
+
+ for (q = 0; q < num_queues; q++) {
+ stats = &apc->tx_qp[q].txq.stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ st->tx_packets += packets;
+ st->tx_bytes += bytes;
+ }
+}
+
+static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
+ int old_q)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 hash = skb_get_hash(skb);
+ struct sock *sk = skb->sk;
+ int txq;
+
+ txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
+
+ if (txq != old_q && sk && sk_fullsock(sk) &&
+ rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, txq);
+
+ return txq;
+}
+
+static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ int txq;
+
+ if (ndev->real_num_tx_queues == 1)
+ return 0;
+
+ txq = sk_tx_queue_get(skb->sk);
+
+ if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
+ if (skb_rx_queue_recorded(skb))
+ txq = skb_get_rx_queue(skb);
+ else
+ txq = mana_get_tx_queue(ndev, skb, txq);
+ }
+
+ return txq;
+}
+
+static const struct net_device_ops mana_devops = {
+ .ndo_open = mana_open,
+ .ndo_stop = mana_close,
+ .ndo_select_queue = mana_select_queue,
+ .ndo_start_xmit = mana_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = mana_get_stats64,
+};
+
+static void mana_cleanup_port_context(struct mana_port_context *apc)
+{
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+}
+
+static int mana_init_port_context(struct mana_port_context *apc)
+{
+ apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
+ GFP_KERNEL);
+
+ return !apc->rxqs ? -ENOMEM : 0;
+}
+
+static int mana_send_request(struct mana_context *ac, void *in_buf,
+ u32 in_len, void *out_buf, u32 out_len)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct gdma_resp_hdr *resp = out_buf;
+ struct gdma_req_hdr *req = in_buf;
+ struct device *dev = gc->dev;
+ static atomic_t activity_id;
+ int err;
+
+ req->dev_id = gc->mana.dev_id;
+ req->activity_id = atomic_inc_return(&activity_id);
+
+ err = mana_gd_send_request(gc, in_len, in_buf, out_len,
+ out_buf);
+ if (err || resp->status) {
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
+ return err ? err : -EPROTO;
+ }
+
+ if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
+ req->activity_id != resp->activity_id) {
+ dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
+ req->dev_id.as_uint32, resp->dev_id.as_uint32,
+ req->activity_id, resp->activity_id);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
+ const enum mana_command_code expected_code,
+ const u32 min_size)
+{
+ if (resp_hdr->response.msg_type != expected_code)
+ return -EPROTO;
+
+ if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
+ return -EPROTO;
+
+ if (resp_hdr->response.msg_size < min_size)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
+ u32 proto_minor_ver, u32 proto_micro_ver,
+ u16 *max_num_vports)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct mana_query_device_cfg_resp resp = {};
+ struct mana_query_device_cfg_req req = {};
+ struct device *dev = gc->dev;
+ int err = 0;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
+ sizeof(req), sizeof(resp));
+ req.proto_major_ver = proto_major_ver;
+ req.proto_minor_ver = proto_minor_ver;
+ req.proto_micro_ver = proto_micro_ver;
+
+ err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
+ if (err) {
+ dev_err(dev, "Failed to query config: %d", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ return err;
+ }
+
+ *max_num_vports = resp.max_num_vports;
+
+ return 0;
+}
+
+static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
+ u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
+{
+ struct mana_query_vport_cfg_resp resp = {};
+ struct mana_query_vport_cfg_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport_index = vport_index;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ if (resp.hdr.status)
+ return -EPROTO;
+
+ *max_sq = resp.max_num_sq;
+ *max_rq = resp.max_num_rq;
+ *num_indir_entry = resp.num_indirection_ent;
+
+ apc->port_handle = resp.vport;
+ ether_addr_copy(apc->mac_addr, resp.mac_addr);
+
+ return 0;
+}
+
+static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
+ u32 doorbell_pg_id)
+{
+ struct mana_config_vport_resp resp = {};
+ struct mana_config_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.pdid = protection_dom_id;
+ req.doorbell_pageid = doorbell_pg_id;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
+ goto out;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ goto out;
+ }
+
+ apc->tx_shortform_allowed = resp.short_form_allowed;
+ apc->tx_vp_offset = resp.tx_vport_offset;
+out:
+ return err;
+}
+
+static int mana_cfg_vport_steering(struct mana_port_context *apc,
+ enum TRI_STATE rx,
+ bool update_default_rxobj, bool update_key,
+ bool update_tab)
+{
+ u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
+ struct mana_cfg_rx_steer_req *req = NULL;
+ struct mana_cfg_rx_steer_resp resp = {};
+ struct net_device *ndev = apc->ndev;
+ mana_handle_t *req_indir_tab;
+ u32 req_buf_size;
+ int err;
+
+ req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
+ req = kzalloc(req_buf_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
+ sizeof(resp));
+
+ req->vport = apc->port_handle;
+ req->num_indir_entries = num_entries;
+ req->indir_tab_offset = sizeof(*req);
+ req->rx_enable = rx;
+ req->rss_enable = apc->rss_state;
+ req->update_default_rxobj = update_default_rxobj;
+ req->update_hashkey = update_key;
+ req->update_indir_tab = update_tab;
+ req->default_rxobj = apc->default_rxobj;
+
+ if (update_key)
+ memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
+
+ if (update_tab) {
+ req_indir_tab = (mana_handle_t *)(req + 1);
+ memcpy(req_indir_tab, apc->rxobj_table,
+ req->num_indir_entries * sizeof(mana_handle_t));
+ }
+
+ err = mana_send_request(apc->ac, req, req_buf_size, &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ goto out;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
+ goto out;
+ }
+
+ if (resp.hdr.status) {
+ netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
+ resp.hdr.status);
+ err = -EPROTO;
+ }
+out:
+ kfree(req);
+ return err;
+}
+
+static int mana_create_wq_obj(struct mana_port_context *apc,
+ mana_handle_t vport,
+ u32 wq_type, struct mana_obj_spec *wq_spec,
+ struct mana_obj_spec *cq_spec,
+ mana_handle_t *wq_obj)
+{
+ struct mana_create_wqobj_resp resp = {};
+ struct mana_create_wqobj_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
+ sizeof(req), sizeof(resp));
+ req.vport = vport;
+ req.wq_type = wq_type;
+ req.wq_gdma_region = wq_spec->gdma_region;
+ req.cq_gdma_region = cq_spec->gdma_region;
+ req.wq_size = wq_spec->queue_size;
+ req.cq_size = cq_spec->queue_size;
+ req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
+ req.cq_parent_qid = cq_spec->attached_eq;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to create WQ object: %d\n", err);
+ goto out;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ goto out;
+ }
+
+ if (resp.wq_obj == INVALID_MANA_HANDLE) {
+ netdev_err(ndev, "Got an invalid WQ object handle\n");
+ err = -EPROTO;
+ goto out;
+ }
+
+ *wq_obj = resp.wq_obj;
+ wq_spec->queue_index = resp.wq_id;
+ cq_spec->queue_index = resp.cq_id;
+
+ return 0;
+out:
+ return err;
+}
+
+static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
+ mana_handle_t wq_obj)
+{
+ struct mana_destroy_wqobj_resp resp = {};
+ struct mana_destroy_wqobj_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
+ sizeof(req), sizeof(resp));
+ req.wq_type = wq_type;
+ req.wq_obj_handle = wq_obj;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
+ resp.hdr.status);
+}
+
+static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
+{
+ int i;
+
+ for (i = 0; i < CQE_POLLING_BUFFER; i++)
+ memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
+}
+
+static void mana_destroy_eq(struct gdma_context *gc,
+ struct mana_port_context *apc)
+{
+ struct gdma_queue *eq;
+ int i;
+
+ if (!apc->eqs)
+ return;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ eq = apc->eqs[i].eq;
+ if (!eq)
+ continue;
+
+ mana_gd_destroy_queue(gc, eq);
+ }
+
+ kfree(apc->eqs);
+ apc->eqs = NULL;
+}
+
+static int mana_create_eq(struct mana_port_context *apc)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct gdma_queue_spec spec = {};
+ int err;
+ int i;
+
+ apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq),
+ GFP_KERNEL);
+ if (!apc->eqs)
+ return -ENOMEM;
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = EQ_SIZE;
+ spec.eq.callback = NULL;
+ spec.eq.context = apc->eqs;
+ spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ spec.eq.ndev = apc->ndev;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);
+
+ err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ mana_destroy_eq(gd->gdma_context, apc);
+ return err;
+}
+
+static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
+{
+ u32 used_space_old;
+ u32 used_space_new;
+
+ used_space_old = wq->head - wq->tail;
+ used_space_new = wq->head - (wq->tail + num_units);
+
+ if (WARN_ON_ONCE(used_space_new > used_space_old))
+ return -ERANGE;
+
+ wq->tail += num_units;
+ return 0;
+}
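The unsigned subtractions make this check safe across head/tail wraparound. A worked case with 32-bit counters: head = 5 (after wrapping) and tail = 0xfffffffe give used_space_old = 7; consuming num_units = 3 yields used_space_new = 5 - 1 = 4, which passes, while an over-consumption of num_units = 8 yields used_space_new = 0xffffffff > 7 and trips the WARN.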
+
+static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+{
+ struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ struct device *dev = gc->dev;
+ int i;
+
+ dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
+}
+
+static void mana_poll_tx_cq(struct mana_cq *cq)
+{
+ struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent;
+ struct gdma_comp *completions = cq->gdma_comp_buf;
+ struct gdma_posted_wqe_info *wqe_info;
+ unsigned int pkt_transmitted = 0;
+ unsigned int wqe_unit_cnt = 0;
+ struct mana_txq *txq = cq->txq;
+ struct mana_port_context *apc;
+ struct netdev_queue *net_txq;
+ struct gdma_queue *gdma_wq;
+ unsigned int avail_space;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ bool txq_stopped;
+ int comp_read;
+ int i;
+
+ ndev = txq->ndev;
+ apc = netdev_priv(ndev);
+
+ comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
+ CQE_POLLING_BUFFER);
+
+ for (i = 0; i < comp_read; i++) {
+ struct mana_tx_comp_oob *cqe_oob;
+
+ if (WARN_ON_ONCE(!completions[i].is_sq))
+ return;
+
+ cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
+ if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
+ MANA_CQE_COMPLETION))
+ return;
+
+ switch (cqe_oob->cqe_hdr.cqe_type) {
+ case CQE_TX_OKAY:
+ break;
+
+ case CQE_TX_SA_DROP:
+ case CQE_TX_MTU_DROP:
+ case CQE_TX_INVALID_OOB:
+ case CQE_TX_INVALID_ETH_TYPE:
+ case CQE_TX_HDR_PROCESSING_ERROR:
+ case CQE_TX_VF_DISABLED:
+ case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
+ case CQE_TX_VPORT_DISABLED:
+ case CQE_TX_VLAN_TAGGING_VIOLATION:
+ WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
+ cqe_oob->cqe_hdr.cqe_type);
+ break;
+
+ default:
+ /* If the CQE type is unexpected, log an error, assert,
+ * and go through the error path.
+ */
+ WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
+ cqe_oob->cqe_hdr.cqe_type);
+ return;
+ }
+
+ if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
+ return;
+
+ skb = skb_dequeue(&txq->pending_skbs);
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+ wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
+ wqe_unit_cnt += wqe_info->wqe_size_in_bu;
+
+ mana_unmap_skb(skb, apc);
+
+ napi_consume_skb(skb, gdma_eq->eq.budget);
+
+ pkt_transmitted++;
+ }
+
+ if (WARN_ON_ONCE(wqe_unit_cnt == 0))
+ return;
+
+ mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
+
+ gdma_wq = txq->gdma_sq;
+ avail_space = mana_gd_wq_avail_space(gdma_wq);
+
+ /* Ensure tail updated before checking q stop */
+ smp_mb();
+
+ net_txq = txq->net_txq;
+ txq_stopped = netif_tx_queue_stopped(net_txq);
+
+ /* Ensure checking txq_stopped before apc->port_is_up. */
+ smp_rmb();
+
+ if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
+ netif_tx_wake_queue(net_txq);
+ apc->eth_stats.wake_queue++;
+ }
+
+ if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
+ WARN_ON_ONCE(1);
+}
+
+static void mana_post_pkt_rxq(struct mana_rxq *rxq)
+{
+ struct mana_recv_buf_oob *recv_buf_oob;
+ u32 curr_index;
+ int err;
+
+ curr_index = rxq->buf_index++;
+ if (rxq->buf_index == rxq->num_rx_buf)
+ rxq->buf_index = 0;
+
+ recv_buf_oob = &rxq->rx_oobs[curr_index];
+
+ err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
+ &recv_buf_oob->wqe_inf);
+ if (WARN_ON_ONCE(err))
+ return;
+
+ WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
+}
+
+static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
+ struct mana_rxq *rxq)
+{
+ struct mana_stats *rx_stats = &rxq->stats;
+ struct net_device *ndev = rxq->ndev;
+ uint pkt_len = cqe->ppi[0].pkt_len;
+ struct mana_port_context *apc;
+ u16 rxq_idx = rxq->rxq_idx;
+ struct napi_struct *napi;
+ struct gdma_queue *eq;
+ struct sk_buff *skb;
+ u32 hash_value;
+
+ apc = netdev_priv(ndev);
+ eq = apc->eqs[rxq_idx].eq;
+ eq->eq.work_done++;
+ napi = &eq->eq.napi;
+
+ if (!buf_va) {
+ ++ndev->stats.rx_dropped;
+ return;
+ }
+
+ skb = build_skb(buf_va, PAGE_SIZE);
+
+ if (!skb) {
+ free_page((unsigned long)buf_va);
+ ++ndev->stats.rx_dropped;
+ return;
+ }
+
+ skb_put(skb, pkt_len);
+ skb->dev = napi->dev;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, rxq_idx);
+
+ if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
+ if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
+ hash_value = cqe->ppi[0].pkt_hash;
+
+ if (cqe->rx_hashtype & MANA_HASH_L4)
+ skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
+ else
+ skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
+ }
+
+ napi_gro_receive(napi, skb);
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+ u64_stats_update_end(&rx_stats->syncp);
+}
+
+static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
+ struct gdma_comp *cqe)
+{
+ struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
+ struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
+ struct net_device *ndev = rxq->ndev;
+ struct mana_recv_buf_oob *rxbuf_oob;
+ struct device *dev = gc->dev;
+ void *new_buf, *old_buf;
+ struct page *new_page;
+ u32 curr, pktlen;
+ dma_addr_t da;
+
+ switch (oob->cqe_hdr.cqe_type) {
+ case CQE_RX_OKAY:
+ break;
+
+ case CQE_RX_TRUNCATED:
+ netdev_err(ndev, "Dropped a truncated packet\n");
+ return;
+
+ case CQE_RX_COALESCED_4:
+ netdev_err(ndev, "RX coalescing is unsupported\n");
+ return;
+
+ case CQE_RX_OBJECT_FENCE:
+ netdev_err(ndev, "RX Fencing is unsupported\n");
+ return;
+
+ default:
+ netdev_err(ndev, "Unknown RX CQE type = %d\n",
+ oob->cqe_hdr.cqe_type);
+ return;
+ }
+
+ if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
+ return;
+
+ pktlen = oob->ppi[0].pkt_len;
+
+ if (pktlen == 0) {
+		/* Data packets should never have a packet length of zero. */
+ netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
+ rxq->gdma_id, cq->gdma_id, rxq->rxobj);
+ return;
+ }
+
+ curr = rxq->buf_index;
+ rxbuf_oob = &rxq->rx_oobs[curr];
+ WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
+
+ new_page = alloc_page(GFP_ATOMIC);
+
+ if (new_page) {
+ da = dma_map_page(dev, new_page, 0, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, da)) {
+ __free_page(new_page);
+ new_page = NULL;
+ }
+ }
+
+ new_buf = new_page ? page_to_virt(new_page) : NULL;
+
+ if (new_buf) {
+ dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ old_buf = rxbuf_oob->buf_va;
+
+ /* refresh the rxbuf_oob with the new page */
+ rxbuf_oob->buf_va = new_buf;
+ rxbuf_oob->buf_dma_addr = da;
+ rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
+ } else {
+ old_buf = NULL; /* drop the packet if no memory */
+ }
+
+ mana_rx_skb(old_buf, oob, rxq);
+
+ mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
+
+ mana_post_pkt_rxq(rxq);
+}
+
+static void mana_poll_rx_cq(struct mana_cq *cq)
+{
+ struct gdma_comp *comp = cq->gdma_comp_buf;
+ int comp_read, i;
+
+ comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
+ WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+
+ for (i = 0; i < comp_read; i++) {
+ if (WARN_ON_ONCE(comp[i].is_sq))
+ return;
+
+ /* verify recv cqe references the right rxq */
+ if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
+ return;
+
+ mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
+ }
+}
+
+static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+{
+ struct mana_cq *cq = context;
+
+ WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
+
+ if (cq->type == MANA_CQ_TYPE_RX)
+ mana_poll_rx_cq(cq);
+ else
+ mana_poll_tx_cq(cq);
+
+ mana_gd_arm_cq(gdma_queue);
+}
+
+static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+
+ if (!cq->gdma_cq)
+ return;
+
+ mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
+}
+
+static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+
+ if (!txq->gdma_sq)
+ return;
+
+ mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
+}
+
+static void mana_destroy_txq(struct mana_port_context *apc)
+{
+ int i;
+
+ if (!apc->tx_qp)
+ return;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
+
+ mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
+
+ mana_deinit_txq(apc, &apc->tx_qp[i].txq);
+ }
+
+ kfree(apc->tx_qp);
+ apc->tx_qp = NULL;
+}
+
+static int mana_create_txq(struct mana_port_context *apc,
+ struct net_device *net)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct mana_obj_spec wq_spec;
+ struct mana_obj_spec cq_spec;
+ struct gdma_queue_spec spec;
+ struct gdma_context *gc;
+ struct mana_txq *txq;
+ struct mana_cq *cq;
+ u32 txq_size;
+ u32 cq_size;
+ int err;
+ int i;
+
+ apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
+ GFP_KERNEL);
+ if (!apc->tx_qp)
+ return -ENOMEM;
+
+ /* The minimum size of the WQE is 32 bytes, hence
+ * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * the SQ can store. This value is then used to size other queues
+ * to prevent overflow.
+ */
+ txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
+ BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+
+ cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
+ cq_size = PAGE_ALIGN(cq_size);
+
+ gc = gd->gdma_context;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
+
+ /* Create SQ */
+ txq = &apc->tx_qp[i].txq;
+
+ u64_stats_init(&txq->stats.syncp);
+ txq->ndev = net;
+ txq->net_txq = netdev_get_tx_queue(net, i);
+ txq->vp_offset = apc->tx_vp_offset;
+ skb_queue_head_init(&txq->pending_skbs);
+
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_SQ;
+ spec.monitor_avl_buf = true;
+ spec.queue_size = txq_size;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
+ if (err)
+ goto out;
+
+ /* Create SQ's CQ */
+ cq = &apc->tx_qp[i].tx_cq;
+ cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
+ cq->type = MANA_CQ_TYPE_TX;
+
+ cq->txq = txq;
+
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_CQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = cq_size;
+ spec.cq.callback = mana_cq_handler;
+ spec.cq.parent_eq = apc->eqs[i].eq;
+ spec.cq.context = cq;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
+ if (err)
+ goto out;
+
+ memset(&wq_spec, 0, sizeof(wq_spec));
+ memset(&cq_spec, 0, sizeof(cq_spec));
+
+ wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+ wq_spec.queue_size = txq->gdma_sq->queue_size;
+
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.queue_size = cq->gdma_cq->queue_size;
+ cq_spec.modr_ctx_id = 0;
+ cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
+
+ err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
+ &wq_spec, &cq_spec,
+ &apc->tx_qp[i].tx_object);
+
+ if (err)
+ goto out;
+
+ txq->gdma_sq->id = wq_spec.queue_index;
+ cq->gdma_cq->id = cq_spec.queue_index;
+
+ txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ txq->gdma_txq_id = txq->gdma_sq->id;
+
+ cq->gdma_id = cq->gdma_cq->id;
+
+		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+			err = -EINVAL;
+			goto out;
+		}
+
+ gc->cq_table[cq->gdma_id] = cq->gdma_cq;
+
+ mana_gd_arm_cq(cq->gdma_cq);
+ }
+
+ return 0;
+out:
+ mana_destroy_txq(apc);
+ return err;
+}
+
+static void mana_napi_sync_for_rx(struct mana_rxq *rxq)
+{
+ struct net_device *ndev = rxq->ndev;
+ struct mana_port_context *apc;
+ u16 rxq_idx = rxq->rxq_idx;
+ struct napi_struct *napi;
+ struct gdma_queue *eq;
+
+ apc = netdev_priv(ndev);
+ eq = apc->eqs[rxq_idx].eq;
+ napi = &eq->eq.napi;
+
+ napi_synchronize(napi);
+}
+
+static void mana_destroy_rxq(struct mana_port_context *apc,
+ struct mana_rxq *rxq, bool validate_state)
+{
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ struct mana_recv_buf_oob *rx_oob;
+ struct device *dev = gc->dev;
+ int i;
+
+ if (!rxq)
+ return;
+
+ if (validate_state)
+ mana_napi_sync_for_rx(rxq);
+
+ mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
+
+ mana_deinit_cq(apc, &rxq->rx_cq);
+
+ for (i = 0; i < rxq->num_rx_buf; i++) {
+ rx_oob = &rxq->rx_oobs[i];
+
+ if (!rx_oob->buf_va)
+ continue;
+
+ dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ free_page((unsigned long)rx_oob->buf_va);
+ rx_oob->buf_va = NULL;
+ }
+
+ if (rxq->gdma_rq)
+ mana_gd_destroy_queue(gc, rxq->gdma_rq);
+
+ kfree(rxq);
+}
+
+#define MANA_WQE_HEADER_SIZE 16
+#define MANA_WQE_SGE_SIZE 16
+
+static int mana_alloc_rx_wqe(struct mana_port_context *apc,
+ struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
+{
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ struct mana_recv_buf_oob *rx_oob;
+ struct device *dev = gc->dev;
+ struct page *page;
+ dma_addr_t da;
+ u32 buf_idx;
+
+ WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
+
+ *rxq_size = 0;
+ *cq_size = 0;
+
+ for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
+ rx_oob = &rxq->rx_oobs[buf_idx];
+ memset(rx_oob, 0, sizeof(*rx_oob));
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, da)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ rx_oob->buf_va = page_to_virt(page);
+ rx_oob->buf_dma_addr = da;
+
+ rx_oob->num_sge = 1;
+ rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
+ rx_oob->sgl[0].size = rxq->datasize;
+ rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
+
+ rx_oob->wqe_req.sgl = rx_oob->sgl;
+ rx_oob->wqe_req.num_sge = rx_oob->num_sge;
+ rx_oob->wqe_req.inline_oob_size = 0;
+ rx_oob->wqe_req.inline_oob_data = NULL;
+ rx_oob->wqe_req.flags = 0;
+ rx_oob->wqe_req.client_data_unit = 0;
+
+ *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
+ MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
+ *cq_size += COMP_ENTRY_SIZE;
+ }
+
+ return 0;
+}
+
+static int mana_push_wqe(struct mana_rxq *rxq)
+{
+ struct mana_recv_buf_oob *rx_oob;
+ u32 buf_idx;
+ int err;
+
+ for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
+ rx_oob = &rxq->rx_oobs[buf_idx];
+
+ err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
+ &rx_oob->wqe_inf);
+ if (err)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
+ u32 rxq_idx, struct mana_eq *eq,
+ struct net_device *ndev)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct mana_obj_spec wq_spec;
+ struct mana_obj_spec cq_spec;
+ struct gdma_queue_spec spec;
+ struct mana_cq *cq = NULL;
+ struct gdma_context *gc;
+ u32 cq_size, rq_size;
+ struct mana_rxq *rxq;
+ int err;
+
+ gc = gd->gdma_context;
+
+ rxq = kzalloc(sizeof(*rxq) +
+ RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
+ GFP_KERNEL);
+ if (!rxq)
+ return NULL;
+
+ rxq->ndev = ndev;
+ rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->rxq_idx = rxq_idx;
+ rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
+ rxq->rxobj = INVALID_MANA_HANDLE;
+
+ err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
+ if (err)
+ goto out;
+
+ rq_size = PAGE_ALIGN(rq_size);
+ cq_size = PAGE_ALIGN(cq_size);
+
+ /* Create RQ */
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_RQ;
+ spec.monitor_avl_buf = true;
+ spec.queue_size = rq_size;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
+ if (err)
+ goto out;
+
+ /* Create RQ's CQ */
+ cq = &rxq->rx_cq;
+ cq->gdma_comp_buf = eq->cqe_poll;
+ cq->type = MANA_CQ_TYPE_RX;
+ cq->rxq = rxq;
+
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_CQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = cq_size;
+ spec.cq.callback = mana_cq_handler;
+ spec.cq.parent_eq = eq->eq;
+ spec.cq.context = cq;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
+ if (err)
+ goto out;
+
+ memset(&wq_spec, 0, sizeof(wq_spec));
+ memset(&cq_spec, 0, sizeof(cq_spec));
+ wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+ wq_spec.queue_size = rxq->gdma_rq->queue_size;
+
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.queue_size = cq->gdma_cq->queue_size;
+ cq_spec.modr_ctx_id = 0;
+ cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
+
+ err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
+ &wq_spec, &cq_spec, &rxq->rxobj);
+ if (err)
+ goto out;
+
+ rxq->gdma_rq->id = wq_spec.queue_index;
+ cq->gdma_cq->id = cq_spec.queue_index;
+
+ rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ rxq->gdma_id = rxq->gdma_rq->id;
+ cq->gdma_id = cq->gdma_cq->id;
+
+ err = mana_push_wqe(rxq);
+ if (err)
+ goto out;
+
+	if (cq->gdma_id >= gc->max_num_cqs) {
+		err = -EINVAL;
+		goto out;
+	}
+
+ gc->cq_table[cq->gdma_id] = cq->gdma_cq;
+
+ mana_gd_arm_cq(cq->gdma_cq);
+out:
+ if (!err)
+ return rxq;
+
+ netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
+
+ mana_destroy_rxq(apc, rxq, false);
+
+ if (cq)
+ mana_deinit_cq(apc, cq);
+
+ return NULL;
+}
+
+static int mana_add_rx_queues(struct mana_port_context *apc,
+ struct net_device *ndev)
+{
+ struct mana_rxq *rxq;
+ int err = 0;
+ int i;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
+ if (!rxq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ u64_stats_init(&rxq->stats.syncp);
+
+ apc->rxqs[i] = rxq;
+ }
+
+ apc->default_rxobj = apc->rxqs[0]->rxobj;
+out:
+ return err;
+}
+
+static void mana_destroy_vport(struct mana_port_context *apc)
+{
+ struct mana_rxq *rxq;
+ u32 rxq_idx;
+
+ for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+ rxq = apc->rxqs[rxq_idx];
+ if (!rxq)
+ continue;
+
+ mana_destroy_rxq(apc, rxq, true);
+ apc->rxqs[rxq_idx] = NULL;
+ }
+
+ mana_destroy_txq(apc);
+}
+
+static int mana_create_vport(struct mana_port_context *apc,
+ struct net_device *net)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ int err;
+
+ apc->default_rxobj = INVALID_MANA_HANDLE;
+
+ err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
+ if (err)
+ return err;
+
+ return mana_create_txq(apc, net);
+}
+
+static void mana_rss_table_init(struct mana_port_context *apc)
+{
+ int i;
+
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ apc->indir_table[i] =
+ ethtool_rxfh_indir_default(i, apc->num_queues);
+}
+
+int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
+ bool update_hash, bool update_tab)
+{
+ u32 queue_idx;
+ int i;
+
+ if (update_tab) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ queue_idx = apc->indir_table[i];
+ apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
+ }
+ }
+
+ return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+}
+
+static int mana_init_port(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 max_txq, max_rxq, max_queues;
+ int port_idx = apc->port_idx;
+ u32 num_indirect_entries;
+ int err;
+
+ err = mana_init_port_context(apc);
+ if (err)
+ return err;
+
+ err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
+ &num_indirect_entries);
+ if (err) {
+ netdev_err(ndev, "Failed to query info for vPort 0\n");
+ goto reset_apc;
+ }
+
+ max_queues = min_t(u32, max_txq, max_rxq);
+ if (apc->max_queues > max_queues)
+ apc->max_queues = max_queues;
+
+ if (apc->num_queues > apc->max_queues)
+ apc->num_queues = apc->max_queues;
+
+ ether_addr_copy(ndev->dev_addr, apc->mac_addr);
+
+ return 0;
+
+reset_apc:
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+ return err;
+}
+
+int mana_alloc_queues(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ int err;
+
+ err = mana_create_eq(apc);
+ if (err)
+ return err;
+
+ err = mana_create_vport(apc, ndev);
+ if (err)
+ goto destroy_eq;
+
+ err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
+ if (err)
+ goto destroy_vport;
+
+ err = mana_add_rx_queues(apc, ndev);
+ if (err)
+ goto destroy_vport;
+
+ apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
+
+ err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
+ if (err)
+ goto destroy_vport;
+
+ mana_rss_table_init(apc);
+
+ err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
+ if (err)
+ goto destroy_vport;
+
+ return 0;
+
+destroy_vport:
+ mana_destroy_vport(apc);
+destroy_eq:
+ mana_destroy_eq(gd->gdma_context, apc);
+ return err;
+}
+
+int mana_attach(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ ASSERT_RTNL();
+
+ err = mana_init_port(ndev);
+ if (err)
+ return err;
+
+ err = mana_alloc_queues(ndev);
+ if (err) {
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+ return err;
+ }
+
+ netif_device_attach(ndev);
+
+ apc->port_is_up = apc->port_st_save;
+
+ /* Ensure port state updated before txq state */
+ smp_wmb();
+
+ if (apc->port_is_up) {
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+ }
+
+ return 0;
+}
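The smp_wmb() orders the port_is_up store before the queue wake-ups; whoever re-enables a txq from the completion path must pair it with a read barrier. An illustrative reader-side sketch (not the exact MANA consumer):

    smp_rmb();      /* pairs with smp_wmb() in mana_attach()/mana_detach() */
    if (apc->port_is_up)
            netif_tx_wake_queue(net_txq);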
+
+static int mana_dealloc_queues(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct mana_txq *txq;
+ int i, err;
+
+ if (apc->port_is_up)
+ return -EINVAL;
+
+ /* No packet can be transmitted now since apc->port_is_up is false.
+ * There is still a tiny chance that mana_poll_tx_cq() can re-enable
+ * a txq because it may not see apc->port_is_up being cleared to
+ * false in time, but that is harmless since mana_start_xmit() drops
+ * any new packets while apc->port_is_up is false.
+ *
+ * Drain all the in-flight TX packets.
+ */
+ for (i = 0; i < apc->num_queues; i++) {
+ txq = &apc->tx_qp[i].txq;
+
+ while (atomic_read(&txq->pending_sends) > 0)
+ usleep_range(1000, 2000);
+ }
+
+ /* At this point the queues can no longer be woken up, because
+ * mana_poll_tx_cq() can no longer be running.
+ */
+
+ apc->rss_state = TRI_STATE_FALSE;
+ err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
+ if (err) {
+ netdev_err(ndev, "Failed to disable vPort: %d\n", err);
+ return err;
+ }
+
+ /* TODO: Implement RX fencing */
+ ssleep(1);
+
+ mana_destroy_vport(apc);
+
+ mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
+
+ return 0;
+}
+
+int mana_detach(struct net_device *ndev, bool from_close)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ ASSERT_RTNL();
+
+ apc->port_st_save = apc->port_is_up;
+ apc->port_is_up = false;
+
+ /* Ensure port state updated before txq state */
+ smp_wmb();
+
+ netif_tx_disable(ndev);
+ netif_carrier_off(ndev);
+
+ if (apc->port_st_save) {
+ err = mana_dealloc_queues(ndev);
+ if (err)
+ return err;
+ }
+
+ if (!from_close) {
+ netif_device_detach(ndev);
+ mana_cleanup_port_context(apc);
+ }
+
+ return 0;
+}
+
+static int mana_probe_port(struct mana_context *ac, int port_idx,
+ struct net_device **ndev_storage)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct mana_port_context *apc;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
+ gc->max_num_queues);
+ if (!ndev)
+ return -ENOMEM;
+
+ *ndev_storage = ndev;
+
+ apc = netdev_priv(ndev);
+ apc->ac = ac;
+ apc->ndev = ndev;
+ apc->max_queues = gc->max_num_queues;
+ apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
+ apc->port_handle = INVALID_MANA_HANDLE;
+ apc->port_idx = port_idx;
+
+ ndev->netdev_ops = &mana_devops;
+ ndev->ethtool_ops = &mana_ethtool_ops;
+ ndev->mtu = ETH_DATA_LEN;
+ ndev->max_mtu = ndev->mtu;
+ ndev->min_mtu = ndev->mtu;
+ ndev->needed_headroom = MANA_HEADROOM;
+ SET_NETDEV_DEV(ndev, gc->dev);
+
+ netif_carrier_off(ndev);
+
+ netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
+
+ err = mana_init_port(ndev);
+ if (err)
+ goto free_net;
+
+ netdev_lockdep_set_classes(ndev);
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features |= NETIF_F_RXCSUM;
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features = ndev->hw_features;
+ ndev->vlan_features = 0;
+
+ err = register_netdev(ndev);
+ if (err) {
+ netdev_err(ndev, "Unable to register netdev.\n");
+ goto reset_apc;
+ }
+
+ return 0;
+
+reset_apc:
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+free_net:
+ *ndev_storage = NULL;
+ netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
+ free_netdev(ndev);
+ return err;
+}
+
+int mana_probe(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct device *dev = gc->dev;
+ struct mana_context *ac;
+ int err;
+ int i;
+
+ dev_info(dev,
+ "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
+ MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ ac->gdma_dev = gd;
+ ac->num_ports = 1;
+ gd->driver_data = ac;
+
+ err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
+ MANA_MICRO_VERSION, &ac->num_ports);
+ if (err)
+ goto out;
+
+ if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
+ ac->num_ports = MAX_PORTS_IN_MANA_DEV;
+
+ for (i = 0; i < ac->num_ports; i++) {
+ err = mana_probe_port(ac, i, &ac->ports[i]);
+ if (err)
+ break;
+ }
+out:
+ if (err)
+ mana_remove(gd);
+
+ return err;
+}
+
+void mana_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct mana_context *ac = gd->driver_data;
+ struct device *dev = gc->dev;
+ struct net_device *ndev;
+ int i;
+
+ for (i = 0; i < ac->num_ports; i++) {
+ ndev = ac->ports[i];
+ if (!ndev) {
+ if (i == 0)
+ dev_err(dev, "No net device to remove\n");
+ goto out;
+ }
+
+ /* All cleanup actions must be done while holding rtnl_lock();
+ * otherwise other functions may access partially cleaned-up data.
+ */
+ rtnl_lock();
+
+ mana_detach(ndev, false);
+
+ unregister_netdevice(ndev);
+
+ rtnl_unlock();
+
+ free_netdev(ndev);
+ }
+out:
+ mana_gd_deregister_device(gd);
+ gd->driver_data = NULL;
+ gd->gdma_context = NULL;
+ kfree(ac);
+}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
new file mode 100644
index 000000000000..7e74339f39ae
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "mana.h"
+
+static const struct {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} mana_eth_stats[] = {
+ {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
+ {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+};
+
+static int mana_get_sset_count(struct net_device *ndev, int stringset)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+
+ if (stringset != ETH_SS_STATS)
+ return -EINVAL;
+
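+ /* Per queue: rx packets, rx bytes, tx packets, tx bytes */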
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 4;
+}
+
+static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+ u8 *p = data;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
+ memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ sprintf(p, "rx_%d_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ sprintf(p, "tx_%d_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static void mana_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *e_stats, u64 *data)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+ void *eth_stats = &apc->eth_stats;
+ struct mana_stats *stats;
+ unsigned int start;
+ u64 packets, bytes;
+ int q, i = 0;
+
+ if (!apc->port_is_up)
+ return;
+
+ for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
+ data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+
+ for (q = 0; q < num_queues; q++) {
+ stats = &apc->rxqs[q]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ data[i++] = packets;
+ data[i++] = bytes;
+ }
+
+ for (q = 0; q < num_queues; q++) {
+ stats = &apc->tx_qp[q].txq.stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ data[i++] = packets;
+ data[i++] = bytes;
+ }
+}
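The retry loops above are the reader half of the u64_stats seqcount API; the datapath is expected to bump the counters under the writer half. A minimal writer-side sketch (skb here is a placeholder):

    u64_stats_update_begin(&stats->syncp);
    stats->packets++;
    stats->bytes += skb->len;
    u64_stats_update_end(&stats->syncp);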
+
+static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
+ u32 *rules)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = apc->num_queues;
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static u32 mana_get_rxfh_key_size(struct net_device *ndev)
+{
+ return MANA_HASH_KEY_SIZE;
+}
+
+static u32 mana_rss_indir_size(struct net_device *ndev)
+{
+ return MANA_INDIRECT_TABLE_SIZE;
+}
+
+static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+
+ if (indir) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ indir[i] = apc->indir_table[i];
+ }
+
+ if (key)
+ memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
+
+ return 0;
+}
+
+static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ bool update_hash = false, update_table = false;
+ u32 save_table[MANA_INDIRECT_TABLE_SIZE];
+ u8 save_key[MANA_HASH_KEY_SIZE];
+ int i, err;
+
+ if (!apc->port_is_up)
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ if (indir[i] >= apc->num_queues)
+ return -EINVAL;
+
+ update_table = true;
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ save_table[i] = apc->indir_table[i];
+ apc->indir_table[i] = indir[i];
+ }
+ }
+
+ if (key) {
+ update_hash = true;
+ memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
+ memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
+ }
+
+ err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
+
+ if (err) { /* revert to the original values */
+ if (update_table) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ apc->indir_table[i] = save_table[i];
+ }
+
+ if (update_hash)
+ memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);
+
+ mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
+ }
+
+ return err;
+}
+
+static void mana_get_channels(struct net_device *ndev,
+ struct ethtool_channels *channel)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ channel->max_combined = apc->max_queues;
+ channel->combined_count = apc->num_queues;
+}
+
+static int mana_set_channels(struct net_device *ndev,
+ struct ethtool_channels *channels)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int new_count = channels->combined_count;
+ unsigned int old_count = apc->num_queues;
+ int err, err2;
+
+ if (!apc->port_is_up)
+ return -EOPNOTSUPP;
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ return err;
+ }
+
+ apc->num_queues = new_count;
+ err = mana_attach(ndev);
+ if (!err)
+ return 0;
+
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+
+ /* Try to roll it back to the old configuration. */
+ apc->num_queues = old_count;
+ err2 = mana_attach(ndev);
+ if (err2)
+ netdev_err(ndev, "mana re-attach failed: %d\n", err2);
+
+ return err;
+}
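From userspace this maps onto the standard ethtool channels interface: "ethtool -l <dev>" reports the combined_count and max_combined filled in above, and "ethtool -L <dev> combined 8" triggers the detach/re-attach sequence with the new queue count.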
+
+const struct ethtool_ops mana_ethtool_ops = {
+ .get_ethtool_stats = mana_get_ethtool_stats,
+ .get_sset_count = mana_get_sset_count,
+ .get_strings = mana_get_strings,
+ .get_rxnfc = mana_get_rxnfc,
+ .get_rxfh_key_size = mana_get_rxfh_key_size,
+ .get_rxfh_indir_size = mana_rss_indir_size,
+ .get_rxfh = mana_get_rxfh,
+ .set_rxfh = mana_set_rxfh,
+ .get_channels = mana_get_channels,
+ .set_channels = mana_set_channels,
+};
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
new file mode 100644
index 000000000000..da255da62176
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#include "shm_channel.h"
+
+#define PAGE_FRAME_L48_WIDTH_BYTES 6
+#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
+#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF
+#define PAGE_FRAME_H4_WIDTH_BITS 4
+#define VECTOR_MASK 0xFFFF
+#define SHMEM_VF_RESET_STATE ((u32)-1)
+
+#define SMC_MSG_TYPE_ESTABLISH_HWC 1
+#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0
+
+#define SMC_MSG_TYPE_DESTROY_HWC 2
+#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0
+
+#define SMC_MSG_DIRECTION_REQUEST 0
+#define SMC_MSG_DIRECTION_RESPONSE 1
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+/* Shared memory channel protocol header
+ *
+ * msg_type: set on request and response; response matches request.
+ * msg_version: set on request; a newer PF writes back the (older) version
+ * from the request, while an older PF acts on the latest version it
+ * knows and sets that (lower) version in the response.
+ * direction: 0 for request, VF->PF; 1 for response, PF->VF.
+ * status: 0 on request,
+ * operation result on response (success = 0, failure = 1 or greater).
+ * reset_vf: if set on an establish or destroy request, an FLR is performed
+ * before/after the operation.
+ * owner_is_pf: 1 indicates PF owned, 0 indicates VF owned.
+ */
+union smc_proto_hdr {
+ u32 as_uint32;
+
+ struct {
+ u8 msg_type : 3;
+ u8 msg_version : 3;
+ u8 reserved_1 : 1;
+ u8 direction : 1;
+
+ u8 status;
+
+ u8 reserved_2;
+
+ u8 reset_vf : 1;
+ u8 reserved_3 : 6;
+ u8 owner_is_pf : 1;
+ };
+}; /* HW DATA */
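Since the header must overlay a single 32-bit shared-memory dword, a compile-time size check is a cheap guard on the bitfield layout (a sketch, not part of the patch; it could sit in any function, e.g. mana_smc_init()):

    BUILD_BUG_ON(sizeof(union smc_proto_hdr) != sizeof(u32));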
+
+#define SMC_APERTURE_BITS 256
+#define SMC_BASIC_UNIT (sizeof(u32))
+#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8))
+#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1)
+
+static int mana_smc_poll_register(void __iomem *base, bool reset)
+{
+ void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT;
+ u32 last_dword;
+ int i;
+
+ /* Poll the hardware for the ownership bit. This should be pretty fast,
+ * but loop for a while (20,000 iterations of a 1-2 ms sleep, i.e. a
+ * 20-40 second bound) in case the hardware or the PF driver is
+ * temporarily busy.
+ */
+ for (i = 0; i < 20 * 1000; i++) {
+ last_dword = readl(ptr);
+
+ /* shmem reads as 0xFFFFFFFF in the reset case */
+ if (reset && last_dword == SHMEM_VF_RESET_STATE)
+ return 0;
+
+ /* If bit_31 is set, the PF currently owns the SMC. */
+ if (!(last_dword & BIT(31)))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type,
+ u32 msg_version, bool reset_vf)
+{
+ void __iomem *base = sc->base;
+ union smc_proto_hdr hdr;
+ int err;
+
+ /* Wait for PF to respond. */
+ err = mana_smc_poll_register(base, reset_vf);
+ if (err)
+ return err;
+
+ hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT);
+
+ if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE)
+ return 0;
+
+ /* Validate protocol fields from the PF driver */
+ if (hdr.msg_type != msg_type || hdr.msg_version > msg_version ||
+ hdr.direction != SMC_MSG_DIRECTION_RESPONSE) {
+ dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n",
+ hdr.as_uint32, msg_type, msg_version);
+ return -EPROTO;
+ }
+
+ /* Validate the operation result */
+ if (hdr.status != 0) {
+ dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+void mana_smc_init(struct shm_channel *sc, struct device *dev,
+ void __iomem *base)
+{
+ sc->dev = dev;
+ sc->base = base;
+}
+
+int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ u64 cq_addr, u64 rq_addr, u64 sq_addr,
+ u32 eq_msix_index)
+{
+ union smc_proto_hdr *hdr;
+ u16 all_addr_h4bits = 0;
+ u16 frame_addr_seq = 0;
+ u64 frame_addr = 0;
+ u8 shm_buf[32];
+ u64 *shmem;
+ u32 *dword;
+ u8 *ptr;
+ int err;
+ int i;
+
+ /* Ensure VF already has possession of shared memory */
+ err = mana_smc_poll_register(sc->base, false);
+ if (err) {
+ dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err);
+ return err;
+ }
+
+ if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
+ !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+ return -EINVAL;
+
+ if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
+ return -EINVAL;
+
+ /* Scheme for packing four addresses and extra info into 256 bits.
+ *
+ * Addresses must be page frame aligned, so only frame address bits
+ * are transferred.
+ *
+ * 52-bit frame addresses are split into the lower 48 bits and upper
+ * 4 bits. The lower 48 bits of the 4 addresses are written sequentially
+ * from the start of the 256-bit shared memory region, followed by 16
+ * bits containing the upper 4 bits of the 4 addresses in sequence.
+ *
+ * A 16-bit EQ vector number fills out the next-to-last 32-bit dword.
+ *
+ * The final 32-bit dword is used for protocol control information as
+ * defined in smc_proto_hdr.
+ */
+
+ memset(shm_buf, 0, sizeof(shm_buf));
+ ptr = shm_buf;
+
+ /* EQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(eq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* CQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(cq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* RQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(rq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* SQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(sq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* High 4 bits of the four frame addresses */
+ *((u16 *)ptr) = all_addr_h4bits;
+ ptr += sizeof(u16);
+
+ /* EQ MSIX vector number */
+ *((u16 *)ptr) = (u16)eq_msix_index;
+ ptr += sizeof(u16);
+
+ /* 32-bit protocol header in final dword */
+ *((u32 *)ptr) = 0;
+
+ hdr = (union smc_proto_hdr *)ptr;
+ hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC;
+ hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION;
+ hdr->direction = SMC_MSG_DIRECTION_REQUEST;
+ hdr->reset_vf = reset_vf;
+
+ /* Write the 256-bit message buffer to shared memory (the final 32-bit
+ * write triggers HW to set the possession bit to PF).
+ */
+ dword = (u32 *)shm_buf;
+ for (i = 0; i < SMC_APERTURE_DWORDS; i++)
+ writel(*dword++, sc->base + i * SMC_BASIC_UNIT);
+
+ /* Read the shmem response (polling for VF possession) and validate it.
+ * For setup, waiting for the response on shared memory is not strictly
+ * necessary, since the caller waits later for the results to appear
+ * in EQEs.
+ */
+ err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC,
+ SMC_MSG_TYPE_ESTABLISH_HWC_VERSION,
+ reset_vf);
+ if (err) {
+ dev_err(sc->dev, "Error when setting up HWC: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
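Putting the pieces together, a caller drives the channel roughly as below (a sketch with error handling elided; the address and vector arguments are placeholders):

    struct shm_channel sc;

    mana_smc_init(&sc, dev, shmem_base);
    /* page-aligned HWC queue addresses plus the EQ MSI-X vector */
    err = mana_smc_setup_hwc(&sc, false, eq_pa, cq_pa, rq_pa, sq_pa, msix_idx);
    ...
    /* must complete before the HWC queue memory is freed */
    err = mana_smc_teardown_hwc(&sc, false);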
+
+int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf)
+{
+ union smc_proto_hdr hdr = {};
+ int err;
+
+ /* Ensure the VF already has possession of the shared memory */
+ err = mana_smc_poll_register(sc->base, false);
+ if (err) {
+ dev_err(sc->dev, "Timeout when tearing down HWC\n");
+ return err;
+ }
+
+ /* Set up protocol header for HWC destroy message */
+ hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC;
+ hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION;
+ hdr.direction = SMC_MSG_DIRECTION_REQUEST;
+ hdr.reset_vf = reset_vf;
+
+ /* Write message in high 32 bits of 256-bit shared memory, causing HW
+ * to set possession bit to PF.
+ */
+ writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT);
+
+ /* Read the shmem response (polling for VF possession) and validate it.
+ * For teardown, waiting for the response is required to ensure the
+ * hardware invalidates the MST entries before software frees the memory.
+ */
+ err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC,
+ SMC_MSG_TYPE_DESTROY_HWC_VERSION,
+ reset_vf);
+ if (err) {
+ dev_err(sc->dev, "Error when tearing down HWC: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h
new file mode 100644
index 000000000000..5199b41497ff
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _SHM_CHANNEL_H
+#define _SHM_CHANNEL_H
+
+struct shm_channel {
+ struct device *dev;
+ void __iomem *base;
+};
+
+void mana_smc_init(struct shm_channel *sc, struct device *dev,
+ void __iomem *base);
+
+int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ u64 cq_addr, u64 rq_addr, u64 sq_addr,
+ u32 eq_msix_index);
+
+int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf);
+
+#endif /* _SHM_CHANNEL_H */
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 49fd843c4c8a..b85733942053 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -485,7 +485,6 @@ static int moxart_mac_probe(struct platform_device *pdev)
ndev->base_addr = res->start;
priv->base = devm_ioremap_resource(p_dev, res);
if (IS_ERR(priv->base)) {
- dev_err(p_dev, "devm_ioremap_resource failed\n");
ret = PTR_ERR(priv->base);
goto init_fail;
}
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 05cb040c2677..2d3157e4d081 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_MICROSEMI
if NET_VENDOR_MICROSEMI
-# Users should depend on NET_SWITCHDEV, HAS_IOMEM
+# Users should depend on NET_SWITCHDEV, HAS_IOMEM, BRIDGE
config MSCC_OCELOT_SWITCH_LIB
select NET_DEVLINK
select REGMAP_MMIO
@@ -24,6 +24,7 @@ config MSCC_OCELOT_SWITCH_LIB
config MSCC_OCELOT_SWITCH
tristate "Ocelot switch driver"
+ depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF_NET
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 46e5c9136bac..0c4283319d7f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,6 +6,7 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
+#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
@@ -484,7 +485,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
/* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
- * reset */
+ * reset
+ */
ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
DEV_CLOCK_CFG);
@@ -529,22 +531,92 @@ void ocelot_port_disable(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_port_disable);
-void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
spin_lock(&ocelot_port->ts_id_lock);
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in cb[0] of sk_buff */
- clone->cb[0] = ocelot_port->ts_id;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
skb_queue_tail(&ocelot_port->tx_skbs, clone);
spin_unlock(&ocelot_port->ts_id_lock);
}
-EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
+
+u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+EXPORT_SYMBOL(ocelot_ptp_rew_op);
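The rewriter opcode thus packs the clone's timestamp ID above bit 3 of the PTP command; for instance, a two-step request whose clone was assigned ts_id 2 encodes as:

    u32 rew_op = IFH_REW_OP_TWO_STEP_PTP | (2 << 3);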
+
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
+{
+ struct ptp_header *hdr;
+ unsigned int ptp_class;
+ u8 msgtype, twostep;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return false;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return false;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ twostep = hdr->flag_field[0] & 0x2;
+
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
+ return true;
+
+ return false;
+}
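Here hdr->flag_field[0] & 0x2 tests the IEEE 1588 twoStepFlag, so a one-step Sync is precisely a Sync message with that flag clear; everything else falls back to two-step timestamping in the caller below.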
+
+int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ struct sk_buff *skb,
+ struct sk_buff **clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u8 ptp_cmd = ocelot_port->ptp_cmd;
+
+ /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
+ if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ if (ocelot_ptp_is_onestep_sync(skb)) {
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ return 0;
+ }
+
+ /* Fall back to two-step timestamping */
+ ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ }
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *clone = skb_clone_sk(skb);
+ if (!(*clone))
+ return -ENOMEM;
+
+ ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_txtstamp_request);
static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
struct timespec64 *ts)
@@ -603,7 +675,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (skb->cb[0] != id)
+ if (OCELOT_SKB_CB(skb)->ts_id != id)
continue;
__skb_unlink(skb, &port->tx_skbs);
skb_match = skb;
@@ -687,7 +759,7 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
struct skb_shared_hwtstamps *shhwtstamps;
- u64 tod_in_ns, full_ts_in_ns, cpuq;
+ u64 tod_in_ns, full_ts_in_ns;
u64 timestamp, src_port, len;
u32 xfh[OCELOT_TAG_LEN / 4];
struct net_device *dev;
@@ -704,7 +776,6 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
ocelot_xfh_get_src_port(xfh, &src_port);
ocelot_xfh_get_len(xfh, &len);
ocelot_xfh_get_rew_val(xfh, &timestamp);
- ocelot_xfh_get_cpuq(xfh, &cpuq);
if (WARN_ON(src_port >= ocelot->num_phys_ports))
return -EINVAL;
@@ -767,17 +838,11 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
*/
- if (ocelot->bridge_mask & BIT(src_port))
+ if (ocelot->ports[src_port]->bridge)
skb->offload_fwd_mark = 1;
skb->protocol = eth_type_trans(skb, dev);
-#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- if (skb->protocol == cpu_to_be16(ETH_P_MRP) &&
- cpuq & BIT(OCELOT_MRP_CPUQ))
- skb->offload_fwd_mark = 0;
-#endif
-
*nskb = skb;
return 0;
@@ -1190,6 +1255,26 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
return mask;
}
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+ struct net_device *bridge)
+{
+ u32 mask = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
+ ocelot_port->bridge == bridge)
+ mask |= BIT(port);
+ }
+
+ return mask;
+}
+
static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
{
u32 mask = 0;
@@ -1239,10 +1324,12 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
*/
mask = GENMASK(ocelot->num_phys_ports - 1, 0);
mask &= ~cpu_fwd_mask;
- } else if (ocelot->bridge_fwd_mask & BIT(port)) {
+ } else if (ocelot_port->bridge) {
+ struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot->bridge_fwd_mask & ~BIT(port);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask &= ~BIT(port);
if (bond) {
mask &= ~ocelot_get_bond_mask(ocelot, bond,
false);
@@ -1263,29 +1350,16 @@ EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- u32 port_cfg;
+ u32 learn_ena = 0;
- if (!(BIT(port) & ocelot->bridge_mask))
- return;
+ ocelot_port->stp_state = state;
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
+ if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
+ ocelot_port->learn_ena)
+ learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA;
- switch (state) {
- case BR_STATE_FORWARDING:
- ocelot->bridge_fwd_mask |= BIT(port);
- fallthrough;
- case BR_STATE_LEARNING:
- if (ocelot_port->learn_ena)
- port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
- break;
-
- default:
- port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
- ocelot->bridge_fwd_mask &= ~BIT(port);
- break;
- }
-
- ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
+ ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
+ ANA_PORT_PORT_CFG, port);
ocelot_apply_bridge_fwd_mask(ocelot);
}
@@ -1512,43 +1586,28 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
-int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge)
+void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- if (!ocelot->bridge_mask) {
- ocelot->hw_bridge_dev = bridge;
- } else {
- if (ocelot->hw_bridge_dev != bridge)
- /* This is adding the port to a second bridge, this is
- * unsupported */
- return -ENODEV;
- }
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- ocelot->bridge_mask |= BIT(port);
+ ocelot_port->bridge = bridge;
- return 0;
+ ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
-int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
- struct net_device *bridge)
+void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vlan pvid = {0}, native_vlan = {0};
- int ret;
- ocelot->bridge_mask &= ~BIT(port);
-
- if (!ocelot->bridge_mask)
- ocelot->hw_bridge_dev = NULL;
-
- ret = ocelot_port_vlan_filtering(ocelot, port, false);
- if (ret)
- return ret;
+ ocelot_port->bridge = NULL;
ocelot_port_set_pvid(ocelot, port, pvid);
ocelot_port_set_native_vlan(ocelot, port, native_vlan);
-
- return 0;
+ ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2051,6 +2110,9 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
}
+
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE);
+
/* Allow broadcast and unknown L2 multicast to the CPU. */
ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index a41b458b1b3e..8b843d3c9189 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -220,6 +220,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
+ if (a->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
filter->action.police_ena = true;
rate = a->police.rate_bytes_ps;
filter->action.pol.rate = div_u64(rate, 1000) * 8;
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 683da320bfd8..08b481a93460 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
- * This contains glue logic between the switchdev driver operations and the
- * mscc_ocelot_switch_lib.
- *
* Copyright (c) 2017, 2019 Microsemi Corporation
* Copyright 2020-2021 NXP Semiconductors
*/
@@ -15,13 +12,34 @@
#include "ocelot.h"
#include "ocelot_vcap.h"
-static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port)
+static const u8 mrp_test_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x01 };
+static const u8 mrp_control_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x02 };
+
+static int ocelot_mrp_find_partner_port(struct ocelot *ocelot,
+ struct ocelot_port *p)
+{
+ int i;
+
+ for (i = 0; i < ocelot->num_phys_ports; ++i) {
+ struct ocelot_port *ocelot_port = ocelot->ports[i];
+
+ if (!ocelot_port || p == ocelot_port)
+ continue;
+
+ if (ocelot_port->mrp_ring_id == p->mrp_ring_id)
+ return i;
+ }
+
+ return -1;
+}
+
+static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int id)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *filter;
block_vcap_is2 = &ocelot->block[VCAP_IS2];
- filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port,
+ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, id,
false);
if (!filter)
return 0;
@@ -29,6 +47,87 @@ static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port)
return ocelot_vcap_filter_del(ocelot, filter);
}
+static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
+ int dst_port)
+{
+ const u8 mrp_test_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct ocelot_vcap_filter *filter;
+ int err;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+ filter->prio = 1;
+ filter->id.cookie = src_port;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS2;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->ingress_port_mask = BIT(src_port);
+ ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
+ ether_addr_copy(filter->key.etype.dmac.mask, mrp_test_mask);
+ filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ filter->action.port_mask = BIT(dst_port);
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+
+static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port,
+ int prio, unsigned long cookie)
+{
+ const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+ struct ocelot_vcap_filter *filter;
+ int err;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+ filter->prio = prio;
+ filter->id.cookie = cookie;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS2;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->ingress_port_mask = BIT(port);
+ /* Either the control or the test dmac can be used here because the
+ * mask doesn't cover the LSB.
+ */
+ ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
+ ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0x0;
+ filter->action.cpu_copy_ena = true;
+ filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+
+static void ocelot_mrp_save_mac(struct ocelot *ocelot,
+ struct ocelot_port *port)
+{
+ ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
+ port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
+ port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+}
+
+static void ocelot_mrp_del_mac(struct ocelot *ocelot,
+ struct ocelot_port *port)
+{
+ ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid);
+}
+
int ocelot_mrp_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp)
{
@@ -45,18 +144,7 @@ int ocelot_mrp_add(struct ocelot *ocelot, int port,
if (mrp->p_port != dev && mrp->s_port != dev)
return 0;
- if (ocelot->mrp_ring_id != 0 &&
- ocelot->mrp_s_port &&
- ocelot->mrp_p_port)
- return -EINVAL;
-
- if (mrp->p_port == dev)
- ocelot->mrp_p_port = dev;
-
- if (mrp->s_port == dev)
- ocelot->mrp_s_port = dev;
-
- ocelot->mrp_ring_id = mrp->ring_id;
+ ocelot_port->mrp_ring_id = mrp->ring_id;
return 0;
}
@@ -66,33 +154,14 @@ int ocelot_mrp_del(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_port_private *priv;
- struct net_device *dev;
if (!ocelot_port)
return -EOPNOTSUPP;
- priv = container_of(ocelot_port, struct ocelot_port_private, port);
- dev = priv->dev;
-
- if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- if (ocelot->mrp_ring_id == 0 &&
- !ocelot->mrp_s_port &&
- !ocelot->mrp_p_port)
- return -EINVAL;
-
- if (ocelot_mrp_del_vcap(ocelot, priv->chip_port))
- return -EINVAL;
-
- if (ocelot->mrp_p_port == dev)
- ocelot->mrp_p_port = NULL;
-
- if (ocelot->mrp_s_port == dev)
- ocelot->mrp_s_port = NULL;
-
- ocelot->mrp_ring_id = 0;
+ ocelot_port->mrp_ring_id = 0;
return 0;
}
@@ -102,49 +171,39 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_vcap_filter *filter;
- struct ocelot_port_private *priv;
- struct net_device *dev;
+ int dst_port;
int err;
if (!ocelot_port)
return -EOPNOTSUPP;
- priv = container_of(ocelot_port, struct ocelot_port_private, port);
- dev = priv->dev;
-
- if (ocelot->mrp_ring_id != mrp->ring_id)
- return -EINVAL;
-
- if (!mrp->sw_backup)
+ if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup)
return -EOPNOTSUPP;
- if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
- if (!filter)
- return -ENOMEM;
+ ocelot_mrp_save_mac(ocelot, ocelot_port);
- filter->key_type = OCELOT_VCAP_KEY_ETYPE;
- filter->prio = 1;
- filter->id.cookie = priv->chip_port;
- filter->id.tc_offload = false;
- filter->block_id = VCAP_IS2;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- filter->ingress_port_mask = BIT(priv->chip_port);
- *(__be16 *)filter->key.etype.etype.value = htons(ETH_P_MRP);
- *(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
- filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- filter->action.port_mask = 0x0;
- filter->action.cpu_copy_ena = true;
- filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+ if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
+ return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port);
- err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
+ if (dst_port == -1)
+ return -EINVAL;
+
+ err = ocelot_mrp_redirect_add_vcap(ocelot, port, dst_port);
if (err)
- kfree(filter);
+ return err;
- return err;
+ err = ocelot_mrp_copy_add_vcap(ocelot, port, 2,
+ port + ocelot->num_phys_ports);
+ if (err) {
+ ocelot_mrp_del_vcap(ocelot, port);
+ return err;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_mrp_add_ring_role);
@@ -152,24 +211,32 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_port_private *priv;
- struct net_device *dev;
+ int i;
if (!ocelot_port)
return -EOPNOTSUPP;
- priv = container_of(ocelot_port, struct ocelot_port_private, port);
- dev = priv->dev;
-
- if (ocelot->mrp_ring_id != mrp->ring_id)
- return -EINVAL;
-
- if (!mrp->sw_backup)
+ if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup)
return -EOPNOTSUPP;
- if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- return ocelot_mrp_del_vcap(ocelot, priv->chip_port);
+ ocelot_mrp_del_vcap(ocelot, port);
+ ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports);
+
+ for (i = 0; i < ocelot->num_phys_ports; ++i) {
+ ocelot_port = ocelot->ports[i];
+
+ if (!ocelot_port)
+ continue;
+
+ if (ocelot_port->mrp_ring_id != 0)
+ goto out;
+ }
+
+ ocelot_mrp_del_mac(ocelot, ocelot->ports[port]);
+out:
+ return 0;
}
EXPORT_SYMBOL(ocelot_mrp_del_ring_role);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 12cb6867a2d0..aad33d22c33f 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -251,6 +251,12 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
return -EEXIST;
}
+ if (action->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
pol.burst = action->police.burst;
@@ -501,21 +507,17 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
/* Check if timestamping is needed */
if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- rew_op = ocelot_port->ptp_cmd;
+ struct sk_buff *clone = NULL;
- if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- struct sk_buff *clone;
-
- clone = skb_clone_sk(skb);
- if (!clone) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+ if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
- ocelot_port_add_txtstamp_skb(ocelot, port, clone);
+ if (clone)
+ OCELOT_SKB_CB(skb)->clone = clone;
- rew_op |= clone->cb[0] << 3;
- }
+ rew_op = ocelot_ptp_rew_op(skb);
}
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
@@ -1111,77 +1113,213 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge)
+static void ocelot_inherit_brport_flags(struct ocelot *ocelot, int port,
+ struct net_device *brport_dev)
+{
+ struct switchdev_brport_flags flags = {0};
+ int flag;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+
+ for_each_set_bit(flag, &flags.mask, 32)
+ if (br_port_flag_is_set(brport_dev, BIT(flag)))
+ flags.val |= BIT(flag);
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+}
+
+static void ocelot_clear_brport_flags(struct ocelot *ocelot, int port)
{
struct switchdev_brport_flags flags;
- int err;
flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
- flags.val = flags.mask;
+ flags.val = flags.mask & ~BR_LEARNING;
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+}
- err = ocelot_port_bridge_join(ocelot, port, bridge);
+static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
+ struct net_device *brport_dev,
+ struct net_device *bridge_dev,
+ struct netlink_ext_ack *extack)
+{
+ clock_t ageing_time;
+ u8 stp_state;
+ int err;
+
+ ocelot_inherit_brport_flags(ocelot, port, brport_dev);
+
+ stp_state = br_port_get_stp_state(brport_dev);
+ ocelot_bridge_stp_state_set(ocelot, port, stp_state);
+
+ err = ocelot_port_vlan_filtering(ocelot, port,
+ br_vlan_enabled(bridge_dev));
if (err)
return err;
- ocelot_port_bridge_flags(ocelot, port, flags);
+ ageing_time = br_get_ageing_time(bridge_dev);
+ ocelot_port_attr_ageing_set(ocelot, port, ageing_time);
+
+ err = br_mdb_replay(bridge_dev, brport_dev,
+ &ocelot_switchdev_blocking_nb, extack);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ err = br_fdb_replay(bridge_dev, brport_dev, &ocelot_switchdev_nb);
+ if (err)
+ return err;
+
+ err = br_vlan_replay(bridge_dev, brport_dev,
+ &ocelot_switchdev_blocking_nb, extack);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ return 0;
+}
+
+static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
+{
+ int err;
+
+ err = ocelot_port_vlan_filtering(ocelot, port, false);
+ if (err)
+ return err;
+
+ ocelot_clear_brport_flags(ocelot, port);
+
+ ocelot_bridge_stp_state_set(ocelot, port, BR_STATE_FORWARDING);
+
+ return 0;
+}
+
+static int ocelot_netdevice_bridge_join(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int err;
+
+ ocelot_port_bridge_join(ocelot, port, bridge);
+
+ err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack);
+ if (err)
+ goto err_switchdev_sync;
return 0;
+
+err_switchdev_sync:
+ ocelot_port_bridge_leave(ocelot, port, bridge);
+ return err;
}
-static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port,
+static int ocelot_netdevice_bridge_leave(struct net_device *dev,
+ struct net_device *brport_dev,
struct net_device *bridge)
{
- struct switchdev_brport_flags flags;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
int err;
- flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
- flags.val = flags.mask & ~BR_LEARNING;
+ err = ocelot_switchdev_unsync(ocelot, port);
+ if (err)
+ return err;
- err = ocelot_port_bridge_leave(ocelot, port, bridge);
+ ocelot_port_bridge_leave(ocelot, port, bridge);
- ocelot_port_bridge_flags(ocelot, port, flags);
+ return 0;
+}
+
+static int ocelot_netdevice_lag_join(struct net_device *dev,
+ struct net_device *bond,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ struct net_device *bridge_dev;
+ int port = priv->chip_port;
+ int err;
+ err = ocelot_port_lag_join(ocelot, port, bond, info);
+ if (err == -EOPNOTSUPP) {
+ NL_SET_ERR_MSG_MOD(extack, "Offloading not supported");
+ return 0;
+ }
+
+ bridge_dev = netdev_master_upper_dev_get(bond);
+ if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
+ return 0;
+
+ err = ocelot_netdevice_bridge_join(dev, bond, bridge_dev, extack);
+ if (err)
+ goto err_bridge_join;
+
+ return 0;
+
+err_bridge_join:
+ ocelot_port_lag_leave(ocelot, port, bond);
return err;
}
-static int ocelot_netdevice_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int ocelot_netdevice_lag_leave(struct net_device *dev,
+ struct net_device *bond)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
+ struct net_device *bridge_dev;
int port = priv->chip_port;
+
+ ocelot_port_lag_leave(ocelot, port, bond);
+
+ bridge_dev = netdev_master_upper_dev_get(bond);
+ if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
+ return 0;
+
+ return ocelot_netdevice_bridge_leave(dev, bond, bridge_dev);
+}
+
+static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack;
int err = 0;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
if (netif_is_bridge_master(info->upper_dev)) {
- if (info->linking) {
- err = ocelot_netdevice_bridge_join(ocelot, port,
- info->upper_dev);
- } else {
- err = ocelot_netdevice_bridge_leave(ocelot, port,
+ if (info->linking)
+ err = ocelot_netdevice_bridge_join(dev, dev,
+ info->upper_dev,
+ extack);
+ else
+ err = ocelot_netdevice_bridge_leave(dev, dev,
info->upper_dev);
- }
}
if (netif_is_lag_master(info->upper_dev)) {
- if (info->linking) {
- err = ocelot_port_lag_join(ocelot, port,
- info->upper_dev,
- info->upper_info);
- if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Offloading not supported");
- err = 0;
- }
- } else {
- ocelot_port_lag_leave(ocelot, port,
- info->upper_dev);
- }
+ if (info->linking)
+ err = ocelot_netdevice_lag_join(dev, info->upper_dev,
+ info->upper_info, extack);
+ else
+ ocelot_netdevice_lag_leave(dev, info->upper_dev);
}
return notifier_from_errno(err);
}
+/* Treat CHANGEUPPER events on an offloaded LAG as individual CHANGEUPPER
+ * events for the lower physical ports of the LAG.
+ * If the LAG upper isn't offloaded, ignore its CHANGEUPPER events.
+ * In case the LAG joined a bridge, notify that we are offloading it and can do
+ * forwarding in hardware towards it.
+ */
static int
ocelot_netdevice_lag_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
@@ -1191,6 +1329,12 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
int err = NOTIFY_DONE;
netdev_for_each_lower_dev(dev, lower, iter) {
+ struct ocelot_port_private *priv = netdev_priv(lower);
+ struct ocelot_port *ocelot_port = &priv->port;
+
+ if (ocelot_port->bond != dev)
+ return NOTIFY_OK;
+
err = ocelot_netdevice_changeupper(lower, info);
if (err)
return notifier_from_errno(err);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index a33ab315cc6b..87ad2137ba06 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -4,6 +4,8 @@
* Copyright (c) 2017 Microsemi Corporation
* Copyright 2020 NXP
*/
+#include <linux/time64.h>
+
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 37a232911395..7945393a0655 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -761,6 +761,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
etype.value, etype.mask);
}
+ break;
}
default:
break;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 8f2f091bce89..9cfcd5500462 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6657,7 +6657,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
/**
* s2io_set_link - Set the LInk status
- * @work: work struct containing a pointer to device private structue
+ * @work: work struct containing a pointer to device private structure
* Description: Sets the link status for the adapter
*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 9c86f4f9cd42..63f65193dd49 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -454,49 +454,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
#define vxge_debug_ll_config(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_ll_config(level, fmt, ...)
+#define vxge_debug_ll_config(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_init(level, fmt, ...)
+#define vxge_debug_init(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_tx(level, fmt, ...)
+#define vxge_debug_tx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_rx(level, fmt, ...)
+#define vxge_debug_rx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_mem(level, fmt, ...)
+#define vxge_debug_mem(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_entryexit(level, fmt, ...)
+#define vxge_debug_entryexit(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
#else
-#define vxge_debug_intr(level, fmt, ...)
+#define vxge_debug_intr(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index bdbf0726145e..605a1617b195 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -419,8 +419,8 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
return data;
alink = repr->app_priv;
for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
- data = nfp_pr_et(data, "q%u_no_wait", i);
- data = nfp_pr_et(data, "q%u_delayed", i);
+ ethtool_sprintf(&data, "q%u_no_wait", i);
+ ethtool_sprintf(&data, "q%u_delayed", i);
}
return data;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 56833a41f3d2..31377923ea3d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -47,6 +47,7 @@ struct nfp_app;
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
+#define NFP_FL_FEATS_QOS_PPS BIT(9)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -61,7 +62,8 @@ struct nfp_app;
NFP_FL_FEATS_FLOW_MOD | \
NFP_FL_FEATS_PRE_TUN_RULES | \
NFP_FL_FEATS_IPV6_TUN | \
- NFP_FL_FEATS_VLAN_QINQ)
+ NFP_FL_FEATS_VLAN_QINQ | \
+ NFP_FL_FEATS_QOS_PPS)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index d4ce8f9ef3cc..784c6dbf8bc4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -10,19 +10,26 @@
#include "../nfp_port.h"
#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
+#define NFP_FL_QOS_PPS BIT(15)
struct nfp_police_cfg_head {
__be32 flags_opts;
__be32 port;
};
+enum NFP_FL_QOS_TYPES {
+ NFP_FL_QOS_TYPE_BPS,
+ NFP_FL_QOS_TYPE_PPS,
+ NFP_FL_QOS_TYPE_MAX,
+};
+
/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
* See RFC 2698 for more details.
* ----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | Flag options |
+ * | Reserved |p| Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Port Ingress |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -38,6 +45,9 @@ struct nfp_police_cfg_head {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Committed Information Rate |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * Word[0] (Flag options):
+ * [15] p (pps): 1 for pps, 0 for bps
+ *
*/
struct nfp_police_config {
struct nfp_police_cfg_head head;
@@ -62,13 +72,18 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow,
struct netlink_ext_ack *extack)
{
- struct flow_action_entry *action = &flow->rule->action.entries[0];
+ struct flow_action_entry *paction = &flow->rule->action.entries[0];
+ u32 action_num = flow->rule->action.num_entries;
struct nfp_flower_priv *fl_priv = app->priv;
+ struct flow_action_entry *action = NULL;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_police_config *config;
+ u32 netdev_port_id, i;
struct nfp_repr *repr;
struct sk_buff *skb;
- u32 netdev_port_id;
+ bool pps_support;
+ u32 bps_num = 0;
+ u32 pps_num = 0;
u32 burst;
u64 rate;
@@ -78,6 +93,8 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
}
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+ pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
if (repr_priv->block_shared) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
@@ -89,9 +106,18 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (!flow_offload_has_one_action(&flow->rule->action)) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
- return -EOPNOTSUPP;
+ if (pps_support) {
+ if (action_num > 2 || action_num == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload only support action number 1 or 2");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (!flow_offload_has_one_action(&flow->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires a single action");
+ return -EOPNOTSUPP;
+ }
}
if (flow->common.prio != 1) {
@@ -99,31 +125,69 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (action->id != FLOW_ACTION_POLICE) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
- return -EOPNOTSUPP;
+ for (i = 0; i < action_num; i++) {
+ action = paction + i;
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+ if (action->police.rate_bytes_ps > 0) {
+ if (bps_num++) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload only supports one BPS action");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (action->police.rate_pkt_ps > 0) {
+ if (!pps_support) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: FW does not support PPS action");
+ return -EOPNOTSUPP;
+ }
+ if (pps_num++) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload only supports one PPS action");
+ return -EOPNOTSUPP;
+ }
+ }
}
- rate = action->police.rate_bytes_ps;
- burst = action->police.burst;
- netdev_port_id = nfp_repr_get_port_id(netdev);
-
- skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
- NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- config = nfp_flower_cmsg_get_data(skb);
- memset(config, 0, sizeof(struct nfp_police_config));
- config->head.port = cpu_to_be32(netdev_port_id);
- config->bkt_tkn_p = cpu_to_be32(burst);
- config->bkt_tkn_c = cpu_to_be32(burst);
- config->pbs = cpu_to_be32(burst);
- config->cbs = cpu_to_be32(burst);
- config->pir = cpu_to_be32(rate);
- config->cir = cpu_to_be32(rate);
- nfp_ctrl_tx(repr->app->ctrl, skb);
+ for (i = 0; i < action_num; i++) {
+ /* Set QoS data for this interface */
+ action = paction + i;
+ if (action->police.rate_bytes_ps > 0) {
+ rate = action->police.rate_bytes_ps;
+ burst = action->police.burst;
+ } else if (action->police.rate_pkt_ps > 0) {
+ rate = action->police.rate_pkt_ps;
+ burst = action->police.burst_pkt;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit is neither BPS nor PPS");
+ continue;
+ }
+ if (rate != 0) {
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ if (action->police.rate_pkt_ps > 0)
+ config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
+ config->head.port = cpu_to_be32(netdev_port_id);
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+ }
+ }
repr_priv->qos_table.netdev_port_id = netdev_port_id;
fl_priv->qos_rate_limiters++;
if (fl_priv->qos_rate_limiters == 1)
@@ -141,9 +205,10 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *fl_priv = app->priv;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_police_config *config;
+ u32 netdev_port_id, i;
struct nfp_repr *repr;
struct sk_buff *skb;
- u32 netdev_port_id;
+ bool pps_support;
if (!nfp_netdev_is_nfp_repr(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
@@ -153,27 +218,38 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
netdev_port_id = nfp_repr_get_port_id(netdev);
repr_priv = repr->app_priv;
+ pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
if (!repr_priv->qos_table.netdev_port_id) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
return -EOPNOTSUPP;
}
- skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
- NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- /* Clear all qos associate data for this interface */
memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
fl_priv->qos_rate_limiters--;
if (!fl_priv->qos_rate_limiters)
cancel_delayed_work_sync(&fl_priv->qos_stats_work);
-
- config = nfp_flower_cmsg_get_data(skb);
- memset(config, 0, sizeof(struct nfp_police_config));
- config->head.port = cpu_to_be32(netdev_port_id);
- nfp_ctrl_tx(repr->app->ctrl, skb);
+ for (i = 0; i < NFP_FL_QOS_TYPE_MAX; i++) {
+ if (i == NFP_FL_QOS_TYPE_PPS && !pps_support)
+ break;
+ /* Clear QoS data for this interface (i == 0: bps, i == 1: pps).
+ * There is no need to check if a specific QOS_TYPE was
+ * configured as the firmware handles clearing a QoS entry
+ * safely, even if it wasn't explicitly added.
+ */
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ if (i == NFP_FL_QOS_TYPE_PPS)
+ config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
+ config->head.port = cpu_to_be32(netdev_port_id);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+ }
return 0;
}
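Note on the configuration path above: the driver programs the trTCM with identical peak and committed parameters, so each cmsg effectively describes a single-rate policer, and the PPS/BPS distinction lives entirely in bit 15 of flags_opts. A minimal sketch of that packing, using a hypothetical helper name (the fields and NFP_FL_QOS_PPS are as defined earlier in this file):

    /* Hypothetical helper: fill one policer cmsg. The pps flag selects the
     * packets-per-second interpretation of rate/burst via flags_opts bit 15;
     * peak == committed degenerates the trTCM into a single-rate policer.
     */
    static void nfp_fl_fill_police(struct nfp_police_config *config,
                                   u32 port_id, u64 rate, u32 burst, bool pps)
    {
            memset(config, 0, sizeof(*config));
            if (pps)
                    config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
            config->head.port = cpu_to_be32(port_id);
            config->bkt_tkn_p = cpu_to_be32(burst);
            config->bkt_tkn_c = cpu_to_be32(burst);
            config->pbs = cpu_to_be32(burst);
            config->cbs = cpu_to_be32(burst);
            config->pir = cpu_to_be32(rate);
            config->cir = cpu_to_be32(rate);
    }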
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 76d13af46a7a..3e9baff07100 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -18,7 +18,6 @@ struct netdev_bpf;
struct netlink_ext_ack;
struct pci_dev;
struct sk_buff;
-struct sk_buff;
struct nfp_app;
struct nfp_cpp;
struct nfp_pf;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 713ee3041d49..bea978df7713 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
attrs.split = eth_port.is_split;
attrs.splittable = !attrs.split;
+ attrs.lanes = eth_port.port_lanes;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = eth_port.label_port;
attrs.phys.split_subport_number = eth_port.label_subport;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 9c9ae33d84ce..1b482446536d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -429,17 +429,6 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
-__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
- va_end(args);
-
- return data + ETH_GSTRING_LEN;
-}
-
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
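For context on the conversions below: ethtool_sprintf() is the core-kernel replacement for the driver-local nfp_pr_et() removed above. It takes u8 ** so it can advance the string cursor by ETH_GSTRING_LEN itself, which is why each call site drops the `data =` assignment. Its shape is essentially the removed helper (reference sketch, not driver code):

    void ethtool_sprintf(u8 **data, const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
            va_end(args);

            *data += ETH_GSTRING_LEN;       /* advance to the next string slot */
    }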
@@ -454,29 +443,29 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
int i;
for (i = 0; i < nn->max_r_vecs; i++) {
- data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
- data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
- data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
+ ethtool_sprintf(&data, "rvec_%u_rx_pkts", i);
+ ethtool_sprintf(&data, "rvec_%u_tx_pkts", i);
+ ethtool_sprintf(&data, "rvec_%u_tx_busy", i);
}
- data = nfp_pr_et(data, "hw_rx_csum_ok");
- data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
- data = nfp_pr_et(data, "hw_rx_csum_complete");
- data = nfp_pr_et(data, "hw_rx_csum_err");
- data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
- data = nfp_pr_et(data, "rx_tls_decrypted_packets");
- data = nfp_pr_et(data, "hw_tx_csum");
- data = nfp_pr_et(data, "hw_tx_inner_csum");
- data = nfp_pr_et(data, "tx_gather");
- data = nfp_pr_et(data, "tx_lso");
- data = nfp_pr_et(data, "tx_tls_encrypted_packets");
- data = nfp_pr_et(data, "tx_tls_ooo");
- data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
-
- data = nfp_pr_et(data, "hw_tls_no_space");
- data = nfp_pr_et(data, "rx_tls_resync_req_ok");
- data = nfp_pr_et(data, "rx_tls_resync_req_ign");
- data = nfp_pr_et(data, "rx_tls_resync_sent");
+ ethtool_sprintf(&data, "hw_rx_csum_ok");
+ ethtool_sprintf(&data, "hw_rx_csum_inner_ok");
+ ethtool_sprintf(&data, "hw_rx_csum_complete");
+ ethtool_sprintf(&data, "hw_rx_csum_err");
+ ethtool_sprintf(&data, "rx_replace_buf_alloc_fail");
+ ethtool_sprintf(&data, "rx_tls_decrypted_packets");
+ ethtool_sprintf(&data, "hw_tx_csum");
+ ethtool_sprintf(&data, "hw_tx_inner_csum");
+ ethtool_sprintf(&data, "tx_gather");
+ ethtool_sprintf(&data, "tx_lso");
+ ethtool_sprintf(&data, "tx_tls_encrypted_packets");
+ ethtool_sprintf(&data, "tx_tls_ooo");
+ ethtool_sprintf(&data, "tx_tls_drop_no_sync_data");
+
+ ethtool_sprintf(&data, "hw_tls_no_space");
+ ethtool_sprintf(&data, "rx_tls_resync_req_ok");
+ ethtool_sprintf(&data, "rx_tls_resync_req_ign");
+ ethtool_sprintf(&data, "rx_tls_resync_sent");
return data;
}
@@ -550,19 +539,19 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
swap_off = repr * NN_ET_SWITCH_STATS_LEN;
for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
- data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);
+ ethtool_sprintf(&data, nfp_net_et_stats[i + swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
- data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);
+ ethtool_sprintf(&data, nfp_net_et_stats[i - swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
- data = nfp_pr_et(data, nfp_net_et_stats[i].name);
+ ethtool_sprintf(&data, nfp_net_et_stats[i].name);
for (i = 0; i < num_vecs; i++) {
- data = nfp_pr_et(data, "rxq_%u_pkts", i);
- data = nfp_pr_et(data, "rxq_%u_bytes", i);
- data = nfp_pr_et(data, "txq_%u_pkts", i);
- data = nfp_pr_et(data, "txq_%u_bytes", i);
+ ethtool_sprintf(&data, "rxq_%u_pkts", i);
+ ethtool_sprintf(&data, "rxq_%u_bytes", i);
+ ethtool_sprintf(&data, "txq_%u_pkts", i);
+ ethtool_sprintf(&data, "txq_%u_bytes", i);
}
return data;
@@ -610,15 +599,15 @@ static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
} else {
- data = nfp_pr_et(data, "dev_unknown_stat%u", id);
+ ethtool_sprintf(&data, "dev_unknown_stat%u", id);
}
}
for (i = 0; i < nn->max_r_vecs; i++) {
- data = nfp_pr_et(data, "rxq_%u_pkts", i);
- data = nfp_pr_et(data, "rxq_%u_bytes", i);
- data = nfp_pr_et(data, "txq_%u_pkts", i);
- data = nfp_pr_et(data, "txq_%u_bytes", i);
+ ethtool_sprintf(&data, "rxq_%u_pkts", i);
+ ethtool_sprintf(&data, "rxq_%u_bytes", i);
+ ethtool_sprintf(&data, "txq_%u_pkts", i);
+ ethtool_sprintf(&data, "txq_%u_bytes", i);
}
return data;
@@ -666,7 +655,7 @@ static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
return data;
for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
- data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);
+ ethtool_sprintf(&data, "mac.%s", nfp_mac_et_stats[i].name);
return data;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index d7fd203bb180..ae4da189d955 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -92,8 +92,6 @@ struct nfp_port {
extern const struct ethtool_ops nfp_port_ethtool_ops;
-__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...);
-
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d3cbb4215f5c..64c6842bd452 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1044,7 +1044,8 @@ static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
/* This function should never be called when there are no
- buffers */
+ * buffers
+ */
netif_stop_queue(ndev);
spin_unlock_irq(&pldat->lock);
WARN(1, "BUG! TX request when no free TX buffers!\n");
@@ -1318,7 +1319,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
/* Allocate a chunk of memory for the DMA ethernet buffers
- and descriptors */
+ * and descriptors
+ */
pldat->dma_buff_base_v =
dma_alloc_coherent(dev,
pldat->dma_buff_size, &dma_handle,
@@ -1348,9 +1350,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_get_mac(pldat, ndev->dev_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- const char *macaddr = of_get_mac_address(np);
- if (!IS_ERR(macaddr))
- ether_addr_copy(ndev->dev_addr, macaddr);
+ of_get_mac_address(np, ndev->dev_addr);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
@@ -1365,7 +1365,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_mii_mngt_reset(pldat);
/* Force default PHY interface setup in chip, this will probably be
- changed by the PHY driver */
+ * changed by the PHY driver
+ */
pldat->link = 0;
pldat->speed = 100;
pldat->duplex = DUPLEX_FULL;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 55cef5b16aa5..a6823c4d355d 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -612,15 +612,6 @@ void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
struct pch_gbe_rx_ring *rx_ring);
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-u32 pch_ch_control_read(struct pci_dev *pdev);
-void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-u32 pch_ch_event_read(struct pci_dev *pdev);
-void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-u64 pch_rx_snap_read(struct pci_dev *pdev);
-u64 pch_tx_snap_read(struct pci_dev *pdev);
-int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
/* pch_gbe_param.c */
void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 140cee7c459d..334af49e5add 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
+#include <linux/ptp_pch.h>
#include <linux/gpio.h>
#define DRV_VERSION "1.01"
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 8d3c2d3cb10d..4e7642a2d25f 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
ionic_txrx.o ionic_stats.o ionic_fw.o
+ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 084a924431d5..66204106f83e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -20,6 +20,10 @@ struct ionic_lif;
#define DEVCMD_TIMEOUT 10
+#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
+#define NORMAL_PPB 1000000000 /* one billion parts per billion */
+#define SCALED_PPM (1000000ull << 16) /* one million ppm in 16.16 fixed point, i.e. unity */
+
struct ionic_vf {
u16 index;
u8 macaddr[6];
@@ -64,6 +68,8 @@ struct ionic_admin_ctx {
union ionic_adminq_comp comp;
};
+int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err);
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
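NORMAL_PPB and SCALED_PPM above are the two frequency-adjustment units used by PTP clocks: plain parts per billion, and parts per million in 16.16 fixed point as passed to the .adjfine() callback. A minimal conversion sketch under those definitions (hypothetical helper; the driver's real PHC math lands in ionic_phc.c):

    #include <linux/kernel.h>
    #include <linux/math64.h>

    /* Hypothetical: scale a nominal tick-to-ns multiplier by scaled_ppm,
     * where one ppm == (1 << 16) and SCALED_PPM == 10^6 ppm (unity).
     */
    static u32 phc_scaled_mult(u32 nominal_mult, long scaled_ppm)
    {
            u64 delta = div64_u64((u64)nominal_mult * abs(scaled_ppm), SCALED_PPM);

            return scaled_ppm < 0 ? nominal_mult - (u32)delta
                                  : nominal_mult + (u32)delta;
    }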
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index b0d8499d373b..e4a5416adc80 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -184,6 +184,10 @@ static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct device *dev = ionic->dev;
int ret = 0;
+ if (ionic->lif &&
+ test_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
+ return -EBUSY;
+
if (num_vfs > 0) {
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index fb2b5bf179d7..1dfe962e22e0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -14,18 +14,23 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ struct ionic_lif *lif = ionic->lif;
int hb;
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
- if (!ionic->lif)
+ if (!lif)
return;
hb = ionic_heartbeat_check(ionic);
+ dev_dbg(ionic->dev, "%s: hb %d running %d UP %d\n",
+ __func__, hb, netif_running(lif->netdev),
+ test_bit(IONIC_LIF_F_UP, lif->state));
- if (hb >= 0)
- ionic_link_status_check_request(ionic->lif, CAN_NOT_SLEEP);
+ if (hb >= 0 &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -74,6 +79,8 @@ int ionic_dev_setup(struct ionic *ionic)
idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;
+ idev->hwstamp_regs = &idev->dev_info_regs->hwstamp;
+
sig = ioread32(&idev->dev_info_regs->signature);
if (sig != IONIC_DEV_INFO_SIGNATURE) {
dev_err(dev, "Incompatible firmware signature %x", sig);
@@ -89,9 +96,17 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- idev->last_fw_status = 0xff;
timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+
+ /* set times to ensure the first check will proceed */
+ atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
+ idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
+ /* init as ready, so no transition if the first check succeeds */
+ idev->last_fw_hb = 0;
+ idev->fw_hb_ready = true;
+ idev->fw_status_ready = true;
+
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
@@ -105,29 +120,38 @@ int ionic_dev_setup(struct ionic *ionic)
int ionic_heartbeat_check(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
- unsigned long hb_time;
+ unsigned long check_time, last_check_time;
+ bool fw_status_ready, fw_hb_ready;
u8 fw_status;
- u32 hb;
+ u32 fw_hb;
- /* wait a little more than one second before testing again */
- hb_time = jiffies;
- if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
+ /* wait at least one second before testing again */
+ check_time = jiffies;
+ last_check_time = atomic_long_read(&idev->last_check_time);
+do_check_time:
+ if (time_before(check_time, last_check_time + HZ))
return 0;
+ if (!atomic_long_try_cmpxchg_relaxed(&idev->last_check_time,
+ &last_check_time, check_time)) {
+ /* if called concurrently, only the first should proceed. */
+ dev_dbg(ionic->dev, "%s: do_check_time again\n", __func__);
+ goto do_check_time;
+ }
/* firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
*/
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- if (fw_status != 0xff)
- fw_status &= IONIC_FW_STS_F_RUNNING; /* use only the run bit */
+ fw_status_ready = (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
/* is this a transition? */
- if (fw_status != idev->last_fw_status &&
- idev->last_fw_status != 0xff) {
+ if (fw_status_ready != idev->fw_status_ready) {
struct ionic_lif *lif = ionic->lif;
bool trigger = false;
- if (!fw_status || fw_status == 0xff) {
+ idev->fw_status_ready = fw_status_ready;
+
+ if (!fw_status_ready) {
dev_info(ionic->dev, "FW stopped %u\n", fw_status);
if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
trigger = true;
@@ -141,44 +165,47 @@ int ionic_heartbeat_check(struct ionic *ionic)
struct ionic_deferred_work *work;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- dev_err(ionic->dev, "LIF reset trigger dropped\n");
- } else {
+ if (work) {
work->type = IONIC_DW_TYPE_LIF_RESET;
- if (fw_status & IONIC_FW_STS_F_RUNNING &&
- fw_status != 0xff)
- work->fw_status = 1;
+ work->fw_status = fw_status_ready;
ionic_lif_deferred_enqueue(&lif->deferred, work);
}
}
}
- idev->last_fw_status = fw_status;
- if (!fw_status || fw_status == 0xff)
+ if (!fw_status_ready)
return -ENXIO;
- /* early FW has no heartbeat, else FW will return non-zero */
- hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
- if (!hb)
+ /* wait at least one watchdog period since the last heartbeat */
+ last_check_time = idev->last_hb_time;
+ if (time_before(check_time, last_check_time + ionic->watchdog_period))
return 0;
- /* are we stalled? */
- if (hb == idev->last_hb) {
- /* only complain once for each stall seen */
- if (idev->last_hb_time != 1) {
- dev_info(ionic->dev, "FW heartbeat stalled at %d\n",
- idev->last_hb);
- idev->last_hb_time = 1;
- }
+ fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
+ fw_hb_ready = fw_hb != idev->last_fw_hb;
- return -ENXIO;
+ /* early FW version had no heartbeat, so fake it */
+ if (!fw_hb_ready && !fw_hb)
+ fw_hb_ready = true;
+
+ dev_dbg(ionic->dev, "%s: fw_hb %u last_fw_hb %u ready %u\n",
+ __func__, fw_hb, idev->last_fw_hb, fw_hb_ready);
+
+ idev->last_fw_hb = fw_hb;
+
+ /* log a transition */
+ if (fw_hb_ready != idev->fw_hb_ready) {
+ idev->fw_hb_ready = fw_hb_ready;
+ if (!fw_hb_ready)
+ dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
+ else
+ dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
}
- if (idev->last_hb_time == 1)
- dev_info(ionic->dev, "FW heartbeat restored at %d\n", hb);
+ if (!fw_hb_ready)
+ return -ENXIO;
- idev->last_hb = hb;
- idev->last_hb_time = hb_time;
+ idev->last_hb_time = check_time;
return 0;
}
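The atomic_long_try_cmpxchg_relaxed() loop above implements a lock-free "at most one caller per interval" gate: every caller rereads last_check_time, and only the caller that wins the exchange proceeds past the time check. The same pattern in isolation (generic sketch, not driver code):

    #include <linux/atomic.h>
    #include <linux/jiffies.h>

    /* Return true for at most one caller per @period, even under
     * concurrent calls; @last holds the jiffies of the last success.
     */
    static bool interval_gate(atomic_long_t *last, unsigned long period)
    {
            unsigned long now = jiffies;
            long prev = atomic_long_read(last);

            do {
                    if (time_before(now, (unsigned long)prev + period))
                            return false;   /* too soon, someone else won */
                    /* on failure, prev is reloaded with the current value */
            } while (!atomic_long_try_cmpxchg_relaxed(last, &prev, now));

            return true;    /* this caller owns the interval */
    }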
@@ -585,9 +612,9 @@ void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
- struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
+ struct device *dev = q->dev;
desc_info = &q->info[q->head_idx];
desc_info->cb = cb;
@@ -629,7 +656,7 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
- dev_err(q->lif->ionic->dev,
+ dev_err(q->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
q->name, stop_index, q->tail_idx, q->head_idx);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 690768ff0143..c25cf9b744c5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -4,6 +4,7 @@
#ifndef _IONIC_DEV_H_
#define _IONIC_DEV_H_
+#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -58,6 +59,7 @@ static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_dev_getattr_comp) == 16);
static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_dev_setattr_comp) == 16);
+static_assert(sizeof(struct ionic_lif_setphc_cmd) == 64);
/* Port commands */
static_assert(sizeof(struct ionic_port_identify_cmd) == 64);
@@ -134,10 +136,13 @@ struct ionic_devinfo {
struct ionic_dev {
union ionic_dev_info_regs __iomem *dev_info_regs;
union ionic_dev_cmd_regs __iomem *dev_cmd_regs;
+ struct ionic_hwstamp_regs __iomem *hwstamp_regs;
+ atomic_long_t last_check_time;
unsigned long last_hb_time;
- u32 last_hb;
- u8 last_fw_status;
+ u32 last_fw_hb;
+ bool fw_hb_ready;
+ bool fw_status_ready;
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -170,11 +175,20 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
-struct ionic_page_info {
+#define IONIC_PAGE_SIZE PAGE_SIZE
+#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
+#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
+ __GFP_COMP | __GFP_MEMALLOC)
+
+struct ionic_buf_info {
struct page *page;
dma_addr_t dma_addr;
+ u32 page_offset;
+ u32 len;
};
+#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+
struct ionic_desc_info {
union {
void *desc;
@@ -187,8 +201,9 @@ struct ionic_desc_info {
struct ionic_txq_sg_desc *txq_sg_desc;
struct ionic_rxq_sg_desc *rxq_sgl_desc;
};
- unsigned int npages;
- struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
+ unsigned int bytes;
+ unsigned int nbufs;
+ struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
ionic_desc_cb cb;
void *cb_arg;
};
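The switch from ionic_page_info to ionic_buf_info, with page_offset and len plus IONIC_PAGE_SPLIT_SZ, is what allows one page to back two receive buffers: after a frame that fits in half a page, the buffer can be flipped to the other half instead of allocating a new page. A sketch of that recycling decision (illustrative only; the real logic in the driver's rx path also checks the page refcount before reuse):

    /* Illustrative: reuse the other half of the page for small frames,
     * give the page up entirely for large ones.
     */
    static bool buf_recycle_half(struct ionic_buf_info *buf, unsigned int used)
    {
            if (used > IONIC_PAGE_SPLIT_SZ)
                    return false;                   /* page fully consumed */

            buf->page_offset ^= IONIC_PAGE_SPLIT_SZ; /* flip halves */
            return true;
    }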
@@ -199,10 +214,13 @@ struct ionic_queue {
struct device *dev;
struct ionic_lif *lif;
struct ionic_desc_info *info;
+ u64 dbval;
u16 head_idx;
u16 tail_idx;
unsigned int index;
unsigned int num_descs;
+ unsigned int max_sg_elems;
+ u64 features;
u64 dbell_count;
u64 stop;
u64 wake;
@@ -211,7 +229,6 @@ struct ionic_queue {
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
- u64 dbval;
union {
void *base;
struct ionic_txq_desc *txq;
@@ -229,7 +246,7 @@ struct ionic_queue {
unsigned int sg_desc_size;
unsigned int pid;
char name[IONIC_QUEUE_NAME_MAX_SZ];
-};
+} ____cacheline_aligned_in_smp;
#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
#define IONIC_INTR_NAME_MAX_SZ 32
@@ -256,7 +273,7 @@ struct ionic_cq {
u64 compl_count;
void *base;
dma_addr_t base_pa;
-};
+} ____cacheline_aligned_in_smp;
struct ionic;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 0832bedcb3b4..6583be570e45 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -29,11 +29,9 @@ static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
static void ionic_get_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *buf)
{
- struct ionic_lif *lif;
+ struct ionic_lif *lif = netdev_priv(netdev);
u32 i;
- lif = netdev_priv(netdev);
-
memset(buf, 0, stats->n_stats * sizeof(*buf));
for (i = 0; i < ionic_num_stats_grps; i++)
ionic_stats_groups[i].get_values(lif, &buf);
@@ -209,6 +207,14 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseER_Full);
break;
+ case IONIC_XCVR_PID_SFP_10GBASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ break;
+ case IONIC_XCVR_PID_SFP_1000BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ break;
case IONIC_XCVR_PID_UNKNOWN:
/* This means there's no module plugged in */
break;
@@ -264,12 +270,10 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *ks)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
struct ionic *ionic = lif->ionic;
- struct ionic_dev *idev;
int err = 0;
- idev = &lif->ionic->idev;
-
/* set autoneg */
if (ks->base.autoneg != idev->port_info->config.an_enable) {
mutex_lock(&ionic->dev_cmd_lock);
@@ -845,6 +849,98 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int ionic_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ __le64 mask;
+
+ if (!lif->phc || !lif->phc->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->phc_index = ptp_clock_index(lif->phc->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ /* tx modes */
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_SYNC));
+ if (ionic->ident.lif.eth.hwstamp_tx_modes & mask)
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_P2P));
+ if (ionic->ident.lif.eth.hwstamp_tx_modes & mask)
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_P2P);
+
+ /* rx filters */
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_NTP_ALL);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_NTP_ALL);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_SYNC);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_DREQ);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_ALL);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_SYNC);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_DREQ);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_ALL);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_SYNC);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_DREQ);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_ALL);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_SYNC);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_SYNC);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_DREQ);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+ mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_ALL);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
static int ionic_nway_reset(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -902,6 +998,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.set_pauseparam = ionic_set_pauseparam,
.get_fecparam = ionic_get_fecparam,
.set_fecparam = ionic_set_fecparam,
+ .get_ts_info = ionic_get_ts_info,
.nway_reset = ionic_nway_reset,
};
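The tx_types and rx_filters reported by ionic_get_ts_info() are what `ethtool -T` prints and what applications may request via the standard SIOCSHWTSTAMP ioctl. A minimal user-space sketch of enabling the modes advertised above (generic <linux/net_tstamp.h> API, not driver-specific):

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Request hardware TX stamping and all-packet RX stamping on ifname;
     * returns 0 on success, -1 with errno set otherwise.
     */
    static int enable_hwtstamp(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_ALL,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }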
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 31ccfcdc2b0a..0478b48d9895 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -34,6 +34,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_LIF_RESET = 22,
IONIC_CMD_LIF_GETATTR = 23,
IONIC_CMD_LIF_SETATTR = 24,
+ IONIC_CMD_LIF_SETPHC = 25,
IONIC_CMD_RX_MODE_SET = 30,
IONIC_CMD_RX_FILTER_ADD = 31,
@@ -269,6 +270,9 @@ union ionic_drv_identity {
* value in usecs to device units using:
* device units = usecs * mult / div
* @eq_count: Number of shared event queues
+ * @hwstamp_mask: Bitmask for subtraction of hardware tick values.
+ * @hwstamp_mult: Hardware tick to nanosecond multiplier.
+ * @hwstamp_shift: Hardware tick to nanosecond divisor (power of two).
*/
union ionic_dev_identity {
struct {
@@ -283,6 +287,9 @@ union ionic_dev_identity {
__le32 intr_coal_mult;
__le32 intr_coal_div;
__le32 eq_count;
+ __le64 hwstamp_mask;
+ __le32 hwstamp_mult;
+ __le32 hwstamp_shift;
};
__le32 words[478];
};
@@ -320,7 +327,7 @@ struct ionic_lif_identify_comp {
/**
* enum ionic_lif_capability - LIF capabilities
* @IONIC_LIF_CAP_ETH: LIF supports Ethernet
- * @IONIC_LIF_CAP_RDMA: LIF support RDMA
+ * @IONIC_LIF_CAP_RDMA: LIF supports RDMA
*/
enum ionic_lif_capability {
IONIC_LIF_CAP_ETH = BIT(0),
@@ -346,6 +353,68 @@ enum ionic_logical_qtype {
};
/**
+ * enum ionic_q_feature - Common Features for most queue types
+ *
+ * Common features use bits 0-15. Per-queue-type features use higher bits.
+ *
+ * @IONIC_QIDENT_F_CQ: Queue has completion ring
+ * @IONIC_QIDENT_F_SG: Queue has scatter/gather ring
+ * @IONIC_QIDENT_F_EQ: Queue can use event queue
+ * @IONIC_QIDENT_F_CMB: Queue is in cmb bar
+ * @IONIC_Q_F_2X_DESC: Double main descriptor size
+ * @IONIC_Q_F_2X_CQ_DESC: Double cq descriptor size
+ * @IONIC_Q_F_2X_SG_DESC: Double sg descriptor size
+ * @IONIC_Q_F_4X_DESC: Quadruple main descriptor size
+ * @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size
+ * @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size
+ */
+enum ionic_q_feature {
+ IONIC_QIDENT_F_CQ = BIT_ULL(0),
+ IONIC_QIDENT_F_SG = BIT_ULL(1),
+ IONIC_QIDENT_F_EQ = BIT_ULL(2),
+ IONIC_QIDENT_F_CMB = BIT_ULL(3),
+ IONIC_Q_F_2X_DESC = BIT_ULL(4),
+ IONIC_Q_F_2X_CQ_DESC = BIT_ULL(5),
+ IONIC_Q_F_2X_SG_DESC = BIT_ULL(6),
+ IONIC_Q_F_4X_DESC = BIT_ULL(7),
+ IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8),
+ IONIC_Q_F_4X_SG_DESC = BIT_ULL(9),
+};
+
+/**
+ * enum ionic_rxq_feature - RXQ-specific Features
+ *
+ * Per-queue-type features use bits 16 and higher.
+ *
+ * @IONIC_RXQ_F_HWSTAMP: Queue supports Hardware Timestamping
+ */
+enum ionic_rxq_feature {
+ IONIC_RXQ_F_HWSTAMP = BIT_ULL(16),
+};
+
+/**
+ * enum ionic_txq_feature - TXQ-specific Features
+ *
+ * Per-queue-type features use bits 16 and higher.
+ *
+ * @IONIC_TXQ_F_HWSTAMP: Queue supports Hardware Timestamping
+ */
+enum ionic_txq_feature {
+ IONIC_TXQ_F_HWSTAMP = BIT(16),
+};
+
+/**
+ * struct ionic_hwstamp_bits - Hardware timestamp decoding bits
+ * @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value
+ * @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset
+ * from the base cq descriptor.
+ */
+enum ionic_hwstamp_bits {
+ IONIC_HWSTAMP_INVALID = ~0ull,
+ IONIC_HWSTAMP_CQ_NEGOFFSET = 8,
+};
+
+/**
* struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
* @qtype: Hardware Queue Type
* @qid_count: Number of Queue IDs of the logical type
@@ -404,7 +473,9 @@ union ionic_lif_config {
* @max_ucast_filters: Number of perfect unicast addresses supported
* @max_mcast_filters: Number of perfect multicast addresses supported
* @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximim size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
+ * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
* @config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
@@ -438,7 +509,10 @@ union ionic_lif_identity {
__le16 rss_ind_tbl_sz;
__le32 min_frame_size;
__le32 max_frame_size;
- u8 rsvd2[106];
+ u8 rsvd2[2];
+ __le64 hwstamp_tx_modes;
+ __le64 hwstamp_rx_filters;
+ u8 rsvd3[88];
union ionic_lif_config config;
} __packed eth;
@@ -529,7 +603,7 @@ struct ionic_q_identify_comp {
* union ionic_q_identity - queue identity information
* @version: Queue type version that can be used with FW
* @supported: Bitfield of queue versions, first bit = ver 0
- * @features: Queue features
+ * @features: Queue features (enum ionic_q_feature, etc)
* @desc_sz: Descriptor size
* @comp_sz: Completion descriptor size
* @sg_desc_sz: Scatter/Gather descriptor size
@@ -541,10 +615,6 @@ union ionic_q_identity {
u8 version;
u8 supported;
u8 rsvd[6];
-#define IONIC_QIDENT_F_CQ 0x01 /* queue has completion ring */
-#define IONIC_QIDENT_F_SG 0x02 /* queue has scatter/gather ring */
-#define IONIC_QIDENT_F_EQ 0x04 /* queue can use event queue */
-#define IONIC_QIDENT_F_CMB 0x08 /* queue is in cmb bar */
__le64 features;
__le16 desc_sz;
__le16 comp_sz;
@@ -585,6 +655,7 @@ union ionic_q_identity {
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
+ * @features: Mask of queue features to enable, if not in the flags above.
*/
struct ionic_q_init_cmd {
u8 opcode;
@@ -608,7 +679,8 @@ struct ionic_q_init_cmd {
__le64 ring_base;
__le64 cq_ring_base;
__le64 sg_ring_base;
- u8 rsvd2[20];
+ u8 rsvd2[12];
+ __le64 features;
} __packed;
/**
@@ -692,7 +764,7 @@ enum ionic_txq_desc_opcode {
* checksums are also updated.
*
* IONIC_TXQ_DESC_OPCODE_TSO:
- * Device preforms TCP segmentation offload
+ * Device performs TCP segmentation offload
* (TSO). @hdr_len is the number of bytes
* to the end of TCP header (the offset to
* the TCP payload). @mss is the desired
@@ -982,13 +1054,13 @@ struct ionic_rxq_comp {
};
enum ionic_pkt_type {
- IONIC_PKT_TYPE_NON_IP = 0x000,
- IONIC_PKT_TYPE_IPV4 = 0x001,
- IONIC_PKT_TYPE_IPV4_TCP = 0x003,
- IONIC_PKT_TYPE_IPV4_UDP = 0x005,
- IONIC_PKT_TYPE_IPV6 = 0x008,
- IONIC_PKT_TYPE_IPV6_TCP = 0x018,
- IONIC_PKT_TYPE_IPV6_UDP = 0x028,
+ IONIC_PKT_TYPE_NON_IP = 0x00,
+ IONIC_PKT_TYPE_IPV4 = 0x01,
+ IONIC_PKT_TYPE_IPV4_TCP = 0x03,
+ IONIC_PKT_TYPE_IPV4_UDP = 0x05,
+ IONIC_PKT_TYPE_IPV6 = 0x08,
+ IONIC_PKT_TYPE_IPV6_TCP = 0x18,
+ IONIC_PKT_TYPE_IPV6_UDP = 0x28,
/* below types are only used if encap offloads are enabled on lif */
IONIC_PKT_TYPE_ENCAP_NON_IP = 0x40,
IONIC_PKT_TYPE_ENCAP_IPV4 = 0x41,
@@ -1019,7 +1091,64 @@ enum ionic_eth_hw_features {
IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16),
IONIC_ETH_HW_RX_CSUM_GENEVE = BIT(17),
IONIC_ETH_HW_TX_CSUM_GENEVE = BIT(18),
- IONIC_ETH_HW_TSO_GENEVE = BIT(19)
+ IONIC_ETH_HW_TSO_GENEVE = BIT(19),
+ IONIC_ETH_HW_TIMESTAMP = BIT(20),
+};
+
+/**
+ * enum ionic_pkt_class - Packet classification mask.
+ *
+ * Used with rx steering filter, packets indicated by the mask can be steered
+ * toward a specific receive queue.
+ *
+ * @IONIC_PKT_CLS_NTP_ALL: All NTP packets.
+ * @IONIC_PKT_CLS_PTP1_SYNC: PTPv1 sync
+ * @IONIC_PKT_CLS_PTP1_DREQ: PTPv1 delay-request
+ * @IONIC_PKT_CLS_PTP1_ALL: PTPv1 all packets
+ * @IONIC_PKT_CLS_PTP2_L4_SYNC: PTPv2-UDP sync
+ * @IONIC_PKT_CLS_PTP2_L4_DREQ: PTPv2-UDP delay-request
+ * @IONIC_PKT_CLS_PTP2_L4_ALL: PTPv2-UDP all packets
+ * @IONIC_PKT_CLS_PTP2_L2_SYNC: PTPv2-ETH sync
+ * @IONIC_PKT_CLS_PTP2_L2_DREQ: PTPv2-ETH delay-request
+ * @IONIC_PKT_CLS_PTP2_L2_ALL: PTPv2-ETH all packets
+ * @IONIC_PKT_CLS_PTP2_SYNC: PTPv2 sync
+ * @IONIC_PKT_CLS_PTP2_DREQ: PTPv2 delay-request
+ * @IONIC_PKT_CLS_PTP2_ALL: PTPv2 all packets
+ * @IONIC_PKT_CLS_PTP_SYNC: PTP sync
+ * @IONIC_PKT_CLS_PTP_DREQ: PTP delay-request
+ * @IONIC_PKT_CLS_PTP_ALL: PTP all packets
+ */
+enum ionic_pkt_class {
+ IONIC_PKT_CLS_NTP_ALL = BIT(0),
+
+ IONIC_PKT_CLS_PTP1_SYNC = BIT(1),
+ IONIC_PKT_CLS_PTP1_DREQ = BIT(2),
+ IONIC_PKT_CLS_PTP1_ALL = BIT(3) |
+ IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ,
+
+ IONIC_PKT_CLS_PTP2_L4_SYNC = BIT(4),
+ IONIC_PKT_CLS_PTP2_L4_DREQ = BIT(5),
+ IONIC_PKT_CLS_PTP2_L4_ALL = BIT(6) |
+ IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ,
+
+ IONIC_PKT_CLS_PTP2_L2_SYNC = BIT(7),
+ IONIC_PKT_CLS_PTP2_L2_DREQ = BIT(8),
+ IONIC_PKT_CLS_PTP2_L2_ALL = BIT(9) |
+ IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ,
+
+ IONIC_PKT_CLS_PTP2_SYNC =
+ IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L2_SYNC,
+ IONIC_PKT_CLS_PTP2_DREQ =
+ IONIC_PKT_CLS_PTP2_L4_DREQ | IONIC_PKT_CLS_PTP2_L2_DREQ,
+ IONIC_PKT_CLS_PTP2_ALL =
+ IONIC_PKT_CLS_PTP2_L4_ALL | IONIC_PKT_CLS_PTP2_L2_ALL,
+
+ IONIC_PKT_CLS_PTP_SYNC =
+ IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP2_SYNC,
+ IONIC_PKT_CLS_PTP_DREQ =
+ IONIC_PKT_CLS_PTP1_DREQ | IONIC_PKT_CLS_PTP2_DREQ,
+ IONIC_PKT_CLS_PTP_ALL =
+ IONIC_PKT_CLS_PTP1_ALL | IONIC_PKT_CLS_PTP2_ALL,
};
/**
@@ -1111,6 +1240,8 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69,
IONIC_XCVR_PID_QSFP_100G_PSM4 = 70,
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
+ IONIC_XCVR_PID_SFP_10GBASE_T = 72,
+ IONIC_XCVR_PID_SFP_1000BASE_T = 73,
};
/**
@@ -1327,11 +1458,25 @@ enum ionic_stats_ctl_cmd {
};
/**
+ * enum ionic_txstamp_mode - List of TX Timestamping Modes
+ * @IONIC_TXSTAMP_OFF: Disable TX hardware timestamping.
+ * @IONIC_TXSTAMP_ON: Enable local TX hardware timestamping.
+ * @IONIC_TXSTAMP_ONESTEP_SYNC: Modify TX PTP Sync packets.
+ * @IONIC_TXSTAMP_ONESTEP_P2P: Modify TX PTP Sync and PDelayResp.
+ */
+enum ionic_txstamp_mode {
+ IONIC_TXSTAMP_OFF = 0,
+ IONIC_TXSTAMP_ON = 1,
+ IONIC_TXSTAMP_ONESTEP_SYNC = 2,
+ IONIC_TXSTAMP_ONESTEP_P2P = 3,
+};
+
+/**
* enum ionic_port_attr - List of device attributes
* @IONIC_PORT_ATTR_STATE: Port state attribute
* @IONIC_PORT_ATTR_SPEED: Port speed attribute
* @IONIC_PORT_ATTR_MTU: Port MTU attribute
- * @IONIC_PORT_ATTR_AUTONEG: Port autonegotation attribute
+ * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute
* @IONIC_PORT_ATTR_FEC: Port FEC attribute
* @IONIC_PORT_ATTR_PAUSE: Port pause attribute
* @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute
@@ -1568,6 +1713,7 @@ enum ionic_rss_hash_types {
* @IONIC_LIF_ATTR_FEATURES: LIF features attribute
* @IONIC_LIF_ATTR_RSS: LIF RSS attribute
* @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
+ * @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1577,6 +1723,7 @@ enum ionic_lif_attr {
IONIC_LIF_ATTR_FEATURES = 4,
IONIC_LIF_ATTR_RSS = 5,
IONIC_LIF_ATTR_STATS_CTRL = 6,
+ IONIC_LIF_ATTR_TXSTAMP = 7,
};
/**
@@ -1594,6 +1741,7 @@ enum ionic_lif_attr {
* @key: The hash secret key
* @addr: Address for the indirection table shared memory
* @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
+ * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1612,6 +1760,7 @@ struct ionic_lif_setattr_cmd {
__le64 addr;
} rss;
u8 stats_ctl;
+ __le16 txstamp_mode;
u8 rsvd[60];
} __packed;
};
@@ -1656,6 +1805,7 @@ struct ionic_lif_getattr_cmd {
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
+ * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1667,11 +1817,35 @@ struct ionic_lif_getattr_comp {
__le32 mtu;
u8 mac[6];
__le64 features;
+ __le16 txstamp_mode;
u8 rsvd2[11];
} __packed;
u8 color;
};
+/**
+ * struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock
+ * @opcode: Opcode
+ * @lif_index: LIF index
+ * @tick: Hardware stamp tick of an instant in time.
+ * @nsec: Nanosecond stamp of the same instant.
+ * @frac: Fractional nanoseconds at the same instant.
+ * @mult: Cycle to nanosecond multiplier.
+ * @shift: Cycle to nanosecond divisor (power of two).
+ */
+struct ionic_lif_setphc_cmd {
+ u8 opcode;
+ u8 rsvd1;
+ __le16 lif_index;
+ u8 rsvd2[4];
+ __le64 tick;
+ __le64 nsec;
+ __le64 frac;
+ __le32 mult;
+ __le32 shift;
+ u8 rsvd3[24];
+};
+
enum ionic_rx_mode {
IONIC_RX_MODE_F_UNICAST = BIT(0),
IONIC_RX_MODE_F_MULTICAST = BIT(1),
@@ -1704,9 +1878,10 @@ struct ionic_rx_mode_set_cmd {
typedef struct ionic_admin_comp ionic_rx_mode_set_comp;
enum ionic_rx_filter_match_type {
- IONIC_RX_FILTER_MATCH_VLAN = 0,
- IONIC_RX_FILTER_MATCH_MAC,
- IONIC_RX_FILTER_MATCH_MAC_VLAN,
+ IONIC_RX_FILTER_MATCH_VLAN = 0x0,
+ IONIC_RX_FILTER_MATCH_MAC = 0x1,
+ IONIC_RX_FILTER_MATCH_MAC_VLAN = 0x2,
+ IONIC_RX_FILTER_STEER_PKTCLASS = 0x10,
};
/**
@@ -1723,6 +1898,7 @@ enum ionic_rx_filter_match_type {
* @mac_vlan: MACVLAN filter
* @vlan: VLAN ID
* @addr: MAC address (network-byte order)
+ * @pkt_class: Packet classification filter
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1741,8 +1917,9 @@ struct ionic_rx_filter_add_cmd {
__le16 vlan;
u8 addr[6];
} mac_vlan;
+ __le64 pkt_class;
u8 rsvd[54];
- };
+ } __packed;
};
/**
@@ -1951,8 +2128,8 @@ enum ionic_qos_sched_type {
* @pfc_cos: Priority-Flow Control class of service
* @dwrr_weight: QoS class scheduling weight
* @strict_rlmt: Rate limit for strict priority scheduling
- * @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP)
- * @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP)
+ * @rw_dot1q_pcp: Rewrite dot1q pcp to value (valid iff F_RW_DOT1Q_PCP)
+ * @rw_ip_dscp: Rewrite ip dscp to value (valid iff F_RW_IP_DSCP)
* @dot1q_pcp: Dot1q pcp value
* @ndscp: Number of valid dscp values in the ip_dscp field
* @ip_dscp: IP dscp values
@@ -2743,6 +2920,16 @@ union ionic_dev_cmd_comp {
};
/**
+ * struct ionic_hwstamp_regs - Hardware current timestamp registers
+ * @tick_low: Low 32 bits of hardware timestamp
+ * @tick_high: High 32 bits of hardware timestamp
+ */
+struct ionic_hwstamp_regs {
+ u32 tick_low;
+ u32 tick_high;
+};
+
+/**
* union ionic_dev_info_regs - Device info register format (read-only)
* @signature: Signature value of 0x44455649 ('DEVI')
* @version: Current version of info
@@ -2752,6 +2939,7 @@ union ionic_dev_cmd_comp {
* @fw_heartbeat: Firmware heartbeat counter
* @serial_num: Serial number
* @fw_version: Firmware version
+ * @hwstamp_regs: Hardware current timestamp registers
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -2766,6 +2954,8 @@ union ionic_dev_info_regs {
u32 fw_heartbeat;
char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN];
+ u8 rsvd_pad1024[948];
+ struct ionic_hwstamp_regs hwstamp;
};
u32 words[512];
};
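Since tick_low/tick_high expose a free-running 64-bit clock through two 32-bit registers, a coherent snapshot needs the usual high-low-high retry so a carry between the two reads is detected (generic pattern sketch; not necessarily how the driver's PHC code reads it):

    /* Generic split-register read: loop until the high word is stable
     * across the low-word read, so the 64-bit value is consistent.
     */
    static u64 hwstamp_read_tick(struct ionic_hwstamp_regs __iomem *regs)
    {
            u32 hi, lo, hi2;

            do {
                    hi  = ioread32(&regs->tick_high);
                    lo  = ioread32(&regs->tick_low);
                    hi2 = ioread32(&regs->tick_high);
            } while (hi != hi2);

            return ((u64)hi << 32) | lo;
    }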
@@ -2813,6 +3003,7 @@ union ionic_adminq_cmd {
struct ionic_q_control_cmd q_control;
struct ionic_lif_setattr_cmd lif_setattr;
struct ionic_lif_getattr_cmd lif_getattr;
+ struct ionic_lif_setphc_cmd lif_setphc;
struct ionic_rx_mode_set_cmd rx_mode_set;
struct ionic_rx_filter_add_cmd rx_filter_add;
struct ionic_rx_filter_del_cmd rx_filter_del;
@@ -2829,6 +3020,7 @@ union ionic_adminq_comp {
struct ionic_q_init_comp q_init;
struct ionic_lif_setattr_comp lif_setattr;
struct ionic_lif_getattr_comp lif_getattr;
+ struct ionic_admin_comp lif_setphc;
struct ionic_rx_filter_add_comp rx_filter_add;
struct ionic_fw_control_comp fw_control;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 11140915c2da..af3a5368529c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -120,23 +120,34 @@ static void ionic_link_status_check(struct ionic_lif *lif)
if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
+ /* Don't put carrier back up if we're in a broken state */
+ if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
+ clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
+ return;
+ }
+
link_status = le16_to_cpu(lif->info->status.link_status);
link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
if (link_up) {
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ int err = 0;
+
+ if (netdev->flags & IFF_UP && netif_running(netdev)) {
mutex_lock(&lif->queue_lock);
- ionic_start_queues(lif);
+ err = ionic_start_queues(lif);
+ if (err && err != -EBUSY) {
+ netdev_err(lif->netdev,
+ "Failed to start queues: %d\n", err);
+ set_bit(IONIC_LIF_F_BROKEN, lif->state);
+ netif_carrier_off(lif->netdev);
+ }
mutex_unlock(&lif->queue_lock);
}
- if (!netif_carrier_ok(netdev)) {
- u32 link_speed;
-
+ if (!err && !netif_carrier_ok(netdev)) {
ionic_port_identify(lif->ionic);
- link_speed = le32_to_cpu(lif->info->status.link_speed);
netdev_info(netdev, "Link up - %d Gbps\n",
- link_speed / 1000);
+ le32_to_cpu(lif->info->status.link_speed) / 1000);
netif_carrier_on(netdev);
}
} else {
@@ -145,7 +156,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
netif_carrier_off(netdev);
}
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ if (netdev->flags & IFF_UP && netif_running(netdev)) {
mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
mutex_unlock(&lif->queue_lock);
@@ -382,6 +393,8 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
static void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
+ struct ionic_qcq *adminqcq;
+ unsigned long irqflags;
if (lif->notifyqcq) {
ionic_qcq_free(lif, lif->notifyqcq);
@@ -390,9 +403,14 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
}
if (lif->adminqcq) {
- ionic_qcq_free(lif, lif->adminqcq);
- devm_kfree(dev, lif->adminqcq);
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+ adminqcq = READ_ONCE(lif->adminqcq);
lif->adminqcq = NULL;
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+ if (adminqcq) {
+ ionic_qcq_free(lif, adminqcq);
+ devm_kfree(dev, adminqcq);
+ }
}
if (lif->rxqcqs) {
@@ -495,6 +513,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
goto err_out;
}
+ new->q.dev = dev;
new->flags = flags;
new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
@@ -506,6 +525,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
new->q.type = type;
+ new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
desc_size, sg_desc_size, pid);
@@ -656,20 +676,20 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
err = -ENOMEM;
lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
- sizeof(struct ionic_qcq *), GFP_KERNEL);
+ sizeof(*lif->txqcqs), GFP_KERNEL);
if (!lif->txqcqs)
goto err_out;
lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
- sizeof(struct ionic_qcq *), GFP_KERNEL);
+ sizeof(*lif->rxqcqs), GFP_KERNEL);
if (!lif->rxqcqs)
goto err_out;
- lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
- sizeof(struct ionic_tx_stats), GFP_KERNEL);
+ lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
+ sizeof(*lif->txqstats), GFP_KERNEL);
if (!lif->txqstats)
goto err_out;
- lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
- sizeof(struct ionic_rx_stats), GFP_KERNEL);
+ lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
+ sizeof(*lif->rxqstats), GFP_KERNEL);
if (!lif->rxqstats)
goto err_out;
@@ -711,15 +731,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.ring_base = cpu_to_le64(q->base_pa),
.cq_ring_base = cpu_to_le64(cq->base_pa),
.sg_ring_base = cpu_to_le64(q->sg_base_pa),
+ .features = cpu_to_le64(q->features),
},
};
unsigned int intr_index;
int err;
- if (qcq->flags & IONIC_QCQ_F_INTR)
- intr_index = qcq->intr.index;
- else
- intr_index = lif->rxqcqs[q->index]->intr.index;
+ intr_index = qcq->intr.index;
+
ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
@@ -773,6 +792,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.ring_base = cpu_to_le64(q->base_pa),
.cq_ring_base = cpu_to_le64(cq->base_pa),
.sg_ring_base = cpu_to_le64(q->sg_base_pa),
+ .features = cpu_to_le64(q->features),
},
};
int err;
@@ -810,6 +830,254 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
return 0;
}
+int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
+{
+ unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
+ unsigned int txq_i, flags;
+ struct ionic_qcq *txq;
+ u64 features;
+ int err;
+
+ mutex_lock(&lif->queue_lock);
+
+ if (lif->hwstamp_txq)
+ goto out;
+
+ features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;
+
+ num_desc = IONIC_MIN_TXRX_DESC;
+ desc_sz = sizeof(struct ionic_txq_desc);
+ comp_sz = 2 * sizeof(struct ionic_txq_comp);
+
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
+ txq_i = lif->ionic->ntxqs_per_lif;
+ flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
+ num_desc, desc_sz, comp_sz, sg_desc_sz,
+ lif->kern_pid, &txq);
+ if (err)
+ goto err_qcq_alloc;
+
+ txq->q.features = features;
+
+ ionic_link_qcq_interrupts(lif->adminqcq, txq);
+ ionic_debugfs_add_qcq(lif, txq);
+
+ lif->hwstamp_txq = txq;
+
+ if (netif_running(lif->netdev)) {
+ err = ionic_lif_txq_init(lif, txq);
+ if (err)
+ goto err_qcq_init;
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ err = ionic_qcq_enable(txq);
+ if (err)
+ goto err_qcq_enable;
+ }
+ }
+
+out:
+ mutex_unlock(&lif->queue_lock);
+
+ return 0;
+
+err_qcq_enable:
+ ionic_lif_qcq_deinit(lif, txq);
+err_qcq_init:
+ lif->hwstamp_txq = NULL;
+ ionic_debugfs_del_qcq(txq);
+ ionic_qcq_free(lif, txq);
+ devm_kfree(lif->ionic->dev, txq);
+err_qcq_alloc:
+ mutex_unlock(&lif->queue_lock);
+ return err;
+}
+
+int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
+{
+ unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
+ unsigned int rxq_i, flags;
+ struct ionic_qcq *rxq;
+ u64 features;
+ int err;
+
+ mutex_lock(&lif->queue_lock);
+
+ if (lif->hwstamp_rxq)
+ goto out;
+
+ features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
+
+ num_desc = IONIC_MIN_TXRX_DESC;
+ desc_sz = sizeof(struct ionic_rxq_desc);
+ comp_sz = 2 * sizeof(struct ionic_rxq_comp);
+ sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
+
+ rxq_i = lif->ionic->nrxqs_per_lif;
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
+
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
+ num_desc, desc_sz, comp_sz, sg_desc_sz,
+ lif->kern_pid, &rxq);
+ if (err)
+ goto err_qcq_alloc;
+
+ rxq->q.features = features;
+
+ ionic_link_qcq_interrupts(lif->adminqcq, rxq);
+ ionic_debugfs_add_qcq(lif, rxq);
+
+ lif->hwstamp_rxq = rxq;
+
+ if (netif_running(lif->netdev)) {
+ err = ionic_lif_rxq_init(lif, rxq);
+ if (err)
+ goto err_qcq_init;
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ ionic_rx_fill(&rxq->q);
+ err = ionic_qcq_enable(rxq);
+ if (err)
+ goto err_qcq_enable;
+ }
+ }
+
+out:
+ mutex_unlock(&lif->queue_lock);
+
+ return 0;
+
+err_qcq_enable:
+ ionic_lif_qcq_deinit(lif, rxq);
+err_qcq_init:
+ lif->hwstamp_rxq = NULL;
+ ionic_debugfs_del_qcq(rxq);
+ ionic_qcq_free(lif, rxq);
+ devm_kfree(lif->ionic->dev, rxq);
+err_qcq_alloc:
+ mutex_unlock(&lif->queue_lock);
+ return err;
+}
+
+int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
+{
+ struct ionic_queue_params qparam;
+
+ ionic_init_queue_params(lif, &qparam);
+
+ if (rx_all)
+ qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
+ else
+ qparam.rxq_features = 0;
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->rxq_features = qparam.rxq_features;
+ return 0;
+ }
+
+ return ionic_reconfigure_queues(lif, &qparam);
+}
+
+int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_TXSTAMP,
+ .txstamp_mode = cpu_to_le16(txstamp_mode),
+ },
+ };
+
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+ u32 filter_id;
+ int err;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ f = ionic_rx_filter_rxsteer(lif);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return;
+ }
+
+ filter_id = f->filter_id;
+ ionic_rx_filter_free(lif, f);
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);
+
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err && err != -EEXIST)
+ netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
+}
+
+static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_add = {
+ .opcode = IONIC_CMD_RX_FILTER_ADD,
+ .lif_index = cpu_to_le16(lif->index),
+ .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
+ .pkt_class = cpu_to_le64(pkt_class),
+ },
+ };
+ u8 qtype;
+ u32 qid;
+ int err;
+
+ if (!lif->hwstamp_rxq)
+ return -EINVAL;
+
+ qtype = lif->hwstamp_rxq->q.type;
+ ctx.cmd.rx_filter_add.qtype = qtype;
+
+ qid = lif->hwstamp_rxq->q.index;
+ ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);
+
+ netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err && err != -EEXIST)
+ return err;
+
+ return ionic_rx_filter_save(lif, 0, qid, 0, &ctx);
+}
+
+int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
+{
+ ionic_lif_del_hwstamp_rxfilt(lif);
+
+ if (!pkt_class)
+ return 0;
+
+ return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
+}
+
static bool ionic_notifyq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
@@ -837,7 +1105,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
switch (le16_to_cpu(comp->event.ecode)) {
case IONIC_EVENT_LINK_CHANGE:
- ionic_link_status_check_request(lif, false);
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
break;
case IONIC_EVENT_RESET:
work = kzalloc(sizeof(*work), GFP_ATOMIC);
@@ -875,30 +1143,43 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
struct ionic_lif *lif = napi_to_cq(napi)->lif;
struct ionic_dev *idev = &lif->ionic->idev;
+ unsigned long irqflags;
unsigned int flags = 0;
+ int rx_work = 0;
+ int tx_work = 0;
int n_work = 0;
int a_work = 0;
int work_done;
+ int credits;
if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
ionic_notifyq_service, NULL, NULL);
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
ionic_adminq_service, NULL, NULL);
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+
+ if (lif->hwstamp_rxq)
+ rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
+ ionic_rx_service, NULL, NULL);
+
+ if (lif->hwstamp_txq)
+ tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
+ ionic_tx_service, NULL, NULL);
- work_done = max(n_work, a_work);
+ work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
flags |= IONIC_INTR_CRED_UNMASK;
- lif->adminqcq->cq.bound_intr->rearm_count++;
+ intr->rearm_count++;
}
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
- intr->index,
- n_work + a_work, flags);
+ credits = n_work + a_work + rx_work + tx_work;
+ ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
return work_done;
@@ -1258,6 +1539,10 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
int err;
ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
+
+ if (lif->phc)
+ ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -1305,6 +1590,8 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
+ if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
+ dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");
return 0;
}
@@ -1441,7 +1728,7 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
*/
err = ionic_txrx_init(lif);
mutex_unlock(&lif->queue_lock);
- ionic_link_status_check_request(lif, true);
+ ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
return err;
@@ -1480,7 +1767,8 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
{
struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
- netdev_info(lif->netdev, "Tx Timeout recovery\n");
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
/* if we were stopped before this scheduled job was launched,
* don't bother the queues as they are already stopped.
@@ -1496,6 +1784,7 @@ static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
schedule_work(&lif->tx_timeout_work);
}
@@ -1645,11 +1934,17 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
}
+ if (lif->hwstamp_txq)
+ err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));
+
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++)
err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
}
+ if (lif->hwstamp_rxq)
+ err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));
+
ionic_lif_quiesce(lif);
}
@@ -1672,6 +1967,17 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
}
}
lif->rx_mode = 0;
+
+ if (lif->hwstamp_txq) {
+ ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
+ ionic_tx_flush(&lif->hwstamp_txq->cq);
+ ionic_tx_empty(&lif->hwstamp_txq->q);
+ }
+
+ if (lif->hwstamp_rxq) {
+ ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
+ ionic_rx_empty(&lif->hwstamp_rxq->q);
+ }
}
static void ionic_txrx_free(struct ionic_lif *lif)
@@ -1693,15 +1999,30 @@ static void ionic_txrx_free(struct ionic_lif *lif)
lif->rxqcqs[i] = NULL;
}
}
+
+ if (lif->hwstamp_txq) {
+ ionic_qcq_free(lif, lif->hwstamp_txq);
+ devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
+ lif->hwstamp_txq = NULL;
+ }
+
+ if (lif->hwstamp_rxq) {
+ ionic_qcq_free(lif, lif->hwstamp_rxq);
+ devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
+ lif->hwstamp_rxq = NULL;
+ }
}
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
- unsigned int sg_desc_sz;
- unsigned int flags;
- unsigned int i;
+ unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
+ unsigned int flags, i;
int err = 0;
+ num_desc = lif->ntxq_descs;
+ desc_sz = sizeof(struct ionic_txq_desc);
+ comp_sz = sizeof(struct ionic_txq_comp);
+
if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
sizeof(struct ionic_txq_sg_desc_v1))
@@ -1714,10 +2035,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
flags |= IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
- lif->ntxq_descs,
- sizeof(struct ionic_txq_desc),
- sizeof(struct ionic_txq_comp),
- sg_desc_sz,
+ num_desc, desc_sz, comp_sz, sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -1734,16 +2052,24 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
+
+ num_desc = lif->nrxq_descs;
+ desc_sz = sizeof(struct ionic_rxq_desc);
+ comp_sz = sizeof(struct ionic_rxq_comp);
+ sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
+
+ if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
+ comp_sz *= 2;
+
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
- lif->nrxq_descs,
- sizeof(struct ionic_rxq_desc),
- sizeof(struct ionic_rxq_comp),
- sizeof(struct ionic_rxq_sg_desc),
+ num_desc, desc_sz, comp_sz, sg_desc_sz,
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
+ lif->rxqcqs[i]->q.features = lif->rxq_features;
+
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
@@ -1822,8 +2148,26 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
}
}
+ if (lif->hwstamp_rxq) {
+ ionic_rx_fill(&lif->hwstamp_rxq->q);
+ err = ionic_qcq_enable(lif->hwstamp_rxq);
+ if (err)
+ goto err_out_hwstamp_rx;
+ }
+
+ if (lif->hwstamp_txq) {
+ err = ionic_qcq_enable(lif->hwstamp_txq);
+ if (err)
+ goto err_out_hwstamp_tx;
+ }
+
return 0;
+err_out_hwstamp_tx:
+ if (lif->hwstamp_rxq)
+ derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
+err_out_hwstamp_rx:
+ i = lif->nxqs;
err_out:
while (i--) {
derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
@@ -1837,6 +2181,12 @@ static int ionic_start_queues(struct ionic_lif *lif)
{
int err;
+ if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
+ return -EIO;
+
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
return 0;
@@ -1855,13 +2205,17 @@ static int ionic_open(struct net_device *netdev)
struct ionic_lif *lif = netdev_priv(netdev);
int err;
+ /* If recovering from a broken state, clear the bit and we'll try again */
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ netdev_info(netdev, "clearing broken state\n");
+
err = ionic_txrx_alloc(lif);
if (err)
return err;
err = ionic_txrx_init(lif);
if (err)
- goto err_out;
+ goto err_txrx_free;
err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
if (err)
@@ -1882,7 +2236,7 @@ static int ionic_open(struct net_device *netdev)
err_txrx_deinit:
ionic_txrx_deinit(lif);
-err_out:
+err_txrx_free:
ionic_txrx_free(lif);
return err;
}
@@ -1910,6 +2264,20 @@ static int ionic_stop(struct net_device *netdev)
return 0;
}
+static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ionic_lif_hwstamp_set(lif, ifr);
+ case SIOCGHWTSTAMP:
+ return ionic_lif_hwstamp_get(lif, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
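A minimal userspace sketch (not part of the patch) of the request path ionic_do_ioctl() dispatches: SIOCSHWTSTAMP and SIOCGHWTSTAMP carry a struct hwtstamp_config through an ifreq on any socket. The interface name "eth0" is an assumption for illustration; note that the driver may rewrite rx_filter (for instance to HWTSTAMP_FILTER_ALL) before the config is copied back out.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr = { 0 };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            /* the driver may adjust cfg; re-read it after the ioctl */
            if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                    perror("SIOCSHWTSTAMP");
            else
                    printf("tx_type %d rx_filter %d\n", cfg.tx_type, cfg.rx_filter);

            close(fd);
            return 0;
    }
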
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
@@ -2158,6 +2526,7 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
+ .ndo_do_ioctl = ionic_do_ioctl,
.ndo_start_xmit = ionic_start_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
@@ -2181,7 +2550,9 @@ static const struct net_device_ops ionic_netdev_ops = {
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
/* only swapping the queues, not the napi, flags, or other stuff */
+ swap(a->q.features, b->q.features);
swap(a->q.num_descs, b->q.num_descs);
+ swap(a->q.desc_size, b->q.desc_size);
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
@@ -2189,6 +2560,7 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->q_base_pa, b->q_base_pa);
swap(a->q_size, b->q_size);
+ swap(a->q.sg_desc_size, b->q.sg_desc_size);
swap(a->q.sg_base, b->q.sg_base);
swap(a->q.sg_base_pa, b->q.sg_base_pa);
swap(a->sg_base, b->sg_base);
@@ -2196,23 +2568,26 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->sg_size, b->sg_size);
swap(a->cq.num_descs, b->cq.num_descs);
+ swap(a->cq.desc_size, b->cq.desc_size);
swap(a->cq.base, b->cq.base);
swap(a->cq.base_pa, b->cq.base_pa);
swap(a->cq.info, b->cq.info);
swap(a->cq_base, b->cq_base);
swap(a->cq_base_pa, b->cq_base_pa);
swap(a->cq_size, b->cq_size);
+
+ ionic_debugfs_del_qcq(a);
+ ionic_debugfs_add_qcq(a->q.lif, a);
}
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
+ unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
struct ionic_qcq **tx_qcqs = NULL;
struct ionic_qcq **rx_qcqs = NULL;
- unsigned int sg_desc_sz;
- unsigned int flags;
+ unsigned int flags, i;
int err = -ENOMEM;
- unsigned int i;
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
@@ -2221,7 +2596,9 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
if (!tx_qcqs)
goto err_out;
}
- if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
+ if (qparam->nxqs != lif->nxqs ||
+ qparam->nrxq_descs != lif->nrxq_descs ||
+ qparam->rxq_features != lif->rxq_features) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!rx_qcqs)
@@ -2231,21 +2608,22 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
/* allocate new desc_info and rings, but leave the interrupt setup
* until later so as to not mess with the still-running queues
*/
- if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
- lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
- sizeof(struct ionic_txq_sg_desc_v1))
- sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
- else
- sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
-
if (tx_qcqs) {
+ num_desc = qparam->ntxq_descs;
+ desc_sz = sizeof(struct ionic_txq_desc);
+ comp_sz = sizeof(struct ionic_txq_comp);
+
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
for (i = 0; i < qparam->nxqs; i++) {
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
- qparam->ntxq_descs,
- sizeof(struct ionic_txq_desc),
- sizeof(struct ionic_txq_comp),
- sg_desc_sz,
+ num_desc, desc_sz, comp_sz, sg_desc_sz,
lif->kern_pid, &tx_qcqs[i]);
if (err)
goto err_out;
@@ -2253,16 +2631,23 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
if (rx_qcqs) {
+ num_desc = qparam->nrxq_descs;
+ desc_sz = sizeof(struct ionic_rxq_desc);
+ comp_sz = sizeof(struct ionic_rxq_comp);
+ sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
+
+ if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
+ comp_sz *= 2;
+
for (i = 0; i < qparam->nxqs; i++) {
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
- qparam->nrxq_descs,
- sizeof(struct ionic_rxq_desc),
- sizeof(struct ionic_rxq_comp),
- sizeof(struct ionic_rxq_sg_desc),
+ num_desc, desc_sz, comp_sz, sg_desc_sz,
lif->kern_pid, &rx_qcqs[i]);
if (err)
goto err_out;
+
+ rx_qcqs[i]->q.features = qparam->rxq_features;
}
}
@@ -2349,9 +2734,10 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
swap(lif->nxqs, qparam->nxqs);
+ swap(lif->rxq_features, qparam->rxq_features);
err_out_reinit_unlock:
- /* re-init the queues, but don't loose an error code */
+ /* re-init the queues, but don't lose an error code */
if (err)
ionic_start_queues_reconfig(lif);
else
@@ -2450,7 +2836,6 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->index = 0;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
- lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
/* Convert the default coalesce value to actual hw resolution */
lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
@@ -2501,6 +2886,8 @@ int ionic_lif_alloc(struct ionic *ionic)
}
netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
+ ionic_lif_alloc_phc(lif);
+
return 0;
err_out_free_qcqs:
@@ -2601,10 +2988,13 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
}
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
- ionic_link_status_check_request(lif, true);
+ ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
dev_info(ionic->dev, "FW Up: LIFs restarted\n");
+ /* restore the hardware timestamping queues */
+ ionic_lif_hwstamp_replay(lif);
+
return;
err_txrx_free:
@@ -2621,6 +3011,8 @@ void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
+ ionic_lif_free_phc(lif);
+
/* free rss indirection table */
dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
lif->rss_ind_tbl_pa);
@@ -2957,6 +3349,8 @@ int ionic_lif_register(struct ionic_lif *lif)
{
int err;
+ ionic_lif_register_phc(lif);
+
INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
lif->ionic->nb.notifier_call = ionic_lif_notify;
@@ -2969,10 +3363,11 @@ int ionic_lif_register(struct ionic_lif *lif)
err = register_netdev(lif->netdev);
if (err) {
dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
+ ionic_lif_unregister_phc(lif);
return err;
}
- ionic_link_status_check_request(lif, true);
+ ionic_link_status_check_request(lif, CAN_SLEEP);
lif->registered = true;
ionic_lif_set_netdev_info(lif);
@@ -2989,6 +3384,9 @@ void ionic_lif_unregister(struct ionic_lif *lif)
if (lif->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(lif->netdev);
+
+ ionic_lif_unregister_phc(lif);
+
lif->registered = false;
}
@@ -3128,6 +3526,16 @@ int ionic_lif_size(struct ionic *ionic)
ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
+ /* reserve last queue id for hardware timestamping */
+ if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
+ if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
+ lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
+ } else {
+ ntxqs_per_lif -= 1;
+ nrxqs_per_lif -= 1;
+ }
+ }
+
nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
nxqs = min(nxqs, num_online_cpus());
neqs = min(neqs_per_lif, num_online_cpus());
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 563dba384a53..346506f01715 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -4,6 +4,9 @@
#ifndef _IONIC_LIF_H_
#define _IONIC_LIF_H_
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <uapi/linux/net_tstamp.h>
#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
@@ -36,6 +39,8 @@ struct ionic_tx_stats {
u64 crc32_csum;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
u64 dma_map_err;
+ u64 hwstamp_valid;
+ u64 hwstamp_invalid;
};
struct ionic_rx_stats {
@@ -49,6 +54,8 @@ struct ionic_rx_stats {
u64 csum_error;
u64 dma_map_err;
u64 alloc_err;
+ u64 hwstamp_valid;
+ u64 hwstamp_invalid;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -125,6 +132,10 @@ struct ionic_lif_sw_stats {
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_error;
+ u64 tx_hwstamp_valid;
+ u64 tx_hwstamp_invalid;
+ u64 rx_hwstamp_valid;
+ u64 rx_hwstamp_invalid;
u64 hw_tx_dropped;
u64 hw_rx_dropped;
u64 hw_rx_over_errors;
@@ -139,6 +150,7 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FW_RESET,
IONIC_LIF_F_SPLIT_INTR,
+ IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
IONIC_LIF_F_RX_DIM_INTR,
@@ -157,40 +169,45 @@ struct ionic_qtype_info {
u16 sg_desc_stride;
};
+struct ionic_phc;
+
#define IONIC_LIF_NAME_MAX_SZ 32
struct ionic_lif {
- char name[IONIC_LIF_NAME_MAX_SZ];
- struct list_head list;
struct net_device *netdev;
DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
struct ionic *ionic;
- bool registered;
unsigned int index;
unsigned int hw_index;
- unsigned int kern_pid;
- u64 __iomem *kern_dbpage;
struct mutex queue_lock; /* lock for queue structures */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
struct ionic_qcq **txqcqs;
+ struct ionic_qcq *hwstamp_txq;
struct ionic_tx_stats *txqstats;
struct ionic_qcq **rxqcqs;
+ struct ionic_qcq *hwstamp_rxq;
struct ionic_rx_stats *rxqstats;
+ struct ionic_deferred deferred;
+ struct work_struct tx_timeout_work;
u64 last_eid;
+ unsigned int kern_pid;
+ u64 __iomem *kern_dbpage;
unsigned int neqs;
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u32 rx_copybreak;
- u32 tx_budget;
+ u64 rxq_features;
unsigned int rx_mode;
u64 hw_features;
+ bool registered;
bool mc_overflow;
- unsigned int nmcast;
bool uc_overflow;
u16 lif_type;
+ unsigned int nmcast;
unsigned int nucast;
+ char name[IONIC_LIF_NAME_MAX_SZ];
union ionic_lif_identity *identity;
struct ionic_lif_info *info;
@@ -205,16 +222,34 @@ struct ionic_lif {
u32 rss_ind_tbl_sz;
struct ionic_rx_filters rx_filters;
- struct ionic_deferred deferred;
- unsigned long *dbid_inuse;
- unsigned int dbid_count;
- struct dentry *dentry;
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
u32 tx_coalesce_usecs; /* what the user asked for */
u32 tx_coalesce_hw; /* what the hw is using */
+ unsigned long *dbid_inuse;
+ unsigned int dbid_count;
- struct work_struct tx_timeout_work;
+ struct ionic_phc *phc;
+
+ struct dentry *dentry;
+};
+
+struct ionic_phc {
+ spinlock_t lock; /* lock for cc and tc */
+ struct cyclecounter cc;
+ struct timecounter tc;
+
+ struct mutex config_lock; /* lock for ts_config */
+ struct hwtstamp_config ts_config;
+ u64 ts_config_rx_filt;
+ u32 ts_config_tx_mode;
+
+ u32 init_cc_mult;
+ long aux_work_delay;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp;
+ struct ionic_lif *lif;
};
struct ionic_queue_params {
@@ -222,6 +257,7 @@ struct ionic_queue_params {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
unsigned int intr_split;
+ u64 rxq_features;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
@@ -231,6 +267,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ qparam->rxq_features = lif->rxq_features;
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
@@ -263,6 +300,49 @@ void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
int ionic_lif_size(struct ionic *ionic);
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
+int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
+int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
+ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
+void ionic_lif_register_phc(struct ionic_lif *lif);
+void ionic_lif_unregister_phc(struct ionic_lif *lif);
+void ionic_lif_alloc_phc(struct ionic_lif *lif);
+void ionic_lif_free_phc(struct ionic_lif *lif);
+#else
+static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter)
+{
+ return ns_to_ktime(0);
+}
+
+static inline void ionic_lif_register_phc(struct ionic_lif *lif) {}
+static inline void ionic_lif_unregister_phc(struct ionic_lif *lif) {}
+static inline void ionic_lif_alloc_phc(struct ionic_lif *lif) {}
+static inline void ionic_lif_free_phc(struct ionic_lif *lif) {}
+#endif
+
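The #else stubs above keep call sites unconditional: when CONFIG_PTP_1588_CLOCK is off, the setters report -EOPNOTSUPP and the register/alloc hooks compile to nothing. A standalone sketch of the same idiom, using a hypothetical FEATURE_PHC macro in place of the Kconfig test:

    #include <errno.h>
    #include <stdio.h>

    #ifdef FEATURE_PHC
    int phc_replay(void);                   /* real implementation elsewhere */
    #else
    static inline int phc_replay(void)      /* stub: compiles away when unused */
    {
            return -EOPNOTSUPP;
    }
    #endif

    int main(void)
    {
            /* prints -95 (-EOPNOTSUPP) unless built with -DFEATURE_PHC */
            printf("%d\n", phc_replay());
            return 0;
    }
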
+int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif);
+int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif);
+int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all);
+int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode);
+int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class);
+
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
int ionic_reconfigure_queues(struct ionic_lif *lif,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index fbc57de6683e..61cfe2120817 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -148,6 +148,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_LIF_SETATTR";
case IONIC_CMD_LIF_GETATTR:
return "IONIC_CMD_LIF_GETATTR";
+ case IONIC_CMD_LIF_SETPHC:
+ return "IONIC_CMD_LIF_SETPHC";
case IONIC_CMD_RX_MODE_SET:
return "IONIC_CMD_RX_MODE_SET";
case IONIC_CMD_RX_FILTER_ADD:
@@ -187,10 +189,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_queue *q = &lif->adminqcq->q;
struct ionic_desc_info *desc_info;
+ unsigned long irqflags;
+ struct ionic_queue *q;
- spin_lock(&lif->adminq_lock);
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+ if (!lif->adminqcq) {
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+ return;
+ }
+
+ q = &lif->adminqcq->q;
while (q->tail_idx != q->head_idx) {
desc_info = &q->info[q->tail_idx];
@@ -199,7 +208,7 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
desc_info->cb_arg = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
- spin_unlock(&lif->adminq_lock);
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
}
static int ionic_adminq_check_err(struct ionic_lif *lif,
@@ -234,35 +243,36 @@ static void ionic_adminq_cb(struct ionic_queue *q,
{
struct ionic_admin_ctx *ctx = cb_arg;
struct ionic_admin_comp *comp;
- struct device *dev;
if (!ctx)
return;
comp = cq_info->cq_desc;
- dev = &q->lif->netdev->dev;
memcpy(&ctx->comp, comp, sizeof(*comp));
- dev_dbg(dev, "comp admin queue command:\n");
+ dev_dbg(q->dev, "comp admin queue command:\n");
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->comp, sizeof(ctx->comp), true);
complete_all(&ctx->work);
}
-static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_desc_info *desc_info;
+ unsigned long irqflags;
struct ionic_queue *q;
int err = 0;
- if (!lif->adminqcq)
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+ if (!lif->adminqcq) {
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
return -EIO;
+ }
q = &lif->adminqcq->q;
- spin_lock(&lif->adminq_lock);
if (!ionic_q_has_space(q, 1)) {
err = -ENOSPC;
goto err_out;
@@ -282,19 +292,17 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
ionic_q_post(q, true, ionic_adminq_cb, ctx);
err_out:
- spin_unlock(&lif->adminq_lock);
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
return err;
}
-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err)
{
struct net_device *netdev = lif->netdev;
unsigned long remaining;
const char *name;
- int err;
- err = ionic_adminq_post(lif, ctx);
if (err) {
if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
@@ -309,6 +317,15 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
return ionic_adminq_check_err(lif, ctx, (remaining == 0));
}
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ int err;
+
+ err = ionic_adminq_post(lif, ctx);
+
+ return ionic_adminq_wait(lif, ctx, err);
+}
+
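Splitting ionic_adminq_post_wait() into post and wait halves lets a caller post while holding a lock that orders the commands, then sleep for completion only after the lock is dropped; the ionic_setphc_cmd() path added below depends on exactly this. A standalone POSIX sketch (not driver code) of that shape, with a worker thread standing in for the completion interrupt:

    #include <pthread.h>
    #include <stdio.h>

    struct cmd {
            pthread_mutex_t done_mtx;
            pthread_cond_t  done_cv;
            int             done;
    };

    static pthread_mutex_t post_lock = PTHREAD_MUTEX_INITIALIZER;

    static void post_cmd(void)
    {
            /* ordering lock held only while the descriptor is queued */
            pthread_mutex_lock(&post_lock);
            /* ... write descriptor, ring doorbell ... */
            pthread_mutex_unlock(&post_lock);
    }

    static void *complete_cmd(void *arg)    /* stands in for the IRQ path */
    {
            struct cmd *c = arg;

            pthread_mutex_lock(&c->done_mtx);
            c->done = 1;
            pthread_cond_signal(&c->done_cv);
            pthread_mutex_unlock(&c->done_mtx);
            return NULL;
    }

    int main(void)
    {
            struct cmd c = { PTHREAD_MUTEX_INITIALIZER,
                             PTHREAD_COND_INITIALIZER, 0 };
            pthread_t t;

            post_cmd();             /* post with the ordering lock held briefly */
            pthread_create(&t, NULL, complete_cmd, &c);

            pthread_mutex_lock(&c.done_mtx);        /* wait, post_lock dropped */
            while (!c.done)
                    pthread_cond_wait(&c.done_cv, &c.done_mtx);
            pthread_mutex_unlock(&c.done_mtx);

            pthread_join(t, NULL);
            puts("command completed");
            return 0;
    }
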
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
new file mode 100644
index 000000000000..a87c87e86aef
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 - 2021 Pensando Systems, Inc */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "ionic.h"
+#include "ionic_bus.h"
+#include "ionic_lif.h"
+#include "ionic_ethtool.h"
+
+static int ionic_hwstamp_tx_mode(int config_tx_type)
+{
+ switch (config_tx_type) {
+ case HWTSTAMP_TX_OFF:
+ return IONIC_TXSTAMP_OFF;
+ case HWTSTAMP_TX_ON:
+ return IONIC_TXSTAMP_ON;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ return IONIC_TXSTAMP_ONESTEP_SYNC;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return IONIC_TXSTAMP_ONESTEP_P2P;
+ default:
+ return -ERANGE;
+ }
+}
+
+static u64 ionic_hwstamp_rx_filt(int config_rx_filter)
+{
+ switch (config_rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ return IONIC_PKT_CLS_PTP1_ALL;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ return IONIC_PKT_CLS_PTP1_SYNC;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ return IONIC_PKT_CLS_PTP2_L4_ALL;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ return IONIC_PKT_CLS_PTP2_L4_SYNC;
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ return IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ;
+
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ return IONIC_PKT_CLS_PTP2_L2_ALL;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ return IONIC_PKT_CLS_PTP2_L2_SYNC;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ return IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ return IONIC_PKT_CLS_PTP2_ALL;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ return IONIC_PKT_CLS_PTP2_SYNC;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ return IONIC_PKT_CLS_PTP2_SYNC | IONIC_PKT_CLS_PTP2_DREQ;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return IONIC_PKT_CLS_NTP_ALL;
+
+ default:
+ return 0;
+ }
+}
+
+static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
+ struct hwtstamp_config *new_ts)
+{
+ struct ionic *ionic = lif->ionic;
+ struct hwtstamp_config *config;
+ struct hwtstamp_config ts;
+ int tx_mode = 0;
+ u64 rx_filt = 0;
+ int err, err2;
+ bool rx_all;
+ __le64 mask;
+
+ if (!lif->phc || !lif->phc->ptp)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&lif->phc->config_lock);
+
+ if (new_ts) {
+ config = new_ts;
+ } else {
+ /* If called with new_ts == NULL, replay the previous request
+ * primarily for recovery after a FW_RESET.
+ * We saved the previous configuration request info, so copy
+ * the previous request for reference, clear the current state
+ * to match the device's reset state, and run with it.
+ */
+ config = &ts;
+ memcpy(config, &lif->phc->ts_config, sizeof(*config));
+ memset(&lif->phc->ts_config, 0, sizeof(lif->phc->ts_config));
+ lif->phc->ts_config_tx_mode = 0;
+ lif->phc->ts_config_rx_filt = 0;
+ }
+
+ tx_mode = ionic_hwstamp_tx_mode(config->tx_type);
+ if (tx_mode < 0) {
+ err = tx_mode;
+ goto err_queues;
+ }
+
+ mask = cpu_to_le64(BIT_ULL(tx_mode));
+ if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) {
+ err = -ERANGE;
+ goto err_queues;
+ }
+
+ rx_filt = ionic_hwstamp_rx_filt(config->rx_filter);
+ rx_all = config->rx_filter != HWTSTAMP_FILTER_NONE && !rx_filt;
+
+ mask = cpu_to_le64(rx_filt);
+ if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) != mask) {
+ rx_filt = 0;
+ rx_all = true;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n",
+ config->rx_filter, rx_filt, rx_all);
+
+ if (tx_mode) {
+ err = ionic_lif_create_hwstamp_txq(lif);
+ if (err)
+ goto err_queues;
+ }
+
+ if (rx_filt) {
+ err = ionic_lif_create_hwstamp_rxq(lif);
+ if (err)
+ goto err_queues;
+ }
+
+ if (tx_mode != lif->phc->ts_config_tx_mode) {
+ err = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
+ if (err)
+ goto err_txmode;
+ }
+
+ if (rx_filt != lif->phc->ts_config_rx_filt) {
+ err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
+ if (err)
+ goto err_rxfilt;
+ }
+
+ if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) {
+ err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all);
+ if (err)
+ goto err_rxall;
+ }
+
+ memcpy(&lif->phc->ts_config, config, sizeof(*config));
+ lif->phc->ts_config_rx_filt = rx_filt;
+ lif->phc->ts_config_tx_mode = tx_mode;
+
+ mutex_unlock(&lif->phc->config_lock);
+
+ return 0;
+
+err_rxall:
+ if (rx_filt != lif->phc->ts_config_rx_filt) {
+ rx_filt = lif->phc->ts_config_rx_filt;
+ err2 = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
+ if (err2)
+ dev_err(ionic->dev,
+ "Failed to revert rx timestamp filter: %d\n", err2);
+ }
+err_rxfilt:
+ if (tx_mode != lif->phc->ts_config_tx_mode) {
+ tx_mode = lif->phc->ts_config_tx_mode;
+ err2 = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
+ if (err2)
+ dev_err(ionic->dev,
+ "Failed to revert tx timestamp mode: %d\n", err2);
+ }
+err_txmode:
+ /* special queues remain allocated, just unused */
+err_queues:
+ mutex_unlock(&lif->phc->config_lock);
+ return err;
+}
+
+int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ionic_lif_hwstamp_set_ts_config(lif, &config);
+ if (err) {
+ netdev_info(lif->netdev, "hwstamp set failed: %d\n", err);
+ return err;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+{
+ int err;
+
+ err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
+ if (err)
+ netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
+
+ return err;
+}
+
+int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ if (!lif->phc || !lif->phc->ptp)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&lif->phc->config_lock);
+ memcpy(&config, &lif->phc->ts_config, sizeof(config));
+ mutex_unlock(&lif->phc->config_lock);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+static u64 ionic_hwstamp_read(struct ionic *ionic,
+ struct ptp_system_timestamp *sts)
+{
+ u32 tick_high_before, tick_high, tick_low;
+
+ /* read and discard low part to defeat hw staging of high part */
+ (void)ioread32(&ionic->idev.hwstamp_regs->tick_low);
+
+ tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);
+
+ ptp_read_system_prets(sts);
+ tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
+ ptp_read_system_postts(sts);
+
+ tick_high = ioread32(&ionic->idev.hwstamp_regs->tick_high);
+
+ /* If tick_high changed, re-read tick_low once more. Assume tick_high
+ * cannot change again within the short span of the tick_low re-read.
+ */
+ if (tick_high != tick_high_before) {
+ ptp_read_system_prets(sts);
+ tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)tick_low | ((u64)tick_high << 32);
+}
+
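ionic_hwstamp_read() performs a latch-free consistent read of a 64-bit counter exposed as two 32-bit registers: sample the high word on both sides of the low word, and re-read the low word if the high word moved (the extra leading tick_low read defeats the hardware's staging of tick_high, per the comment above). A standalone sketch of the core protocol, with a simulated counter standing in for the MMIO registers:

    #include <stdint.h>
    #include <stdio.h>

    /* simulated free-running counter; starts just before a low-word wrap */
    static uint64_t fake_ticks = 0xffffffffULL;

    static uint32_t read_tick_low(void)  { return (uint32_t)fake_ticks++; }
    static uint32_t read_tick_high(void) { return (uint32_t)(fake_ticks >> 32); }

    static uint64_t read_split_counter(void)
    {
            uint32_t hi_before, hi, lo;

            hi_before = read_tick_high();
            lo = read_tick_low();
            hi = read_tick_high();

            /* the low word wrapped between the reads: pair a fresh low
             * word with the post-wrap high word
             */
            if (hi != hi_before)
                    lo = read_tick_low();

            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            /* the wrap is detected and handled: prints 0x100000000 */
            printf("%#llx\n", (unsigned long long)read_split_counter());
            return 0;
    }
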
+static u64 ionic_cc_read(const struct cyclecounter *cc)
+{
+ struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);
+ struct ionic *ionic = phc->lif->ionic;
+
+ return ionic_hwstamp_read(ionic, NULL);
+}
+
+static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx)
+{
+ ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work);
+
+ ctx->cmd.lif_setphc.opcode = IONIC_CMD_LIF_SETPHC;
+ ctx->cmd.lif_setphc.lif_index = cpu_to_le16(phc->lif->index);
+
+ ctx->cmd.lif_setphc.tick = cpu_to_le64(phc->tc.cycle_last);
+ ctx->cmd.lif_setphc.nsec = cpu_to_le64(phc->tc.nsec);
+ ctx->cmd.lif_setphc.frac = cpu_to_le64(phc->tc.frac);
+ ctx->cmd.lif_setphc.mult = cpu_to_le32(phc->cc.mult);
+ ctx->cmd.lif_setphc.shift = cpu_to_le32(phc->cc.shift);
+
+ return ionic_adminq_post(phc->lif, ctx);
+}
+
+static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
+ struct ionic_admin_ctx ctx = {};
+ unsigned long irqflags;
+ s64 adj;
+ int err;
+
+ /* Reject phc adjustments during device upgrade */
+ if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
+ return -EBUSY;
+
+ /* Adjustment value scaled by 2^16 million */
+ adj = (s64)scaled_ppm * phc->init_cc_mult;
+
+ /* Adjustment value to scale */
+ adj /= (s64)SCALED_PPM;
+
+ /* Final adjusted multiplier */
+ adj += phc->init_cc_mult;
+
+ spin_lock_irqsave(&phc->lock, irqflags);
+
+ /* update the point-in-time basis to now, before adjusting the rate */
+ timecounter_read(&phc->tc);
+ phc->cc.mult = adj;
+
+ /* Setphc commands are posted in-order, sequenced by phc->lock. We
+ * need to drop the lock before waiting for the command to complete.
+ */
+ err = ionic_setphc_cmd(phc, &ctx);
+
+ spin_unlock_irqrestore(&phc->lock, irqflags);
+
+ return ionic_adminq_wait(phc->lif, &ctx, err);
+}
+
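For reference, scaled_ppm is the PTP core's frequency unit: parts per million with a 2^16 fractional scale, so 1 ppm == 65536 units and the adjustment is a signed fraction of the initial multiplier. A standalone worked example of the same arithmetic (SKETCH_SCALED_PPM is an assumed stand-in for the driver's SCALED_PPM constant):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed value for the sketch: 1 ppm == 65536 scaled units */
    #define SKETCH_SCALED_PPM (1000000LL << 16)

    static uint32_t adjust_mult(uint32_t init_mult, long scaled_ppm)
    {
            int64_t adj = (int64_t)scaled_ppm * init_mult;

            adj /= SKETCH_SCALED_PPM;       /* signed fraction of init_mult */
            return init_mult + (int32_t)adj;
    }

    int main(void)
    {
            /* +10 ppm on a 1 << 28 multiplier: 2684 counts faster */
            printf("%u\n", adjust_mult(1u << 28, 10 * 65536));
            return 0;
    }
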
+static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
+ struct ionic_admin_ctx ctx = {};
+ unsigned long irqflags;
+ int err;
+
+ /* Reject phc adjustments during device upgrade */
+ if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
+ return -EBUSY;
+
+ spin_lock_irqsave(&phc->lock, irqflags);
+
+ timecounter_adjtime(&phc->tc, delta);
+
+ /* Setphc commands are posted in-order, sequenced by phc->lock. We
+ * need to drop the lock before waiting for the command to complete.
+ */
+ err = ionic_setphc_cmd(phc, &ctx);
+
+ spin_unlock_irqrestore(&phc->lock, irqflags);
+
+ return ionic_adminq_wait(phc->lif, &ctx, err);
+}
+
+static int ionic_phc_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
+ struct ionic_admin_ctx ctx = {};
+ unsigned long irqflags;
+ int err;
+ u64 ns;
+
+ /* Reject phc adjustments during device upgrade */
+ if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
+ return -EBUSY;
+
+ ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&phc->lock, irqflags);
+
+ timecounter_init(&phc->tc, &phc->cc, ns);
+
+ /* Setphc commands are posted in-order, sequenced by phc->lock. We
+ * need to drop the lock before waiting for the command to complete.
+ */
+ err = ionic_setphc_cmd(phc, &ctx);
+
+ spin_unlock_irqrestore(&phc->lock, irqflags);
+
+ return ionic_adminq_wait(phc->lif, &ctx, err);
+}
+
+static int ionic_phc_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
+ struct ionic *ionic = phc->lif->ionic;
+ unsigned long irqflags;
+ u64 tick, ns;
+
+ /* Do not attempt to read device time during upgrade */
+ if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
+ return -EBUSY;
+
+ spin_lock_irqsave(&phc->lock, irqflags);
+
+ tick = ionic_hwstamp_read(ionic, sts);
+
+ ns = timecounter_cyc2time(&phc->tc, tick);
+
+ spin_unlock_irqrestore(&phc->lock, irqflags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static long ionic_phc_aux_work(struct ptp_clock_info *info)
+{
+ struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
+ struct ionic_admin_ctx ctx = {};
+ unsigned long irqflags;
+ int err;
+
+ /* Do not update phc during device upgrade, but keep polling to resume
+ * after upgrade. Since we don't update the point in time basis, there
+ * is no expectation that we are maintaining the phc time during the
+ * upgrade. After upgrade, it will need to be readjusted back to the
+ * correct time by the ptp daemon.
+ */
+ if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
+ return phc->aux_work_delay;
+
+ spin_lock_irqsave(&phc->lock, irqflags);
+
+ /* update point-in-time basis to now */
+ timecounter_read(&phc->tc);
+
+ /* Setphc commands are posted in-order, sequenced by phc->lock. We
+ * need to drop the lock before waiting for the command to complete.
+ */
+ err = ionic_setphc_cmd(phc, &ctx);
+
+ spin_unlock_irqrestore(&phc->lock, irqflags);
+
+ ionic_adminq_wait(phc->lif, &ctx, err);
+
+ return phc->aux_work_delay;
+}
+
+ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 tick)
+{
+ unsigned long irqflags;
+ u64 ns;
+
+ if (!lif->phc)
+ return 0;
+
+ spin_lock_irqsave(&lif->phc->lock, irqflags);
+ ns = timecounter_cyc2time(&lif->phc->tc, tick);
+ spin_unlock_irqrestore(&lif->phc->lock, irqflags);
+
+ return ns_to_ktime(ns);
+}
+
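timecounter_cyc2time() converts a raw tick to nanoseconds against the last saved basis. A standalone sketch of that conversion under the usual cyclecounter convention, ns = base + ((tick - cycle_last) & mask) * mult >> shift, using 128-bit intermediate math; the kernel version also carries a fractional-nanosecond remainder, omitted here:

    #include <stdint.h>
    #include <stdio.h>

    struct sketch_tc {
            uint64_t mask;          /* counter wrap mask (2^n - 1) */
            uint32_t mult;          /* ns = ticks * mult >> shift */
            uint32_t shift;
            uint64_t cycle_last;    /* tick at the last basis update */
            uint64_t base_ns;       /* wall-clock ns at cycle_last */
    };

    static uint64_t sketch_cyc2ns(const struct sketch_tc *tc, uint64_t tick)
    {
            uint64_t delta = (tick - tc->cycle_last) & tc->mask;

            return tc->base_ns +
                   (uint64_t)(((__uint128_t)delta * tc->mult) >> tc->shift);
    }

    int main(void)
    {
            struct sketch_tc tc = {
                    .mask = (1ULL << 48) - 1,
                    .mult = 1 << 28,        /* 1 tick == 1 ns at shift 28 */
                    .shift = 28,
                    .cycle_last = 1000,
                    .base_ns = 5000,
            };

            /* 234 ticks past the basis: prints 5234 */
            printf("%llu\n", (unsigned long long)sketch_cyc2ns(&tc, 1234));
            return 0;
    }
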
+static const struct ptp_clock_info ionic_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ionic_ptp",
+ .adjfine = ionic_phc_adjfine,
+ .adjtime = ionic_phc_adjtime,
+ .gettimex64 = ionic_phc_gettimex64,
+ .settime64 = ionic_phc_settime64,
+ .do_aux_work = ionic_phc_aux_work,
+};
+
+void ionic_lif_register_phc(struct ionic_lif *lif)
+{
+ if (!lif->phc || !(lif->hw_features & IONIC_ETH_HW_TIMESTAMP))
+ return;
+
+ lif->phc->ptp = ptp_clock_register(&lif->phc->ptp_info, lif->ionic->dev);
+
+ if (IS_ERR(lif->phc->ptp)) {
+ dev_warn(lif->ionic->dev, "Cannot register phc device: %ld\n",
+ PTR_ERR(lif->phc->ptp));
+
+ lif->phc->ptp = NULL;
+ }
+
+ if (lif->phc->ptp)
+ ptp_schedule_worker(lif->phc->ptp, lif->phc->aux_work_delay);
+}
+
+void ionic_lif_unregister_phc(struct ionic_lif *lif)
+{
+ if (!lif->phc || !lif->phc->ptp)
+ return;
+
+ ptp_clock_unregister(lif->phc->ptp);
+
+ lif->phc->ptp = NULL;
+}
+
+void ionic_lif_alloc_phc(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ struct ionic_phc *phc;
+ u64 delay, diff, mult;
+ u64 frac = 0;
+ u64 features;
+ u32 shift;
+
+ if (!ionic->idev.hwstamp_regs)
+ return;
+
+ features = le64_to_cpu(ionic->ident.lif.eth.config.features);
+ if (!(features & IONIC_ETH_HW_TIMESTAMP))
+ return;
+
+ phc = devm_kzalloc(ionic->dev, sizeof(*phc), GFP_KERNEL);
+ if (!phc)
+ return;
+
+ phc->lif = lif;
+
+ phc->cc.read = ionic_cc_read;
+ phc->cc.mask = le64_to_cpu(ionic->ident.dev.hwstamp_mask);
+ phc->cc.mult = le32_to_cpu(ionic->ident.dev.hwstamp_mult);
+ phc->cc.shift = le32_to_cpu(ionic->ident.dev.hwstamp_shift);
+
+ if (!phc->cc.mult) {
+ dev_err(lif->ionic->dev,
+ "Invalid device PHC mask multiplier %u, disabling HW timestamp support\n",
+ phc->cc.mult);
+ devm_kfree(lif->ionic->dev, phc);
+ lif->phc = NULL;
+ return;
+ }
+
+ dev_dbg(lif->ionic->dev, "Device PHC mask %#llx mult %u shift %u\n",
+ phc->cc.mask, phc->cc.mult, phc->cc.shift);
+
+ spin_lock_init(&phc->lock);
+ mutex_init(&phc->config_lock);
+
+ /* the max tick span is limited by the multiplier, or by the update period. */
+ if (phc->cc.shift + 2 + ilog2(IONIC_PHC_UPDATE_NS) >= 64) {
+ /* max ticks that do not overflow when multiplied by max
+ * adjusted multiplier (twice the initial multiplier)
+ */
+ diff = U64_MAX / phc->cc.mult / 2;
+ } else {
+ /* approx ticks at four times the update period */
+ diff = (u64)IONIC_PHC_UPDATE_NS << (phc->cc.shift + 2);
+ diff = DIV_ROUND_UP(diff, phc->cc.mult);
+ }
+
+ /* transform to bitmask */
+ diff |= diff >> 1;
+ diff |= diff >> 2;
+ diff |= diff >> 4;
+ diff |= diff >> 8;
+ diff |= diff >> 16;
+ diff |= diff >> 32;
+
+ /* constrain to the hardware bitmask, and use this as the bitmask */
+ diff &= phc->cc.mask;
+ phc->cc.mask = diff;
+
+ /* the wrap period is now defined by diff (or phc->cc.mask)
+ *
+ * we will update the time basis at about 1/4 the wrap period, so
+ * should not see a difference of more than +/- diff/4.
+ *
+ * this keeps us safely within the +/- diff/2 limit required by
+ * timecounter_cyc2time to detect an old time stamp.
+ *
+ * adjust the initial multiplier, being careful to avoid overflow:
+ * - do not overflow 63 bits: init_cc_mult * SCALED_PPM
+ * - do not overflow 64 bits: max_mult * (diff / 2)
+ *
+ * we want to increase the initial multiplier as much as possible, to
+ * allow for more precise adjustment in ionic_phc_adjfine.
+ *
+ * only adjust the multiplier if we can double it or more.
+ */
+ mult = U64_MAX / 2 / max(diff / 2, SCALED_PPM);
+ shift = mult / phc->cc.mult;
+ if (shift >= 2) {
+ /* initial multiplier will be 2^n of hardware cc.mult */
+ shift = fls(shift);
+ /* increase cc.mult and cc.shift by the same 2^n and n. */
+ phc->cc.mult <<= shift;
+ phc->cc.shift += shift;
+ }
+
+ dev_dbg(lif->ionic->dev, "Initial PHC mask %#llx mult %u shift %u\n",
+ phc->cc.mask, phc->cc.mult, phc->cc.shift);
+
+ /* frequency adjustments are relative to the initial multiplier */
+ phc->init_cc_mult = phc->cc.mult;
+
+ timecounter_init(&phc->tc, &phc->cc, ktime_get_real_ns());
+
+ /* Update cycle_last at 1/4 the wrap period, or IONIC_PHC_UPDATE_NS */
+ delay = min_t(u64, IONIC_PHC_UPDATE_NS,
+ cyclecounter_cyc2ns(&phc->cc, diff / 4, 0, &frac));
+ dev_dbg(lif->ionic->dev, "Work delay %llu ms\n", delay / NSEC_PER_MSEC);
+
+ phc->aux_work_delay = nsecs_to_jiffies(delay);
+
+ phc->ptp_info = ionic_ptp_info;
+
+ /* We allow adjusting the multiplier by up to +/- 1 part per 1,
+ * here expressed as NORMAL_PPB (1 billion parts per billion).
+ */
+ phc->ptp_info.max_adj = NORMAL_PPB;
+
+ lif->phc = phc;
+}
+
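The diff-smearing sequence in ionic_lif_alloc_phc() is the standard bit trick: OR-ing a value with progressively larger right shifts of itself propagates the top set bit downward, rounding any value up to the next 2^n - 1 so it can serve directly as a wrap mask (the driver then also constrains it by the hardware mask). A standalone check:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t to_bitmask(uint64_t v)
    {
            v |= v >> 1;    /* after this, the top two bits are set */
            v |= v >> 2;    /* top four */
            v |= v >> 4;
            v |= v >> 8;
            v |= v >> 16;
            v |= v >> 32;   /* every bit below the top set bit is now set */
            return v;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)to_bitmask(0x5000)); /* 0x7fff */
            return 0;
    }
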
+void ionic_lif_free_phc(struct ionic_lif *lif)
+{
+ if (!lif->phc)
+ return;
+
+ mutex_destroy(&lif->phc->config_lock);
+
+ devm_kfree(lif->ionic->dev, lif->phc);
+ lif->phc = NULL;
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index cd0076fc3044..d71316d9ded2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -140,6 +140,9 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
case IONIC_RX_FILTER_MATCH_MAC_VLAN:
key = le16_to_cpu(ac->mac_vlan.vlan);
break;
+ case IONIC_RX_FILTER_STEER_PKTCLASS:
+ key = 0;
+ break;
default:
return -EINVAL;
}
@@ -210,3 +213,21 @@ struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
return NULL;
}
+
+struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
+{
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ unsigned int key;
+
+ key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
+ head = &lif->rx_filters.by_hash[key];
+
+ hlist_for_each_entry(f, head, by_hash) {
+ if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
+ continue;
+ return f;
+ }
+
+ return NULL;
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index cf8f4c0a961c..1ead48be3c83 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -31,5 +31,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
u32 hash, struct ionic_admin_ctx *ctx);
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid);
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr);
+struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
#endif /* _IONIC_RX_FILTER_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 6ae75b771a15..58a854666c62 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -130,6 +130,8 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(frags),
IONIC_TX_STAT_DESC(tso),
IONIC_TX_STAT_DESC(tso_bytes),
+ IONIC_TX_STAT_DESC(hwstamp_valid),
+ IONIC_TX_STAT_DESC(hwstamp_invalid),
IONIC_TX_STAT_DESC(csum_none),
IONIC_TX_STAT_DESC(csum),
IONIC_TX_STAT_DESC(vlan_inserted),
@@ -143,6 +145,8 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_none),
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
+ IONIC_RX_STAT_DESC(hwstamp_valid),
+ IONIC_RX_STAT_DESC(hwstamp_invalid),
IONIC_RX_STAT_DESC(dropped),
IONIC_RX_STAT_DESC(vlan_stripped),
};
@@ -177,33 +181,54 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
+static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
+ struct ionic_lif_sw_stats *stats)
+{
+ struct ionic_tx_stats *txstats = &lif->txqstats[q_num];
+
+ stats->tx_packets += txstats->pkts;
+ stats->tx_bytes += txstats->bytes;
+ stats->tx_tso += txstats->tso;
+ stats->tx_tso_bytes += txstats->tso_bytes;
+ stats->tx_csum_none += txstats->csum_none;
+ stats->tx_csum += txstats->csum;
+ stats->tx_hwstamp_valid += txstats->hwstamp_valid;
+ stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
+}
+
+static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
+ struct ionic_lif_sw_stats *stats)
+{
+ struct ionic_rx_stats *rxstats = &lif->rxqstats[q_num];
+
+ stats->rx_packets += rxstats->pkts;
+ stats->rx_bytes += rxstats->bytes;
+ stats->rx_csum_none += rxstats->csum_none;
+ stats->rx_csum_complete += rxstats->csum_complete;
+ stats->rx_csum_error += rxstats->csum_error;
+ stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
+ stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
+}
+
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
- struct ionic_tx_stats *txstats;
- struct ionic_rx_stats *rxstats;
struct rtnl_link_stats64 ns;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txstats = &lif->txqstats[q_num];
- stats->tx_packets += txstats->pkts;
- stats->tx_bytes += txstats->bytes;
- stats->tx_tso += txstats->tso;
- stats->tx_tso_bytes += txstats->tso_bytes;
- stats->tx_csum_none += txstats->csum_none;
- stats->tx_csum += txstats->csum;
-
- rxstats = &lif->rxqstats[q_num];
- stats->rx_packets += rxstats->pkts;
- stats->rx_bytes += rxstats->bytes;
- stats->rx_csum_none += rxstats->csum_none;
- stats->rx_csum_complete += rxstats->csum_complete;
- stats->rx_csum_error += rxstats->csum_error;
+ ionic_add_lif_txq_stats(lif, q_num, stats);
+ ionic_add_lif_rxq_stats(lif, q_num, stats);
}
+ if (lif->hwstamp_txq)
+ ionic_add_lif_txq_stats(lif, lif->hwstamp_txq->q.index, stats);
+
+ if (lif->hwstamp_rxq)
+ ionic_add_lif_rxq_stats(lif, lif->hwstamp_rxq->q.index, stats);
+
ionic_get_stats64(lif->netdev, &ns);
stats->hw_tx_dropped = ns.tx_dropped;
stats->hw_rx_dropped = ns.rx_dropped;
@@ -214,30 +239,30 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
{
- u64 total = 0;
-
- /* lif stats */
- total += IONIC_NUM_LIF_STATS;
+ u64 total = 0, tx_queues = MAX_Q(lif), rx_queues = MAX_Q(lif);
- /* tx stats */
- total += MAX_Q(lif) * IONIC_NUM_TX_STATS;
+ if (lif->hwstamp_txq)
+ tx_queues += 1;
- /* rx stats */
- total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
+ if (lif->hwstamp_rxq)
+ rx_queues += 1;
- /* port stats */
+ total += IONIC_NUM_LIF_STATS;
total += IONIC_NUM_PORT_STATS;
+ total += tx_queues * IONIC_NUM_TX_STATS;
+ total += rx_queues * IONIC_NUM_RX_STATS;
+
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
- total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
+ total += tx_queues * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
IONIC_NUM_DBG_INTR_STATS +
IONIC_MAX_NUM_SG_CNTR);
/* rx debug stats */
- total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
+ total += rx_queues * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_DBG_INTR_STATS +
IONIC_NUM_DBG_NAPI_STATS +
IONIC_MAX_NUM_NAPI_CNTR);
@@ -246,97 +271,167 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
return total;
}
+static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf,
+ int q_num)
+{
+ int i;
+
+ for (i = 0; i < IONIC_NUM_TX_STATS; i++)
+ ethtool_sprintf(buf, "tx_%d_%s", q_num,
+ ionic_tx_stats_desc[i].name);
+
+ if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
+ !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
+ return;
+
+ for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++)
+ ethtool_sprintf(buf, "txq_%d_%s", q_num,
+ ionic_txq_stats_desc[i].name);
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
+ ethtool_sprintf(buf, "txq_%d_cq_%s", q_num,
+ ionic_dbg_cq_stats_desc[i].name);
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
+ ethtool_sprintf(buf, "txq_%d_intr_%s", q_num,
+ ionic_dbg_intr_stats_desc[i].name);
+ for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++)
+ ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i);
+}
+
+static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
+ int q_num)
+{
+ int i;
+
+ for (i = 0; i < IONIC_NUM_RX_STATS; i++)
+ ethtool_sprintf(buf, "rx_%d_%s", q_num,
+ ionic_rx_stats_desc[i].name);
+
+ if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
+ !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
+ return;
+
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
+ ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num,
+ ionic_dbg_cq_stats_desc[i].name);
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
+ ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num,
+ ionic_dbg_intr_stats_desc[i].name);
+ for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
+ ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num,
+ ionic_dbg_napi_stats_desc[i].name);
+ for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
+ ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i);
+}
+
static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
{
int i, q_num;
- for (i = 0; i < IONIC_NUM_LIF_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
+ for (i = 0; i < IONIC_NUM_LIF_STATS; i++)
+ ethtool_sprintf(buf, ionic_lif_stats_desc[i].name);
+
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++)
+ ethtool_sprintf(buf, ionic_port_stats_desc[i].name);
+
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++)
+ ionic_sw_stats_get_tx_strings(lif, buf, q_num);
+
+ if (lif->hwstamp_txq)
+ ionic_sw_stats_get_tx_strings(lif, buf, lif->hwstamp_txq->q.index);
+
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++)
+ ionic_sw_stats_get_rx_strings(lif, buf, q_num);
+
+ if (lif->hwstamp_rxq)
+ ionic_sw_stats_get_rx_strings(lif, buf, lif->hwstamp_rxq->q.index);
+}
+
+static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
+ int q_num)
+{
+ struct ionic_tx_stats *txstats;
+ struct ionic_qcq *txqcq;
+ int i;
+
+ txstats = &lif->txqstats[q_num];
+
+ for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
+ **buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]);
+ (*buf)++;
}
- for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- ionic_port_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
+ if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
+ !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
+ return;
+
+ txqcq = lif->txqcqs[q_num];
+ for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->q,
+ &ionic_txq_stats_desc[i]);
+ (*buf)++;
}
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->cq,
+ &ionic_dbg_cq_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->intr,
+ &ionic_dbg_intr_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
+ &ionic_dbg_napi_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
+ **buf = txqcq->napi_stats.work_done_cntr[i];
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
+ **buf = txstats->sg_cntr[i];
+ (*buf)++;
+ }
+}
- for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s",
- q_num, ionic_tx_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
-
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "txq_%d_%s",
- q_num,
- ionic_txq_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "txq_%d_cq_%s",
- q_num,
- ionic_dbg_cq_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "txq_%d_intr_%s",
- q_num,
- ionic_dbg_intr_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "txq_%d_sg_cntr_%d",
- q_num, i);
- *buf += ETH_GSTRING_LEN;
- }
- }
+static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
+ int q_num)
+{
+ struct ionic_rx_stats *rxstats;
+ struct ionic_qcq *rxqcq;
+ int i;
+
+ rxstats = &lif->rxqstats[q_num];
+
+ for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
+ **buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]);
+ (*buf)++;
}
- for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "rx_%d_%s",
- q_num, ionic_rx_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
-
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "rxq_%d_cq_%s",
- q_num,
- ionic_dbg_cq_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "rxq_%d_intr_%s",
- q_num,
- ionic_dbg_intr_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "rxq_%d_napi_%s",
- q_num,
- ionic_dbg_napi_stats_desc[i].name);
- *buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- snprintf(*buf, ETH_GSTRING_LEN,
- "rxq_%d_napi_work_done_%d",
- q_num, i);
- *buf += ETH_GSTRING_LEN;
- }
- }
+
+ if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
+ !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
+ return;
+
+ rxqcq = lif->rxqcqs[q_num];
+ for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->cq,
+ &ionic_dbg_cq_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->intr,
+ &ionic_dbg_intr_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
+ **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
+ &ionic_dbg_napi_stats_desc[i]);
+ (*buf)++;
+ }
+ for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
+ **buf = rxqcq->napi_stats.work_done_cntr[i];
+ (*buf)++;
}
}
@@ -344,9 +439,6 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
struct ionic_port_stats *port_stats;
struct ionic_lif_sw_stats lif_stats;
- struct ionic_qcq *txqcq, *rxqcq;
- struct ionic_tx_stats *txstats;
- struct ionic_rx_stats *rxstats;
int i, q_num;
ionic_get_lif_stats(lif, &lif_stats);
@@ -363,73 +455,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txstats = &lif->txqstats[q_num];
-
- for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
- **buf = IONIC_READ_STAT64(txstats,
- &ionic_tx_stats_desc[i]);
- (*buf)++;
- }
-
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- txqcq = lif->txqcqs[q_num];
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->q,
- &ionic_txq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txstats->sg_cntr[i];
- (*buf)++;
- }
- }
- }
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++)
+ ionic_sw_stats_get_txq_values(lif, buf, q_num);
- for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxstats = &lif->rxqstats[q_num];
-
- for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
- **buf = IONIC_READ_STAT64(rxstats,
- &ionic_rx_stats_desc[i]);
- (*buf)++;
- }
-
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- rxqcq = lif->rxqcqs[q_num];
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = rxqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
- }
- }
+ if (lif->hwstamp_txq)
+ ionic_sw_stats_get_txq_values(lif, buf, lif->hwstamp_txq->q.index);
+
+ for (q_num = 0; q_num < MAX_Q(lif); q_num++)
+ ionic_sw_stats_get_rxq_values(lif, buf, q_num);
+
+ if (lif->hwstamp_rxq)
+ ionic_sw_stats_get_rxq_values(lif, buf, lif->hwstamp_rxq->q.index);
}
const struct ionic_stats_group_intf ionic_stats_groups[] = {
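For context on the stats refactor above: the new per-queue helpers and ionic_sw_stats_get_values() share a single output cursor by passing u64 **buf, so each helper writes its values at the cursor and advances the caller's pointer in place. A minimal userspace sketch of that double-pointer fill pattern (names and counts here are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

/* Each helper writes at the shared cursor and advances it, so the
 * caller can chain per-queue fills without tracking offsets itself. */
static void fill_queue_stats(uint64_t **buf, uint64_t base)
{
	int i;

	for (i = 0; i < 4; i++) {
		**buf = base + i;	/* write at the cursor */
		(*buf)++;		/* advance the caller's pointer */
	}
}

int main(void)
{
	uint64_t stats[8];
	uint64_t *cur = stats;
	int q, i;

	for (q = 0; q < 2; q++)
		fill_queue_stats(&cur, q * 100);

	for (i = 0; i < 8; i++)
		printf("%llu\n", (unsigned long long)stats[i]);
	return 0;
}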
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 4087311f7082..08934888575c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,14 +10,6 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static void ionic_rx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg);
-
-static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
-
-static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
@@ -40,72 +32,149 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
- unsigned int len, bool frags)
+static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
+{
+ buf_info->page = NULL;
+ buf_info->page_offset = 0;
+ buf_info->dma_addr = 0;
+}
+
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
{
- struct ionic_lif *lif = q->lif;
+ struct net_device *netdev = q->lif->netdev;
struct ionic_rx_stats *stats;
- struct net_device *netdev;
- struct sk_buff *skb;
+ struct device *dev;
- netdev = lif->netdev;
- stats = &q->lif->rxqstats[q->index];
+ dev = q->dev;
+ stats = q_to_rx_stats(q);
- if (frags)
- skb = napi_get_frags(&q_to_qcq(q)->napi);
- else
- skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (unlikely(!buf_info)) {
+ net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
+ netdev->name, q->name);
+ return -EINVAL;
+ }
- if (unlikely(!skb)) {
- net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
+ buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+ if (unlikely(!buf_info->page)) {
+ net_err_ratelimited("%s: %s page alloc failed\n",
+ netdev->name, q->name);
stats->alloc_err++;
- return NULL;
+ return -ENOMEM;
}
+ buf_info->page_offset = 0;
- return skb;
+ buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
+ __free_pages(buf_info->page, 0);
+ ionic_rx_buf_reset(buf_info);
+ net_err_ratelimited("%s: %s dma map failed\n",
+ netdev->name, q->name);
+ stats->dma_map_err++;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ionic_rx_page_free(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ struct net_device *netdev = q->lif->netdev;
+ struct device *dev = q->dev;
+
+ if (unlikely(!buf_info)) {
+ net_err_ratelimited("%s: %s invalid buf_info in free\n",
+ netdev->name, q->name);
+ return;
+ }
+
+ if (!buf_info->page)
+ return;
+
+ dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(buf_info->page, 0);
+ ionic_rx_buf_reset(buf_info);
+}
+
+static bool ionic_rx_buf_recycle(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info, u32 used)
+{
+ u32 size;
+
+ /* don't re-use pages allocated in low-mem condition */
+ if (page_is_pfmemalloc(buf_info->page))
+ return false;
+
+ /* don't re-use buffers from non-local numa nodes */
+ if (page_to_nid(buf_info->page) != numa_mem_id())
+ return false;
+
+ size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
+ buf_info->page_offset += size;
+ if (buf_info->page_offset >= IONIC_PAGE_SIZE)
+ return false;
+
+ get_page(buf_info->page);
+
+ return true;
}
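The recycle test above implements simple page splitting: each receive consumes part of the page, the used length is rounded up to IONIC_PAGE_SPLIT_SZ, and the page is reused until the offset runs past the page size. A standalone sketch of the offset arithmetic, with a 4 KiB page and a half-page split chosen purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		4096u
#define PAGE_SPLIT_SZ	(PAGE_SZ / 2)	/* assumed split granularity */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Advance the offset by the aligned amount consumed; report whether
 * the remainder of the page can still back another receive buffer. */
static bool buf_recycle(uint32_t *page_offset, uint32_t used)
{
	*page_offset += ALIGN_UP(used, PAGE_SPLIT_SZ);
	return *page_offset < PAGE_SZ;
}

int main(void)
{
	uint32_t off = 0;

	printf("after first 1500B rx, reusable: %d\n", buf_recycle(&off, 1500)); /* 1 */
	printf("after second 1500B rx, reusable: %d\n", buf_recycle(&off, 1500)); /* 0 */
	return 0;
}

In the driver proper, get_page() takes an extra reference on the recycled page so the half handed to the stack can be released independently of the half kept on the ring.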
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info)
+ struct ionic_rxq_comp *comp)
{
- struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct device *dev = q->lif->ionic->dev;
- struct ionic_page_info *page_info;
+ struct net_device *netdev = q->lif->netdev;
+ struct ionic_buf_info *buf_info;
+ struct ionic_rx_stats *stats;
+ struct device *dev = q->dev;
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
u16 len;
- page_info = &desc_info->pages[0];
+ stats = q_to_rx_stats(q);
+
+ buf_info = &desc_info->bufs[0];
len = le16_to_cpu(comp->len);
- prefetch(page_address(page_info->page) + NET_IP_ALIGN);
+ prefetch(buf_info->page);
- skb = ionic_rx_skb_alloc(q, len, true);
- if (unlikely(!skb))
+ skb = napi_get_frags(&q_to_qcq(q)->napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+ netdev->name, q->name);
+ stats->alloc_err++;
return NULL;
+ }
i = comp->num_sg_elems + 1;
do {
- if (unlikely(!page_info->page)) {
- struct napi_struct *napi = &q_to_qcq(q)->napi;
-
- napi->skb = NULL;
+ if (unlikely(!buf_info->page)) {
dev_kfree_skb(skb);
return NULL;
}
- frag_len = min(len, (u16)PAGE_SIZE);
+ frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
len -= frag_len;
- dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev,
+ buf_info->dma_addr + buf_info->page_offset,
+ frag_len, DMA_FROM_DEVICE);
+
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page_info->page, 0, frag_len, PAGE_SIZE);
- page_info->page = NULL;
- page_info++;
+ buf_info->page, buf_info->page_offset, frag_len,
+ IONIC_PAGE_SIZE);
+
+ if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
+ dma_unmap_page(dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ ionic_rx_buf_reset(buf_info);
+ }
+
+ buf_info++;
+
i--;
} while (i > 0);
@@ -114,30 +183,37 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info)
+ struct ionic_rxq_comp *comp)
{
- struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct device *dev = q->lif->ionic->dev;
- struct ionic_page_info *page_info;
+ struct net_device *netdev = q->lif->netdev;
+ struct ionic_buf_info *buf_info;
+ struct ionic_rx_stats *stats;
+ struct device *dev = q->dev;
struct sk_buff *skb;
u16 len;
- page_info = &desc_info->pages[0];
+ stats = q_to_rx_stats(q);
+
+ buf_info = &desc_info->bufs[0];
len = le16_to_cpu(comp->len);
- skb = ionic_rx_skb_alloc(q, len, false);
- if (unlikely(!skb))
+ skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+ netdev->name, q->name);
+ stats->alloc_err++;
return NULL;
+ }
- if (unlikely(!page_info->page)) {
+ if (unlikely(!buf_info->page)) {
dev_kfree_skb(skb);
return NULL;
}
- dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
+ dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(page_info->page), len);
- dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
+ skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
+ dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
len, DMA_FROM_DEVICE);
skb_put(skb, len);
@@ -151,14 +227,15 @@ static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_cq_info *cq_info,
void *cb_arg)
{
- struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct net_device *netdev;
+ struct ionic_rxq_comp *comp;
struct sk_buff *skb;
+ comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
+
stats = q_to_rx_stats(q);
- netdev = q->lif->netdev;
if (comp->status) {
stats->dropped++;
@@ -169,9 +246,9 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->bytes += le16_to_cpu(comp->len);
if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
- skb = ionic_rx_copybreak(q, desc_info, cq_info);
+ skb = ionic_rx_copybreak(q, desc_info, comp);
else
- skb = ionic_rx_frags(q, desc_info, cq_info);
+ skb = ionic_rx_frags(q, desc_info, comp);
if (unlikely(!skb)) {
stats->dropped++;
@@ -219,17 +296,39 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->vlan_stripped++;
}
+ if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
+ __le64 *cq_desc_hwstamp;
+ u64 hwstamp;
+
+ cq_desc_hwstamp =
+ cq_info->cq_desc +
+ qcq->cq.desc_size -
+ sizeof(struct ionic_rxq_comp) -
+ IONIC_HWSTAMP_CQ_NEGOFFSET;
+
+ hwstamp = le64_to_cpu(*cq_desc_hwstamp);
+
+ if (hwstamp != IONIC_HWSTAMP_INVALID) {
+ skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
+ stats->hwstamp_valid++;
+ } else {
+ stats->hwstamp_invalid++;
+ }
+ }
+
if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
napi_gro_receive(&qcq->napi, skb);
else
napi_gro_frags(&qcq->napi);
}
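The timestamp handling above relies on the completion layout: the hardware timestamp sits at a fixed negative offset from the end of the (possibly oversized) completion descriptor, so the code walks back from cq_desc + desc_size rather than forward from a fixed struct. A hedged sketch of that pointer arithmetic, with sizes invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HWSTAMP_NEGOFFSET 8	/* assumed: timestamp ends 8 bytes
				 * before the completion struct */
struct comp {			/* stand-in for the fixed-size completion */
	uint8_t data[16];
};

int main(void)
{
	uint8_t cq_desc[32];	/* desc_size may exceed sizeof(struct comp) */
	size_t desc_size = sizeof(cq_desc);
	uint64_t ts = 0x1122334455667788ull;
	struct comp *comp;
	uint8_t *stamp;

	/* the completion occupies the tail of the descriptor... */
	comp = (struct comp *)(cq_desc + desc_size - sizeof(*comp));
	/* ...and the timestamp sits just below it */
	stamp = (uint8_t *)comp - HWSTAMP_NEGOFFSET;
	memcpy(stamp, &ts, sizeof(ts));

	printf("comp at +%td, stamp at +%td\n",
	       (uint8_t *)comp - cq_desc, stamp - cq_desc);
	return 0;
}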
-static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
- struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
+ struct ionic_rxq_comp *comp;
+
+ comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
if (!color_match(comp->pkt_type_color, cq->done_color))
return false;
@@ -253,138 +352,75 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
-static int ionic_rx_page_alloc(struct ionic_queue *q,
- struct ionic_page_info *page_info)
-{
- struct ionic_lif *lif = q->lif;
- struct ionic_rx_stats *stats;
- struct net_device *netdev;
- struct device *dev;
-
- netdev = lif->netdev;
- dev = lif->ionic->dev;
- stats = q_to_rx_stats(q);
-
- if (unlikely(!page_info)) {
- net_err_ratelimited("%s: %s invalid page_info in alloc\n",
- netdev->name, q->name);
- return -EINVAL;
- }
-
- page_info->page = dev_alloc_page();
- if (unlikely(!page_info->page)) {
- net_err_ratelimited("%s: %s page alloc failed\n",
- netdev->name, q->name);
- stats->alloc_err++;
- return -ENOMEM;
- }
-
- page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
- put_page(page_info->page);
- page_info->dma_addr = 0;
- page_info->page = NULL;
- net_err_ratelimited("%s: %s dma map failed\n",
- netdev->name, q->name);
- stats->dma_map_err++;
- return -EIO;
- }
-
- return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
- struct ionic_page_info *page_info)
-{
- struct ionic_lif *lif = q->lif;
- struct net_device *netdev;
- struct device *dev;
-
- netdev = lif->netdev;
- dev = lif->ionic->dev;
-
- if (unlikely(!page_info)) {
- net_err_ratelimited("%s: %s invalid page_info in free\n",
- netdev->name, q->name);
- return;
- }
-
- if (unlikely(!page_info->page)) {
- net_err_ratelimited("%s: %s invalid page in free\n",
- netdev->name, q->name);
- return;
- }
-
- dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
-
- put_page(page_info->page);
- page_info->dma_addr = 0;
- page_info->page = NULL;
-}
-
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_desc_info *desc_info;
- struct ionic_page_info *page_info;
struct ionic_rxq_sg_desc *sg_desc;
struct ionic_rxq_sg_elem *sg_elem;
+ struct ionic_buf_info *buf_info;
struct ionic_rxq_desc *desc;
unsigned int remain_len;
- unsigned int seg_len;
+ unsigned int frag_len;
unsigned int nfrags;
unsigned int i, j;
unsigned int len;
len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
- nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
+ nfrags = 0;
remain_len = len;
desc_info = &q->info[q->head_idx];
desc = desc_info->desc;
- sg_desc = desc_info->sg_desc;
- page_info = &desc_info->pages[0];
+ buf_info = &desc_info->bufs[0];
- if (page_info->page) { /* recycle the buffer */
- ionic_rxq_post(q, false, ionic_rx_clean, NULL);
- continue;
- }
-
- /* fill main descriptor - pages[0] */
- desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
- IONIC_RXQ_DESC_OPCODE_SIMPLE;
- desc_info->npages = nfrags;
- if (unlikely(ionic_rx_page_alloc(q, page_info))) {
- desc->addr = 0;
- desc->len = 0;
- return;
+ if (!buf_info->page) { /* alloc a new buffer? */
+ if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
+ desc->addr = 0;
+ desc->len = 0;
+ return;
+ }
}
- desc->addr = cpu_to_le64(page_info->dma_addr);
- seg_len = min_t(unsigned int, PAGE_SIZE, len);
- desc->len = cpu_to_le16(seg_len);
- remain_len -= seg_len;
- page_info++;
- /* fill sg descriptors - pages[1..n] */
- for (j = 0; j < nfrags - 1; j++) {
- if (page_info->page) /* recycle the sg buffer */
- continue;
+ /* fill main descriptor - buf[0] */
+ desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+ frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ desc->len = cpu_to_le16(frag_len);
+ remain_len -= frag_len;
+ buf_info++;
+ nfrags++;
+ /* fill sg descriptors - buf[1..n] */
+ sg_desc = desc_info->sg_desc;
+ for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
sg_elem = &sg_desc->elems[j];
- if (unlikely(ionic_rx_page_alloc(q, page_info))) {
- sg_elem->addr = 0;
- sg_elem->len = 0;
- return;
+ if (!buf_info->page) { /* alloc a new sg buffer? */
+ if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
+ sg_elem->addr = 0;
+ sg_elem->len = 0;
+ return;
+ }
}
- sg_elem->addr = cpu_to_le64(page_info->dma_addr);
- seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
- sg_elem->len = cpu_to_le16(seg_len);
- remain_len -= seg_len;
- page_info++;
+
+ sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+ frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ sg_elem->len = cpu_to_le16(frag_len);
+ remain_len -= frag_len;
+ buf_info++;
+ nfrags++;
}
+ /* clear end sg element as a sentinel */
+ if (j < q->max_sg_elems) {
+ sg_elem = &sg_desc->elems[j];
+ memset(sg_elem, 0, sizeof(*sg_elem));
+ }
+
+ desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+ IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ desc_info->nbufs = nfrags;
+
ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
@@ -395,21 +431,24 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- struct ionic_page_info *page_info;
+ struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
desc_info = &q->info[i];
for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
- page_info = &desc_info->pages[j];
- if (page_info->page)
- ionic_rx_page_free(q, page_info);
+ buf_info = &desc_info->bufs[j];
+ if (buf_info->page)
+ ionic_rx_page_free(q, buf_info);
}
- desc_info->npages = 0;
+ desc_info->nbufs = 0;
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
}
+
+ q->head_idx = 0;
+ q->tail_idx = 0;
}
static void ionic_dim_update(struct ionic_qcq *qcq)
@@ -525,7 +564,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
idev = &lif->ionic->idev;
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
+ tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
ionic_tx_service, NULL, NULL);
rx_work_done = ionic_cq_service(rxcq, budget,
@@ -558,7 +597,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
void *data, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
@@ -576,7 +615,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
size_t offset, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
@@ -588,62 +627,130 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
return dma_addr;
}
+static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
+{
+ struct ionic_buf_info *buf_info = desc_info->bufs;
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct device *dev = q->dev;
+ dma_addr_t dma_addr;
+ unsigned int nfrags;
+ skb_frag_t *frag;
+ int frag_idx;
+
+ dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+ if (dma_mapping_error(dev, dma_addr)) {
+ stats->dma_map_err++;
+ return -EIO;
+ }
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = skb_headlen(skb);
+ buf_info++;
+
+ frag = skb_shinfo(skb)->frags;
+ nfrags = skb_shinfo(skb)->nr_frags;
+ for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
+ dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+ if (dma_mapping_error(dev, dma_addr)) {
+ stats->dma_map_err++;
+ goto dma_fail;
+ }
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = skb_frag_size(frag);
+ buf_info++;
+ }
+
+ desc_info->nbufs = 1 + nfrags;
+
+ return 0;
+
+dma_fail:
+ /* unwind the frag mappings and the head mapping */
+ while (frag_idx > 0) {
+ frag_idx--;
+ buf_info--;
+ dma_unmap_page(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+ return -EIO;
+}
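ionic_tx_map_skb() above centralizes all DMA mapping up front and, on a mid-list failure, unwinds only what was already mapped: the frag mappings are released in reverse order and the head mapping last. A generic sketch of that map-then-unwind pattern (the map/unmap calls are stand-ins, not the kernel DMA API):

#include <stdbool.h>
#include <stdio.h>

#define NBUFS 4

/* stand-ins for dma_map_page()/dma_unmap_page() */
static bool fake_map(int idx)
{
	printf("map %d\n", idx);
	return idx != 2;	/* simulate a failure on the third buffer */
}

static void fake_unmap(int idx)
{
	printf("unmap %d\n", idx);
}

static int map_all(void)
{
	int i;

	for (i = 0; i < NBUFS; i++) {
		if (!fake_map(i))
			goto unwind;
	}
	return 0;

unwind:
	/* release in reverse order, touching only what succeeded */
	while (i > 0)
		fake_unmap(--i);
	return -1;
}

int main(void)
{
	return map_all() ? 1 : 0;
}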
+
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info,
void *cb_arg)
{
- struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
- struct ionic_txq_sg_elem *elem = sg_desc->elems;
+ struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = desc_info->desc;
- struct device *dev = q->lif->ionic->dev;
- u8 opcode, flags, nsge;
- u16 queue_index;
+ struct ionic_qcq *qcq = q_to_qcq(q);
+ struct sk_buff *skb = cb_arg;
+ struct device *dev = q->dev;
unsigned int i;
- u64 addr;
+ u16 qi;
+
+ if (desc_info->nbufs) {
+ dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ buf_info++;
+ for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+ dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ }
- decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
- &opcode, &flags, &nsge, &addr);
+ if (!skb)
+ return;
- /* use unmap_single only if either this is not TSO,
- * or this is first descriptor of a TSO
- */
- if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
- flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
- dma_unmap_single(dev, (dma_addr_t)addr,
- le16_to_cpu(desc->len), DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, (dma_addr_t)addr,
- le16_to_cpu(desc->len), DMA_TO_DEVICE);
-
- for (i = 0; i < nsge; i++, elem++)
- dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
- le16_to_cpu(elem->len), DMA_TO_DEVICE);
-
- if (cb_arg) {
- struct sk_buff *skb = cb_arg;
- u32 len = skb->len;
-
- queue_index = skb_get_queue_mapping(skb);
- if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
- queue_index))) {
- netif_wake_subqueue(q->lif->netdev, queue_index);
- q->wake++;
+ qi = skb_get_queue_mapping(skb);
+
+ if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
+ if (cq_info) {
+ struct skb_shared_hwtstamps hwts = {};
+ __le64 *cq_desc_hwstamp;
+ u64 hwstamp;
+
+ cq_desc_hwstamp =
+ cq_info->cq_desc +
+ qcq->cq.desc_size -
+ sizeof(struct ionic_txq_comp) -
+ IONIC_HWSTAMP_CQ_NEGOFFSET;
+
+ hwstamp = le64_to_cpu(*cq_desc_hwstamp);
+
+ if (hwstamp != IONIC_HWSTAMP_INVALID) {
+ hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tstamp_tx(skb, &hwts);
+
+ stats->hwstamp_valid++;
+ } else {
+ stats->hwstamp_invalid++;
+ }
}
- dev_kfree_skb_any(skb);
- stats->clean++;
- netdev_tx_completed_queue(q_to_ndq(q), 1, len);
+
+ } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
+ netif_wake_subqueue(q->lif->netdev, qi);
+ q->wake++;
}
+
+ desc_info->bytes = skb->len;
+ stats->clean++;
+
+ dev_consume_skb_any(skb);
}
-static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
- struct ionic_txq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
+ struct ionic_txq_comp *comp;
+ int bytes = 0;
+ int pkts = 0;
u16 index;
+ comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+
if (!color_match(comp->color, cq->done_color))
return false;
@@ -652,13 +759,21 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
*/
do {
desc_info = &q->info[q->tail_idx];
+ desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
+ if (desc_info->cb_arg) {
+ pkts++;
+ bytes += desc_info->bytes;
+ }
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
+ if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+
return true;
}
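The pkts/bytes tally feeding netdev_tx_completed_queue() pairs with the netdev_tx_sent_queue() calls on the transmit path; byte-queue limits stay balanced only if every byte reported sent is eventually reported completed, which is why the hardware-timestamp queue is excluded on both sides. A toy model of that pairing invariant:

#include <stdio.h>

/* Toy byte-queue-limit ledger: sent must be matched by completed. */
static long bql_inflight;

static void tx_sent(long bytes)
{
	bql_inflight += bytes;
}

static void tx_completed(long bytes)
{
	bql_inflight -= bytes;
}

int main(void)
{
	tx_sent(1500);
	tx_sent(900);
	tx_completed(2400);	/* one completion may cover many packets */
	printf("inflight after completion: %ld\n", bql_inflight); /* 0 */
	return 0;
}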
@@ -677,15 +792,25 @@ void ionic_tx_flush(struct ionic_cq *cq)
void ionic_tx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
+ int bytes = 0;
+ int pkts = 0;
/* walk the not completed tx entries, if any */
while (q->head_idx != q->tail_idx) {
desc_info = &q->info[q->tail_idx];
+ desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
+ if (desc_info->cb_arg) {
+ pkts++;
+ bytes += desc_info->bytes;
+ }
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
}
+
+ if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
@@ -756,50 +881,34 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
- if (done) {
+ if (start) {
skb_tx_timestamp(skb);
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+ if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+ ionic_txq_post(q, false, ionic_tx_clean, skb);
} else {
- ionic_txq_post(q, false, ionic_tx_clean, NULL);
+ ionic_txq_post(q, done, NULL, NULL);
}
}
-static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
- struct ionic_txq_sg_elem **elem)
-{
- struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
- struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
-
- *elem = sg_desc->elems;
- return desc;
-}
-
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *rewind_desc_info;
- struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *desc_info;
+ struct ionic_buf_info *buf_info;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
- unsigned int frag_left = 0;
- unsigned int offset = 0;
- u16 abort = q->head_idx;
- unsigned int len_left;
+ unsigned int chunk_len;
+ unsigned int frag_rem;
+ unsigned int tso_rem;
+ unsigned int seg_rem;
dma_addr_t desc_addr;
+ dma_addr_t frag_addr;
unsigned int hdrlen;
- unsigned int nfrags;
- unsigned int seglen;
- u64 total_bytes = 0;
- u64 total_pkts = 0;
- u16 rewind = abort;
- unsigned int left;
unsigned int len;
unsigned int mss;
- skb_frag_t *frag;
bool start, done;
bool outer_csum;
- dma_addr_t addr;
bool has_vlan;
u16 desc_len;
u8 desc_nsge;
@@ -807,9 +916,14 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
bool encap;
int err;
+ desc_info = &q->info[q->head_idx];
+ buf_info = desc_info->bufs;
+
+ if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+ return -EIO;
+
+ len = skb->len;
mss = skb_shinfo(skb)->gso_size;
- nfrags = skb_shinfo(skb)->nr_frags;
- len_left = skb->len - skb_headlen(skb);
outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
has_vlan = !!skb_vlan_tag_present(skb);
@@ -834,125 +948,75 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
else
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- seglen = hdrlen + mss;
- left = skb_headlen(skb);
+ tso_rem = len;
+ seg_rem = min(tso_rem, hdrlen + mss);
- desc = ionic_tx_tso_next(q, &elem);
- start = true;
+ frag_addr = 0;
+ frag_rem = 0;
- /* Chop skb->data up into desc segments */
+ start = true;
- while (left > 0) {
- len = min(seglen, left);
- frag_left = seglen - len;
- desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
- if (dma_mapping_error(dev, desc_addr))
- goto err_out_abort;
- desc_len = len;
+ while (tso_rem > 0) {
+ desc = NULL;
+ elem = NULL;
+ desc_addr = 0;
+ desc_len = 0;
desc_nsge = 0;
- left -= len;
- offset += len;
- if (nfrags > 0 && frag_left > 0)
- continue;
- done = (nfrags == 0 && left == 0);
- ionic_tx_tso_post(q, desc, skb,
- desc_addr, desc_nsge, desc_len,
- hdrlen, mss,
- outer_csum,
- vlan_tci, has_vlan,
- start, done);
- total_pkts++;
- total_bytes += start ? len : len + hdrlen;
- desc = ionic_tx_tso_next(q, &elem);
- start = false;
- seglen = mss;
- }
-
- /* Chop skb frags into desc segments */
-
- for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- offset = 0;
- left = skb_frag_size(frag);
- len_left -= left;
- nfrags--;
- stats->frags++;
-
- while (left > 0) {
- if (frag_left > 0) {
- len = min(frag_left, left);
- frag_left -= len;
- addr = ionic_tx_map_frag(q, frag, offset, len);
- if (dma_mapping_error(dev, addr))
- goto err_out_abort;
- elem->addr = cpu_to_le64(addr);
- elem->len = cpu_to_le16(len);
+ /* use fragments until we have enough to post a single descriptor */
+ while (seg_rem > 0) {
+ /* if the fragment is exhausted then move to the next one */
+ if (frag_rem == 0) {
+ /* grab the next fragment */
+ frag_addr = buf_info->dma_addr;
+ frag_rem = buf_info->len;
+ buf_info++;
+ }
+ chunk_len = min(frag_rem, seg_rem);
+ if (!desc) {
+ /* fill main descriptor */
+ desc = desc_info->txq_desc;
+ elem = desc_info->txq_sg_desc->elems;
+ desc_addr = frag_addr;
+ desc_len = chunk_len;
+ } else {
+ /* fill sg descriptor */
+ elem->addr = cpu_to_le64(frag_addr);
+ elem->len = cpu_to_le16(chunk_len);
elem++;
desc_nsge++;
- left -= len;
- offset += len;
- if (nfrags > 0 && frag_left > 0)
- continue;
- done = (nfrags == 0 && left == 0);
- ionic_tx_tso_post(q, desc, skb, desc_addr,
- desc_nsge, desc_len,
- hdrlen, mss, outer_csum,
- vlan_tci, has_vlan,
- start, done);
- total_pkts++;
- total_bytes += start ? len : len + hdrlen;
- desc = ionic_tx_tso_next(q, &elem);
- start = false;
- } else {
- len = min(mss, left);
- frag_left = mss - len;
- desc_addr = ionic_tx_map_frag(q, frag,
- offset, len);
- if (dma_mapping_error(dev, desc_addr))
- goto err_out_abort;
- desc_len = len;
- desc_nsge = 0;
- left -= len;
- offset += len;
- if (nfrags > 0 && frag_left > 0)
- continue;
- done = (nfrags == 0 && left == 0);
- ionic_tx_tso_post(q, desc, skb, desc_addr,
- desc_nsge, desc_len,
- hdrlen, mss, outer_csum,
- vlan_tci, has_vlan,
- start, done);
- total_pkts++;
- total_bytes += start ? len : len + hdrlen;
- desc = ionic_tx_tso_next(q, &elem);
- start = false;
}
+ frag_addr += chunk_len;
+ frag_rem -= chunk_len;
+ tso_rem -= chunk_len;
+ seg_rem -= chunk_len;
}
+ seg_rem = min(tso_rem, mss);
+ done = (tso_rem == 0);
+ /* post descriptor */
+ ionic_tx_tso_post(q, desc, skb,
+ desc_addr, desc_nsge, desc_len,
+ hdrlen, mss, outer_csum, vlan_tci, has_vlan,
+ start, done);
+ start = false;
+ /* Buffer information is stored with the first tso descriptor */
+ desc_info = &q->info[q->head_idx];
+ desc_info->nbufs = 0;
}
- stats->pkts += total_pkts;
- stats->bytes += total_bytes;
+ stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
+ stats->bytes += len;
stats->tso++;
- stats->tso_bytes += total_bytes;
+	stats->tso_bytes += len;
return 0;
-
-err_out_abort:
- while (rewind != q->head_idx) {
- rewind_desc_info = &q->info[rewind];
- ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
- rewind = (rewind + 1) & (q->num_descs - 1);
- }
- q->head_idx = abort;
-
- return -ENOMEM;
}
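The rewritten TSO loop tracks three nested remainders: tso_rem (bytes left in the whole skb), seg_rem (bytes left in the current wire segment: headers plus mss for the first, mss afterwards), and frag_rem (bytes left in the current pre-mapped fragment); each inner step consumes chunk = min(frag_rem, seg_rem). A userspace sketch of the same carving, with invented sizes chosen so the fragments sum to the skb length:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int frags[] = { 3000, 2500 };	/* invented fragment sizes */
	unsigned int hdrlen = 54, mss = 1448;
	unsigned int tso_rem = 5500;		/* == sum of frags[] */
	unsigned int seg_rem = min_u(tso_rem, hdrlen + mss);
	unsigned int frag_rem = 0, fi = 0, seg = 0;

	while (tso_rem > 0) {
		while (seg_rem > 0) {
			unsigned int chunk;

			if (frag_rem == 0)	/* fragment used up: take next */
				frag_rem = frags[fi++];
			chunk = min_u(frag_rem, seg_rem);
			printf("seg %u: chunk %u\n", seg, chunk);
			frag_rem -= chunk;
			tso_rem -= chunk;
			seg_rem -= chunk;
		}
		seg_rem = min_u(tso_rem, mss);	/* later segments carry mss only */
		seg++;
	}
	return 0;
}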
-static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
+ struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
- dma_addr_t dma_addr;
bool has_vlan;
u8 flags = 0;
bool encap;
@@ -961,23 +1025,22 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
has_vlan = !!skb_vlan_tag_present(skb);
encap = skb->encapsulation;
- dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (dma_mapping_error(dev, dma_addr))
- return -ENOMEM;
-
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
- flags, skb_shinfo(skb)->nr_frags, dma_addr);
+ flags, skb_shinfo(skb)->nr_frags,
+ buf_info->dma_addr);
desc->cmd = cpu_to_le64(cmd);
- desc->len = cpu_to_le16(skb_headlen(skb));
- desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
- desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ desc->len = cpu_to_le16(buf_info->len);
if (has_vlan) {
desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
stats->vlan_inserted++;
+ } else {
+ desc->vlan_tci = 0;
}
+ desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
+ desc->csum_offset = cpu_to_le16(skb->csum_offset);
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
@@ -987,12 +1050,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
return 0;
}
-static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
+ struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
- dma_addr_t dma_addr;
bool has_vlan;
u8 flags = 0;
bool encap;
@@ -1001,67 +1064,66 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
has_vlan = !!skb_vlan_tag_present(skb);
encap = skb->encapsulation;
- dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (dma_mapping_error(dev, dma_addr))
- return -ENOMEM;
-
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
- flags, skb_shinfo(skb)->nr_frags, dma_addr);
+ flags, skb_shinfo(skb)->nr_frags,
+ buf_info->dma_addr);
desc->cmd = cpu_to_le64(cmd);
- desc->len = cpu_to_le16(skb_headlen(skb));
+ desc->len = cpu_to_le16(buf_info->len);
if (has_vlan) {
desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
stats->vlan_inserted++;
+ } else {
+ desc->vlan_tci = 0;
}
+ desc->csum_start = 0;
+ desc->csum_offset = 0;
stats->csum_none++;
return 0;
}
-static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
- struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
- unsigned int len_left = skb->len - skb_headlen(skb);
+ struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
+ struct ionic_buf_info *buf_info = &desc_info->bufs[1];
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
- dma_addr_t dma_addr;
- skb_frag_t *frag;
- u16 len;
+ unsigned int i;
- for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
- len = skb_frag_size(frag);
- elem->len = cpu_to_le16(len);
- dma_addr = ionic_tx_map_frag(q, frag, 0, len);
- if (dma_mapping_error(dev, dma_addr))
- return -ENOMEM;
- elem->addr = cpu_to_le64(dma_addr);
- len_left -= len;
- stats->frags++;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
+ elem->addr = cpu_to_le64(buf_info->dma_addr);
+ elem->len = cpu_to_le16(buf_info->len);
}
+ stats->frags += skb_shinfo(skb)->nr_frags;
+
return 0;
}
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_desc_info *desc_info = &q->info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int err;
+ if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+ return -EIO;
+
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
- err = ionic_tx_calc_csum(q, skb);
+ err = ionic_tx_calc_csum(q, skb, desc_info);
else
- err = ionic_tx_calc_no_csum(q, skb);
+ err = ionic_tx_calc_no_csum(q, skb, desc_info);
if (err)
return err;
/* add frags */
- err = ionic_tx_skb_frags(q, skb);
+ err = ionic_tx_skb_frags(q, skb, desc_info);
if (err)
return err;
@@ -1069,7 +1131,8 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+ if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ netdev_tx_sent_queue(q_to_ndq(q), skb->len);
ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
return 0;
@@ -1077,7 +1140,6 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
- int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int ndescs;
int err;
@@ -1088,7 +1150,8 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
else
ndescs = 1;
- if (skb_shinfo(skb)->nr_frags <= sg_elems)
+ /* If non-TSO, just need 1 desc and nr_frags sg elems */
+ if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
return ndescs;
/* Too many frags, so linearize */
@@ -1121,6 +1184,42 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
return stopped;
}
+static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue *q = &lif->hwstamp_txq->q;
+ int err, ndescs;
+
+ /* Does not stop/start txq, because we post to a separate tx queue
+ * for timestamping, and if a packet can't be posted immediately to
+ * the timestamping queue, it is dropped.
+ */
+
+ ndescs = ionic_tx_descs_needed(q, skb);
+ if (unlikely(ndescs < 0))
+ goto err_out_drop;
+
+ if (unlikely(!ionic_q_has_space(q, ndescs)))
+ goto err_out_drop;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
+ if (skb_is_gso(skb))
+ err = ionic_tx_tso(q, skb);
+ else
+ err = ionic_tx(q, skb);
+
+ if (err)
+ goto err_out_drop;
+
+ return NETDEV_TX_OK;
+
+err_out_drop:
+ q->drop++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
u16 queue_index = skb_get_queue_mapping(skb);
@@ -1134,6 +1233,10 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
+ return ionic_start_hwstamp_xmit(skb, netdev);
+
if (unlikely(queue_index >= lif->nxqs))
queue_index = 0;
q = &lif->txqcqs[queue_index]->q;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 7667b72232b8..d7cbaad8a6fb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -14,4 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget);
int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 07824bf9d68d..dfaf10edfabf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -396,6 +396,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
tpa_param->tpa_ipv6_en_flg = 1;
tpa_param->tpa_pkt_split_flg = 1;
tpa_param->tpa_gro_consistent_flg = 1;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index b8dc5c4591ef..ed2b6fe5a78d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4734,6 +4734,7 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
*/
link.speed = (hwfn->cdev->num_hwfns > 1) ?
100000 : 40000;
+ break;
default:
/* In auto mode pass PF link image to VF */
break;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 8c47a9d2a965..8e150dd4f899 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -345,7 +345,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
struct qede_tx_queue *xdp_tx;
struct xdp_frame *xdpf;
dma_addr_t mapping;
- int i, drops = 0;
+ int i, nxmit = 0;
u16 xdp_prod;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -364,18 +364,13 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dmadev, mapping))) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
-
- continue;
- }
+ if (unlikely(dma_mapping_error(dmadev, mapping)))
+ break;
if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
- NULL, xdpf))) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ NULL, xdpf)))
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH) {
@@ -387,7 +382,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
spin_unlock(&xdp_tx->xdp_tx_lock);
- return n_frames - drops;
+ return nxmit;
}
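The qede change above follows the reworked ndo_xdp_xmit convention: the driver stops at the first frame it cannot transmit and returns the count actually queued, and the core frees the unsent tail, instead of the driver freeing each failed frame itself and counting drops. A sketch of the caller/driver split implied by that contract (function names are illustrative):

#include <stdio.h>

/* driver side: queue frames until one fails, report how many went out */
static int xdp_xmit(const int *frames, int n)
{
	int nxmit = 0;

	for (; nxmit < n; nxmit++) {
		if (frames[nxmit] < 0)	/* simulate a mapping/ring failure */
			break;
	}
	return nxmit;
}

int main(void)
{
	int frames[] = { 1, 2, -1, 4 };
	int n = 4, sent = xdp_xmit(frames, n);

	/* caller side: everything past 'sent' is returned/freed here */
	printf("sent %d, caller frees %d\n", sent, n - sent);
	return 0;
}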
int qede_txq_has_work(struct qede_tx_queue *txq)
@@ -1214,12 +1209,9 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
dma_unmap_page(rxq->dev, bd->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
- bd->data, rxq->rx_headroom, cur_size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
+ rxq->rx_headroom, cur_size, PAGE_SIZE);
- skb->truesize += PAGE_SIZE;
- skb->data_len += cur_size;
- skb->len += cur_size;
pkt_len -= cur_size;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4d952036ba82..01ac1e93d27a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -7,7 +7,6 @@
#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 117188e3c7de..87b8c032195d 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
{
struct emac_tpd tpd;
u32 prod_idx;
+ int len;
memset(&tpd, 0, sizeof(tpd));
@@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
if (skb_network_offset(skb) != ETH_HLEN)
TPD_TYP_SET(&tpd, 1);
+ len = skb->len;
emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
- netdev_sent_queue(adpt->netdev, skb->len);
+ netdev_sent_queue(adpt->netdev, len);
 	/* Make sure there are enough free descriptors to hold one
* maximum-sized SKB. We need one desc for each fragment,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5a3b65a6eb4f..ab9b02574a15 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -885,7 +885,7 @@ qca_spi_probe(struct spi_device *spi)
struct net_device *qcaspi_devs = NULL;
u8 legacy_mode = 0;
u16 signature;
- const char *mac;
+ int ret;
if (!spi->dev.of_node) {
dev_err(&spi->dev, "Missing device tree\n");
@@ -962,12 +962,8 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
- mac = of_get_mac_address(spi->dev.of_node);
-
- if (!IS_ERR(mac))
- ether_addr_copy(qca->net_dev->dev_addr, mac);
-
- if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
+ ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr);
+ if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&spi->dev, "Using random MAC address: %pM\n",
qca->net_dev->dev_addr);
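Several drivers in this series (qca_spi, qca_uart, ravb) move from the old of_get_mac_address() that returned a pointer or ERR_PTR to the new form that writes into the caller's buffer and returns an int, falling back to a random address on error. A plain-C sketch of that fallback shape (the helpers here mimic, not reproduce, the kernel API):

#include <stdio.h>
#include <stdlib.h>

static int get_mac_from_fw(unsigned char *addr)	/* stand-in */
{
	(void)addr;
	return -1;	/* pretend firmware provided no address */
}

static void random_mac(unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = rand() & 0xff;
	addr[0] = (addr[0] & 0xfe) | 0x02;	/* locally administered, unicast */
}

int main(void)
{
	unsigned char mac[6];

	if (get_mac_from_fw(mac))	/* nonzero return: fall back */
		random_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}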
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 362b4f5c162c..bcdeca7b3366 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -323,7 +323,6 @@ static int qca_uart_probe(struct serdev_device *serdev)
{
struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
struct qcauart *qca;
- const char *mac;
u32 speed = 115200;
int ret;
@@ -348,12 +347,8 @@ static int qca_uart_probe(struct serdev_device *serdev)
of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
- mac = of_get_mac_address(serdev->dev.of_node);
-
- if (!IS_ERR(mac))
- ether_addr_copy(qca->net_dev->dev_addr, mac);
-
- if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
+ ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr);
+ if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&serdev->dev, "Using random MAC address: %pM\n",
qca->net_dev->dev_addr);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 3d00b3232308..0be5ac7ab261 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -56,20 +56,22 @@ static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
+ struct rmnet_map_header *map_header = (void *)skb->data;
struct rmnet_endpoint *ep;
u16 len, pad;
u8 mux_id;
- if (RMNET_MAP_GET_CD_BIT(skb)) {
+ if (map_header->flags & MAP_CMD_FLAG) {
+ /* Packet contains a MAP command (not data) */
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
- mux_id = RMNET_MAP_GET_MUX_ID(skb);
- pad = RMNET_MAP_GET_PAD(skb);
- len = RMNET_MAP_GET_LENGTH(skb) - pad;
+ mux_id = map_header->mux_id;
+ pad = map_header->flags & MAP_PAD_LEN_MASK;
+ len = ntohs(map_header->pkt_len) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
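The rmnet changes replace skb-casting accessor macros and C bitfields with a flags byte plus masks, which keeps the parsing explicit about byte order. A hedged sketch of reading such a header; the field layout and mask values here are assumptions for illustration, not the MAP wire format:

#include <arpa/inet.h>	/* ntohs/htons */
#include <stdint.h>
#include <stdio.h>

#define CMD_FLAG	0x80	/* assumed bit positions */
#define PAD_LEN_MASK	0x3f

struct map_header {
	uint8_t  flags;		/* cmd bit + pad length */
	uint8_t  mux_id;
	uint16_t pkt_len;	/* big-endian on the wire */
};

int main(void)
{
	struct map_header hdr = { 0x02, 5, htons(1502) };

	if (hdr.flags & CMD_FLAG) {
		puts("command packet");
	} else {
		uint16_t pad = hdr.flags & PAD_LEN_MASK;
		uint16_t len = ntohs(hdr.pkt_len) - pad;

		printf("data: mux %u, payload %u bytes\n",
		       (unsigned int)hdr.mux_id, (unsigned int)len);
	}
	return 0;
}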
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 576501db2a0b..2aea153f4247 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -32,18 +32,6 @@ enum rmnet_map_commands {
RMNET_MAP_COMMAND_ENUM_LENGTH
};
-#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
- (Y)->data)->mux_id)
-#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
- (Y)->data)->cd_bit)
-#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
- (Y)->data)->pad_len)
-#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
- ((Y)->data + \
- sizeof(struct rmnet_map_header)))
-#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
- (Y)->data)->pkt_len))
-
#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index beaee4962128..add0f5ade2e6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -12,12 +12,13 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
+ struct rmnet_map_header *map_header = (void *)skb->data;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u8 mux_id;
int r;
- mux_id = RMNET_MAP_GET_MUX_ID(skb);
+ mux_id = map_header->mux_id;
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
@@ -49,6 +50,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type,
struct rmnet_port *port)
{
+ struct rmnet_map_header *map_header = (void *)skb->data;
struct rmnet_map_control_command *cmd;
struct net_device *dev = skb->dev;
@@ -58,7 +60,8 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
skb->protocol = htons(ETH_P_MAP);
- cmd = RMNET_MAP_GET_CMD_START(skb);
+ /* Command data immediately follows the MAP header */
+ cmd = (struct rmnet_map_control_command *)(map_header + 1);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
@@ -71,11 +74,13 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
*/
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
+ struct rmnet_map_header *map_header = (void *)skb->data;
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
- cmd = RMNET_MAP_GET_CMD_START(skb);
+ /* Command data immediately follows the MAP header */
+ cmd = (struct rmnet_map_control_command *)(map_header + 1);
command_name = cmd->command_name;
switch (command_name) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 21d38167f961..0ac2ff828320 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -197,22 +197,16 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct iphdr *ip4h = (struct iphdr *)iphdr;
- __be16 *hdr = (__be16 *)ul_header, offset;
+ struct iphdr *ip4h = iphdr;
+ u16 val;
- offset = htons((__force u16)(skb_transport_header(skb) -
- (unsigned char *)iphdr));
- ul_header->csum_start_offset = offset;
- ul_header->csum_insert_offset = skb->csum_offset;
- ul_header->csum_enabled = 1;
+ val = MAP_CSUM_UL_ENABLED_FLAG;
if (ip4h->protocol == IPPROTO_UDP)
- ul_header->udp_ind = 1;
- else
- ul_header->udp_ind = 0;
+ val |= MAP_CSUM_UL_UDP_FLAG;
+ val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
- /* Changing remaining fields to network order */
- hdr++;
- *hdr = htons((__force u16)*hdr);
+ ul_header->csum_start_offset = htons(skb_network_header_len(skb));
+ ul_header->csum_info = htons(val);
skb->ip_summed = CHECKSUM_NONE;
@@ -239,23 +233,16 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
- __be16 *hdr = (__be16 *)ul_header, offset;
-
- offset = htons((__force u16)(skb_transport_header(skb) -
- (unsigned char *)ip6hdr));
- ul_header->csum_start_offset = offset;
- ul_header->csum_insert_offset = skb->csum_offset;
- ul_header->csum_enabled = 1;
+ struct ipv6hdr *ip6h = ip6hdr;
+ u16 val;
+ val = MAP_CSUM_UL_ENABLED_FLAG;
if (ip6h->nexthdr == IPPROTO_UDP)
- ul_header->udp_ind = 1;
- else
- ul_header->udp_ind = 0;
+ val |= MAP_CSUM_UL_UDP_FLAG;
+ val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
- /* Changing remaining fields to network order */
- hdr++;
- *hdr = htons((__force u16)*hdr);
+ ul_header->csum_start_offset = htons(skb_network_header_len(skb));
+ ul_header->csum_info = htons(val);
skb->ip_summed = CHECKSUM_NONE;
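The uplink checksum rework above collapses several bitfields into one 16-bit csum_info word: an enable bit, a UDP bit, and the checksum insert offset, OR-ed together in host order and stored with a single htons(). A sketch of the packing; the flag values are assumptions, not the MAP specification:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define CSUM_UL_ENABLED	0x8000	/* assumed bit layout */
#define CSUM_UL_UDP	0x4000
#define CSUM_UL_OFFSET	0x3fff

static uint16_t pack_csum_info(int is_udp, uint16_t csum_offset)
{
	uint16_t val = CSUM_UL_ENABLED;

	if (is_udp)
		val |= CSUM_UL_UDP;
	val |= csum_offset & CSUM_UL_OFFSET;
	return htons(val);	/* stored big-endian in the header */
}

int main(void)
{
	printf("tcp csum_info: 0x%04x\n",
	       (unsigned int)ntohs(pack_csum_info(0, 16)));
	printf("udp csum_info: 0x%04x\n",
	       (unsigned int)ntohs(pack_csum_info(1, 6)));
	return 0;
}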
@@ -284,6 +271,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
return map_header;
}
+ BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
padding = ALIGN(map_datalen, 4) - map_datalen;
if (padding == 0)
@@ -297,7 +285,8 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
done:
map_header->pkt_len = htons(map_datalen + padding);
- map_header->pad_len = padding & 0x3F;
+ /* This is a data packet, so the CMD bit is 0 */
+ map_header->flags = padding & MAP_PAD_LEN_MASK;
return map_header;
}
@@ -319,7 +308,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
return NULL;
maph = (struct rmnet_map_header *)skb->data;
- packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+ packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
@@ -328,7 +317,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
return NULL;
/* Some hardware can send us empty frames. Catch them */
- if (ntohs(maph->pkt_len) == 0)
+ if (!maph->pkt_len)
return NULL;
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
@@ -361,7 +350,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
- if (!csum_trailer->valid) {
+ if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
priv->stats.csum_valid_unset++;
return -EINVAL;
}
@@ -421,10 +410,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
}
sw_csum:
- ul_header->csum_start_offset = 0;
- ul_header->csum_insert_offset = 0;
- ul_header->csum_enabled = 0;
- ul_header->udp_ind = 0;
+ memset(ul_header, 0, sizeof(*ul_header));
priv->stats.csum_sw++;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 1df2c002c9f6..2c89cde7da1e 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1586,12 +1586,10 @@ DECLARE_RTL_COND(rtl_counters_cond)
static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
{
- dma_addr_t paddr = tp->counters_phys_addr;
- u32 cmd;
+ u32 cmd = lower_32_bits(tp->counters_phys_addr);
- RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
+ RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr));
rtl_pci_commit(tp);
- cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
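The r8169 cleanup swaps the open-coded shift/mask for the kernel's upper_32_bits()/lower_32_bits() helpers when programming a 64-bit DMA address into two 32-bit registers. The equivalent arithmetic in standalone C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t paddr = 0x0000000123456780ull;	/* example DMA address */

	/* high word first, then low word (low may also carry command bits) */
	printf("high: 0x%08" PRIx32 "\n", upper_32(paddr));
	printf("low:  0x%08" PRIx32 "\n", lower_32(paddr));
	return 0;
}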
@@ -1903,6 +1901,41 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
return ret;
}
+static void rtl8169_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *data)
+{
+ data->rx_max_pending = NUM_RX_DESC;
+ data->rx_pending = NUM_RX_DESC;
+ data->tx_max_pending = NUM_TX_DESC;
+ data->tx_pending = NUM_TX_DESC;
+}
+
+static void rtl8169_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ bool tx_pause, rx_pause;
+
+ phy_get_pause(tp->phydev, &tx_pause, &rx_pause);
+
+ data->autoneg = tp->phydev->autoneg;
+ data->tx_pause = tx_pause ? 1 : 0;
+ data->rx_pause = rx_pause ? 1 : 0;
+}
+
+static int rtl8169_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (dev->mtu > ETH_DATA_LEN)
+ return -EOPNOTSUPP;
+
+ phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause);
+
+ return 0;
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1923,6 +1956,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_eee = rtl8169_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = rtl8169_get_ringparam,
+ .get_pauseparam = rtl8169_get_pauseparam,
+ .set_pauseparam = rtl8169_set_pauseparam,
};
static void rtl_enable_eee(struct rtl8169_private *tp)
@@ -2352,11 +2388,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
pcie_set_readrq(tp->pci_dev, readrq);
/* Chip doesn't support pause in jumbo mode */
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- tp->phydev->advertising, !jumbo);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- tp->phydev->advertising, !jumbo);
- phy_start_aneg(tp->phydev);
+ if (jumbo) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ tp->phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ tp->phydev->advertising);
+ phy_start_aneg(tp->phydev);
+ }
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -2735,11 +2773,6 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
__rtl_hw_start_8168cp(tp);
}
-static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
-{
- rtl_hw_start_8168c_2(tp);
-}
-
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
@@ -3652,7 +3685,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
- [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
+ [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2,
[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
@@ -4365,20 +4398,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
if (net_ratelimit())
netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
pci_cmd, pci_status_errs);
- /*
- * The recovery sequence below admits a very elaborated explanation:
- * - it seems to work;
- * - I did not see what else could be done;
- * - it makes iop3xx happy.
- *
- * Feel free to adjust to your needs.
- */
- if (pdev->broken_parity_status)
- pci_cmd &= ~PCI_COMMAND_PARITY;
- else
- pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
-
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
@@ -4662,6 +4681,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
static void rtl8169_up(struct rtl8169_private *tp)
{
pci_set_master(tp->pci_dev);
+ phy_init_hw(tp->phydev);
phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
@@ -5087,6 +5107,10 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
return -EUNATCH;
}
+ tp->phydev->mac_managed_pm = 1;
+
+ phy_support_asym_pause(tp->phydev);
+
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index cb47e68c1a3e..86a1eb0634e8 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -993,6 +993,7 @@ struct ravb_private {
struct platform_device *pdev;
void __iomem *addr;
struct clk *clk;
+ struct clk *refclk;
struct mdiobb_ctrl mdiobb;
u32 num_rx_ring[NUM_RX_QUEUE];
u32 num_tx_ring[NUM_TX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index eb0c03bdb12d..4afff320dfd0 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -109,11 +109,13 @@ static void ravb_set_buffer_align(struct sk_buff *skb)
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set by the bootloader.
*/
-static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+static void ravb_read_mac_address(struct device_node *np,
+ struct net_device *ndev)
{
- if (!IS_ERR(mac)) {
- ether_addr_copy(ndev->dev_addr, mac);
- } else {
+ int ret;
+
+ ret = of_get_mac_address(np, ndev->dev_addr);
+ if (ret) {
u32 mahr = ravb_read(ndev, MAHR);
u32 malr = ravb_read(ndev, MALR);
@@ -911,31 +913,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- u32 ris0, tis;
- for (;;) {
- tis = ravb_read(ndev, TIS);
- ris0 = ravb_read(ndev, RIS0);
- if (!((ris0 & mask) || (tis & mask)))
- break;
+ /* Processing RX Descriptor Ring */
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
- /* Processing RX Descriptor Ring */
- if (ris0 & mask) {
- /* Clear RX interrupt */
- ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
- /* Processing TX Descriptor Ring */
- if (tis & mask) {
- spin_lock_irqsave(&priv->lock, flags);
- /* Clear TX interrupt */
- ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
- ravb_tx_free(ndev, q, true);
- netif_wake_subqueue(ndev, q);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
- }
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+ ravb_tx_free(ndev, q, true);
+ netif_wake_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
napi_complete(napi);
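The rewrite drops the loop over the interrupt status registers and simply services both rings once per poll; ravb_rx() returns true when it exhausts the NAPI quota, which is what the goto out short-circuit above checks. For reference, the generic shape this follows (a sketch with hypothetical helpers, quota bookkeeping elided):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	work_done = example_clean_rx(napi, budget); /* bounded by budget */
	example_clean_tx(napi);                     /* TX completion is cheap */

	/* Re-arm device interrupts only once all work is done */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_irq_enable(napi);

	return work_done;
}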
@@ -2148,6 +2139,13 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
}
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_release;
+ }
+ clk_prepare_enable(priv->refclk);
+
ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
@@ -2164,7 +2162,7 @@ static int ravb_probe(struct platform_device *pdev)
/* Set GTI value */
error = ravb_set_gti(ndev);
if (error)
- goto out_release;
+ goto out_disable_refclk;
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
@@ -2183,7 +2181,7 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_release;
+ goto out_disable_refclk;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
@@ -2200,7 +2198,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
/* Read and set MAC address */
- ravb_read_mac_address(ndev, of_get_mac_address(np));
+ ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(&pdev->dev,
"no valid MAC address supplied, using a random one\n");
@@ -2243,6 +2241,8 @@ out_dma_free:
/* Stop PTP Clock driver */
if (chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
+out_disable_refclk:
+ clk_disable_unprepare(priv->refclk);
out_release:
free_netdev(ndev);
@@ -2260,6 +2260,8 @@ static int ravb_remove(struct platform_device *pdev)
if (priv->chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
+ clk_disable_unprepare(priv->refclk);
+
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
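A note on the refclk handling added above: devm_clk_get_optional() returns NULL, not an error, when the DT simply lacks a "refclk" entry, and the clk_prepare_enable()/clk_disable_unprepare() pair is a no-op on a NULL clock, so the IS_ERR() check only trips on genuine failures such as -EPROBE_DEFER. The same pattern in isolation (a sketch):

static int example_get_refclk(struct device *dev, struct clk **refclk)
{
	*refclk = devm_clk_get_optional(dev, "refclk");
	if (IS_ERR(*refclk))
		return PTR_ERR(*refclk); /* real failure, e.g. -EPROBE_DEFER */

	/* NULL-safe: does nothing when the clock is absent */
	return clk_prepare_enable(*refclk);
}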
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f029c7c03804..c5b154868c1f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -560,7 +560,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
EESR_TDE,
.fdr_value = 0x0000070f,
- .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
+ .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,
.no_psr = 1,
.apr = 1,
@@ -701,7 +701,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
- .trscer_err_mask = DESC_I_RINT8,
+ .trscer_err_mask = TRSCER_RMAFCE,
.apr = 1,
.mpr = 1,
@@ -782,7 +782,7 @@ static struct sh_eth_cpu_data r7s9210_data = {
.fdr_value = 0x0000070f,
- .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
+ .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,
.apr = 1,
.mpr = 1,
@@ -1094,7 +1094,7 @@ static struct sh_eth_cpu_data sh771x_data = {
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .trscer_err_mask = DESC_I_RINT8,
+ .trscer_err_mask = TRSCER_RMAFCE,
.tsu = 1,
.dual_port = 1,
@@ -1749,7 +1749,7 @@ static void sh_eth_emac_interrupt(struct net_device *ndev)
link_stat = sh_eth_read(ndev, PSR);
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
- if (!(link_stat & PHY_ST_LINK)) {
+ if (!(link_stat & PSR_LMON)) {
sh_eth_rcv_snd_disable(ndev);
} else {
/* Link Up */
@@ -3170,7 +3170,6 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
phy_interface_t interface;
- const char *mac_addr;
int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -3182,9 +3181,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
return NULL;
pdata->phy_interface = interface;
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(pdata->mac_addr, mac_addr);
+ of_get_mac_address(np, pdata->mac_addr);
pdata->no_ether_link =
of_property_read_bool(np, "renesas,no-ether-link");
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index c1b3751b12c4..a5c07c6ff44a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -171,7 +171,7 @@ enum GECMR_BIT {
};
/* EDMR */
-enum DMAC_M_BIT {
+enum EDMR_BIT {
EDMR_NBST = 0x80,
EDMR_EL = 0x40, /* Little endian */
EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
@@ -180,13 +180,13 @@ enum DMAC_M_BIT {
};
/* EDTRR */
-enum DMAC_T_BIT {
+enum EDTRR_BIT {
EDTRR_TRNS_GETHER = 0x03,
EDTRR_TRNS_ETHER = 0x01,
};
/* EDRRR */
-enum EDRRR_R_BIT {
+enum EDRRR_BIT {
EDRRR_R = 0x01,
};
@@ -208,7 +208,7 @@ enum PIR_BIT {
};
/* PSR */
-enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
+enum PSR_BIT { PSR_LMON = 0x01, };
/* EESR */
enum EESR_BIT {
@@ -288,27 +288,6 @@ enum EESIPR_BIT {
EESIPR_CERFIP = 0x00000001,
};
-/* Receive descriptor 0 bits */
-enum RD_STS_BIT {
- RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
- RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
- RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
- RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
- RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
- RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008,
- RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
- RD_RFS1 = 0x00000001,
-};
-#define RDF1ST RD_RFP1
-#define RDFEND RD_RFP0
-#define RD_RFP (RD_RFP1|RD_RFP0)
-
-/* Receive descriptor 1 bits */
-enum RD_LEN_BIT {
- RD_RFL = 0x0000ffff, /* receive frame length */
- RD_RBL = 0xffff0000, /* receive buffer length */
-};
-
/* FCFTR */
enum FCFTR_BIT {
FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
@@ -318,28 +297,13 @@ enum FCFTR_BIT {
#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
-/* Transmit descriptor 0 bits */
-enum TD_STS_BIT {
- TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
- TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
- TD_TFE = 0x08000000, TD_TWBI = 0x04000000,
-};
-#define TDF1ST TD_TFP1
-#define TDFEND TD_TFP0
-#define TD_TFP (TD_TFP1|TD_TFP0)
-
-/* Transmit descriptor 1 bits */
-enum TD_LEN_BIT {
- TD_TBL = 0xffff0000, /* transmit buffer length */
-};
-
/* RMCR */
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
};
/* ECMR */
-enum FELIC_MODE_BIT {
+enum ECMR_BIT {
ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
@@ -350,7 +314,7 @@ enum FELIC_MODE_BIT {
};
/* ECSR */
-enum ECSR_STATUS_BIT {
+enum ECSR_BIT {
ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
ECSR_LCHNG = 0x04,
ECSR_MPD = 0x02, ECSR_ICD = 0x01,
@@ -360,7 +324,7 @@ enum ECSR_STATUS_BIT {
ECSR_ICD | ECSIPR_MPDIP)
/* ECSIPR */
-enum ECSIPR_STATUS_MASK_BIT {
+enum ECSIPR_BIT {
ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
ECSIPR_LCHNGIP = 0x04,
ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
@@ -380,14 +344,20 @@ enum MPR_BIT {
};
/* TRSCER */
-enum DESC_I_BIT {
- DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
- DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
- DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
- DESC_I_RINT1 = 0x0001,
+enum TRSCER_BIT {
+ TRSCER_CNDCE = 0x00000800,
+ TRSCER_DLCCE = 0x00000400,
+ TRSCER_CDCE = 0x00000200,
+ TRSCER_TROCE = 0x00000100,
+ TRSCER_RMAFCE = 0x00000080,
+ TRSCER_RRFCE = 0x00000010,
+ TRSCER_RTLFCE = 0x00000008,
+ TRSCER_RTSFCE = 0x00000004,
+ TRSCER_PRECE = 0x00000002,
+ TRSCER_CERFCE = 0x00000001,
};
-#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+#define DEFAULT_TRSCER_ERR_MASK (TRSCER_RMAFCE | TRSCER_RRFCE | TRSCER_CDCE)
/* RPADIR */
enum RPADIR_BIT {
@@ -445,6 +415,24 @@ struct sh_eth_txdesc {
u32 pad0; /* padding data */
} __aligned(2) __packed;
+/* Transmit descriptor 0 bits */
+enum TD_STS_BIT {
+ TD_TACT = 0x80000000,
+ TD_TDLE = 0x40000000,
+ TD_TFP1 = 0x20000000,
+ TD_TFP0 = 0x10000000,
+ TD_TFE = 0x08000000,
+ TD_TWBI = 0x04000000,
+};
+#define TDF1ST TD_TFP1
+#define TDFEND TD_TFP0
+#define TD_TFP (TD_TFP1 | TD_TFP0)
+
+/* Transmit descriptor 1 bits */
+enum TD_LEN_BIT {
+ TD_TBL = 0xffff0000, /* transmit buffer length */
+};
+
/* The sh ether Rx buffer descriptors.
* This structure should be 20 bytes.
*/
@@ -455,6 +443,34 @@ struct sh_eth_rxdesc {
u32 pad0; /* padding data */
} __aligned(2) __packed;
+/* Receive descriptor 0 bits */
+enum RD_STS_BIT {
+ RD_RACT = 0x80000000,
+ RD_RDLE = 0x40000000,
+ RD_RFP1 = 0x20000000,
+ RD_RFP0 = 0x10000000,
+ RD_RFE = 0x08000000,
+ RD_RFS10 = 0x00000200,
+ RD_RFS9 = 0x00000100,
+ RD_RFS8 = 0x00000080,
+ RD_RFS7 = 0x00000040,
+ RD_RFS6 = 0x00000020,
+ RD_RFS5 = 0x00000010,
+ RD_RFS4 = 0x00000008,
+ RD_RFS3 = 0x00000004,
+ RD_RFS2 = 0x00000002,
+ RD_RFS1 = 0x00000001,
+};
+#define RDF1ST RD_RFP1
+#define RDFEND RD_RFP0
+#define RD_RFP (RD_RFP1 | RD_RFP0)
+
+/* Receive descriptor 1 bits */
+enum RD_LEN_BIT {
+ RD_RFL = 0x0000ffff, /* receive frame length */
+ RD_RBL = 0xffff0000, /* receive buffer length */
+};
+
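The TRSCER_* names above map one bit per transmit/receive status condition; the per-SoC trscer_err_mask values changed earlier in this patch select which receive conditions the hardware copies into the descriptor status. A sketch of how such a mask is typically applied at init time (assuming the driver writes it verbatim to TRSCER, as sh_eth does):

/* Sketch: applying the per-SoC mask during hardware init */
static void example_init_trscer(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* e.g. R-Car Gen2 sets only TRSCER_RMAFCE here */
	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
}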
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
/* mandatory functions */
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3473d296b2e2..a46633606cae 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2736,7 +2736,7 @@ static void rocker_switchdev_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
- if (!fdb_info->added_by_user)
+ if (!fdb_info->added_by_user || fdb_info->is_local)
break;
err = rocker_world_port_fdb_add(rocker_port, fdb_info);
if (err) {
@@ -2747,7 +2747,7 @@ static void rocker_switchdev_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
- if (!fdb_info->added_by_user)
+ if (!fdb_info->added_by_user || fdb_info->is_local)
break;
err = rocker_world_port_fdb_del(rocker_port, fdb_info);
if (err)
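fdb_info->is_local flags entries whose destination is the bridge device itself; those must keep terminating in software, so offloading drivers skip them just like entries that were not added by the user. The notifier payload consulted here, abbreviated (a sketch, not the full struct):

/* Sketch: fields of switchdev_notifier_fdb_info used above */
struct switchdev_notifier_fdb_info_sketch {
	const unsigned char *addr;   /* MAC being added/removed */
	u16 vid;                     /* VLAN ID, 0 if untagged */
	u8 added_by_user:1;          /* static entry from userspace */
	u8 is_local:1;               /* terminates at the bridge itself */
};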
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 33f79402850d..4639ed9438a3 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -25,8 +25,7 @@
#ifdef CONFIG_OF
static int sxgbe_probe_config_dt(struct platform_device *pdev,
- struct sxgbe_plat_data *plat,
- const char **mac)
+ struct sxgbe_plat_data *plat)
{
struct device_node *np = pdev->dev.of_node;
struct sxgbe_dma_cfg *dma_cfg;
@@ -35,7 +34,6 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- *mac = of_get_mac_address(np);
err = of_get_phy_mode(np, &plat->interface);
if (err && err != -ENODEV)
return err;
@@ -63,8 +61,7 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
}
#else
static int sxgbe_probe_config_dt(struct platform_device *pdev,
- struct sxgbe_plat_data *plat,
- const char **mac)
+ struct sxgbe_plat_data *plat)
{
return -ENOSYS;
}
@@ -85,7 +82,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
void __iomem *addr;
struct sxgbe_priv_data *priv = NULL;
struct sxgbe_plat_data *plat_dat = NULL;
- const char *mac = NULL;
struct net_device *ndev = platform_get_drvdata(pdev);
struct device_node *node = dev->of_node;
@@ -101,7 +97,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
if (!plat_dat)
return -ENOMEM;
- ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
+ ret = sxgbe_probe_config_dt(pdev, plat_dat);
if (ret) {
pr_err("%s: main dt probe failed\n", __func__);
return ret;
@@ -122,8 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
/* Get MAC address if available (DT) */
- if (!IS_ERR_OR_NULL(mac))
- ether_addr_copy(priv->dev->dev_addr, mac);
+ of_get_mac_address(node, priv->dev->dev_addr);
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
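The conversions in this file and the surrounding drivers all track the same API change: of_get_mac_address() now takes the destination buffer and returns 0 or -errno instead of returning a pointer, so the IS_ERR()/IS_ERR_OR_NULL() checks collapse into a single call. The usual probe-time idiom with a random fallback (a sketch with illustrative names):

static void example_set_mac(struct device_node *np, struct net_device *ndev)
{
	/* Fills ndev->dev_addr on success; may fail with e.g.
	 * -EPROBE_DEFER when the backing nvmem cell isn't ready.
	 */
	if (of_get_mac_address(np, ndev->dev_addr))
		eth_hw_addr_random(ndev);
}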
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index da6886dcac37..c3f35da1b82a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1747,6 +1747,22 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
mask, names);
}
+static void efx_ef10_get_fec_stats(struct efx_nic *efx,
+ struct ethtool_fec_stats *fec_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+
+ efx_ef10_get_stat_mask(efx, mask);
+ if (test_bit(EF10_STAT_fec_corrected_errors, mask))
+ fec_stats->corrected_blocks.total =
+ stats[EF10_STAT_fec_corrected_errors];
+ if (test_bit(EF10_STAT_fec_uncorrected_errors, mask))
+ fec_stats->uncorrectable_blocks.total =
+ stats[EF10_STAT_fec_uncorrected_errors];
+}
+
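The ethtool core pre-fills every counter in struct ethtool_fec_stats with ETHTOOL_STAT_NOT_SET, so a driver callback only assigns what its hardware actually tracks, exactly as above; untouched fields are omitted from the netlink reply. A minimal callback for a hypothetical device with two block counters (a sketch):

static void example_get_fec_stats(struct net_device *dev,
				  struct ethtool_fec_stats *fec_stats)
{
	struct example_priv *p = netdev_priv(dev); /* hypothetical driver */

	fec_stats->corrected_blocks.total = p->fec_corrected;
	fec_stats->uncorrectable_blocks.total = p->fec_uncorrected;
}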
static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
@@ -2928,8 +2944,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
/* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
- tx_queue = efx_channel_get_tx_queue(channel,
- tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
if (!tx_queue->timestamping) {
/* Transmit completion */
@@ -4122,6 +4137,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.get_wol = efx_ef10_get_wol,
.set_wol = efx_ef10_set_wol,
.resume_wol = efx_port_dummy_op_void,
+ .get_fec_stats = efx_ef10_get_fec_stats,
.test_chip = efx_ef10_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = efx_ef10_mcdi_request,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 36c8625a6fd7..c746ca7235f1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -920,7 +920,7 @@ static void efx_probe_vpd_strings(struct efx_nic *efx)
}
/* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 1bfeee283ea9..a3ca406a3561 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx)
}
}
}
+ if (xdp_queue_number)
+ efx->xdp_tx_queue_count = xdp_queue_number;
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 3332cdf2918a..cd590e0685e5 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -78,7 +78,6 @@ enum efx_loopback_mode {
(1 << LOOPBACK_XAUI) | \
(1 << LOOPBACK_GMII) | \
(1 << LOOPBACK_SGMII) | \
- (1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_XGBR) | \
(1 << LOOPBACK_XFI) | \
(1 << LOOPBACK_XAUI_FAR) | \
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 12a91c559aa2..058d9fe41d99 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -206,6 +206,15 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
return efx->type->set_wol(efx, wol->wolopts);
}
+static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->get_fec_stats)
+ efx->type->get_fec_stats(efx, fec_stats);
+}
+
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
struct ethtool_ts_info *ts_info)
{
@@ -257,6 +266,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_module_eeprom = efx_ethtool_get_module_eeprom,
.get_link_ksettings = efx_ethtool_get_link_ksettings,
.set_link_ksettings = efx_ethtool_set_link_ksettings,
+ .get_fec_stats = efx_ethtool_get_fec_stats,
.get_fecparam = efx_ethtool_get_fecparam,
.set_fecparam = efx_ethtool_set_fecparam,
};
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index f8979991970e..5e7a57b680ca 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2800,7 +2800,7 @@ static void ef4_probe_vpd_strings(struct ef4_nic *efx)
}
/* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a529ff395ead..a381cf9ec4f3 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -637,7 +637,7 @@ union ef4_multicast_hash {
* struct ef4_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
- * @node: List node for maintaning primary/secondary function lists
+ * @node: List node for maintaining primary/secondary function lists
* @primary: &struct ef4_nic instance for the primary function of this
* controller. May be the same structure, and may be %NULL if no
* primary function is bound. Serialised by rtnl_lock.
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index d75cf5ff5686..49df02ecee91 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ tx_queue = channel->tx_queue +
+ (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ tx_queue = channel->tx_queue +
+ (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
netif_tx_lock(efx->net_dev);
efx_farch_notify_tx_desc(tx_queue);
@@ -1081,16 +1081,16 @@ static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
- qid % EFX_MAX_TXQ_PER_CHANNEL);
- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
+ tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
efx_farch_magic_event(tx_queue->channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
- }
}
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9f7dfdf708cf..9b4b25704271 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1187,6 +1187,7 @@ struct efx_udp_tunnel {
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @get_fec_stats: Get standard FEC statistics.
* @test_chip: Test registers. May use efx_farch_test_registers(), and is
* expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
@@ -1332,6 +1333,8 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
+ void (*get_fec_stats)(struct efx_nic *efx,
+ struct ethtool_fec_stats *fec_stats);
unsigned int (*check_caps)(const struct efx_nic *efx,
u8 flag,
u32 offset);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 89c5c75f479f..17b8119c48e5 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,12 +94,11 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
rx_buf->len -= hdr_len;
for (;;) {
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buf->page, rx_buf->page_offset,
- rx_buf->len);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len, efx->rx_buffer_truesize);
rx_buf->page = NULL;
- skb->len += rx_buf->len;
- skb->data_len += rx_buf->len;
+
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
@@ -111,8 +110,6 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
n_frags = 0;
}
- skb->truesize += n_frags * efx->rx_buffer_truesize;
-
/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 1665529a7271..0c6650d2e239 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -412,14 +412,6 @@ err:
return NETDEV_TX_OK;
}
-static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
-{
- int i;
-
- for (i = 0; i < n; i++)
- xdp_return_frame_rx_napi(xdpfs[i]);
-}
-
/* Transmit a packet from an XDP buffer
*
* Returns number of packets sent on success, error code otherwise.
@@ -492,12 +484,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);
- if (i == 0)
- return -EIO;
-
- efx_xdp_return_frames(n - i, xdpfs + i);
-
- return i;
+ return i == 0 ? -EIO : i;
}
/* Initiate a packet transmission. We use one channel per CPU
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 891b49281bc6..cbde83f620a0 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2204,7 +2204,7 @@ static int try_toggle_control_gpio(struct device *dev,
const char *name, int index,
int value, unsigned int nsdelay)
{
- struct gpio_desc *gpio = *desc;
+ struct gpio_desc *gpio;
enum gpiod_flags flags = value ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
gpio = devm_gpiod_get_index_optional(dev, name, index, flags);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 606c79de93a6..556a9790cdcf 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2649,11 +2649,13 @@ static const struct of_device_id smsc911x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
#endif
+#ifdef CONFIG_ACPI
static const struct acpi_device_id smsc911x_acpi_match[] = {
{ "ARMH9118", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
+#endif
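Guarding the table with CONFIG_ACPI keeps the data out of non-ACPI builds; the usual counterpart is referencing it through ACPI_PTR(), which evaluates to NULL when ACPI is disabled so the driver struct still compiles (a sketch of the pairing, assuming the driver already uses ACPI_PTR):

static struct platform_driver example_driver = {
	.driver = {
		.name = "smsc911x",
		.of_match_table = of_match_ptr(smsc911x_dt_ids),
		/* NULL when CONFIG_ACPI is off, matching the #ifdef above */
		.acpi_match_table = ACPI_PTR(smsc911x_acpi_match),
	},
};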
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 200785e703c8..dfc85cc68173 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1760,8 +1760,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
{
struct netsec_priv *priv = netdev_priv(ndev);
struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
- int drops = 0;
- int i;
+ int i, nxmit = 0;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -1772,12 +1771,11 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
int err;
err = netsec_xdp_queue_one(priv, xdpf, true);
- if (err != NETSEC_XDP_TX) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- } else {
- tx_ring->xdp_xmit++;
- }
+ if (err != NETSEC_XDP_TX)
+ break;
+
+ tx_ring->xdp_xmit++;
+ nxmit++;
}
spin_unlock(&tx_ring->lock);
@@ -1786,7 +1784,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
tx_ring->xdp_xmit = 0;
}
- return n - drops;
+ return nxmit;
}
static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
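This follows the tree-wide ndo_xdp_xmit() convention change: a driver stops at the first frame it cannot queue and returns the count actually sent, and the redirect core frees the unsent tail, so the per-driver xdp_return_frame_rx_napi() loop disappears. The canonical shape (a sketch):

static int example_xdp_xmit(struct net_device *dev, int n,
			    struct xdp_frame **frames, u32 flags)
{
	int i, nxmit = 0;

	for (i = 0; i < n; i++) {
		if (example_queue_one(dev, frames[i]))
			break; /* caller frees frames[nxmit..n-1] */
		nxmit++;
	}
	return nxmit;
}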
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 501b9c7aba56..fcbb4bb31408 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1559,7 +1559,6 @@ static int ave_probe(struct platform_device *pdev)
struct ave_private *priv;
struct net_device *ndev;
struct device_node *np;
- const void *mac_addr;
void __iomem *base;
const char *name;
int i, irq, ret;
@@ -1600,12 +1599,9 @@ static int ave_probe(struct platform_device *pdev)
ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
-
- /* if the mac address is invalid, use random mac address */
- if (!is_valid_ether_addr(ndev->dev_addr)) {
+ ret = of_get_mac_address(np, ndev->dev_addr);
+ if (ret) {
+ /* if the mac address is invalid, use random mac address */
eth_hw_addr_random(ndev);
dev_warn(dev, "Using random MAC address: %pM\n",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 366740ab9c5a..f2e478b884b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
+ stmmac_xdp.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6f271c46368d..619e3c0760d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -33,6 +33,7 @@
#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
#define DWMAC_CORE_5_10 0x51
+#define DWMAC_CORE_5_20 0x52
#define DWXGMAC_CORE_2_10 0x21
#define DWXLGMAC_CORE_2_00 0x20
@@ -182,6 +183,12 @@ struct stmmac_extra_stats {
/* TSO */
unsigned long tx_tso_frames;
unsigned long tx_tso_nfrags;
+ /* EST */
+ unsigned long mtl_est_cgce;
+ unsigned long mtl_est_hlbs;
+ unsigned long mtl_est_hlbf;
+ unsigned long mtl_est_btre;
+ unsigned long mtl_est_btrlm;
};
/* Safety Feature statistics exposed by ethtool */
@@ -253,6 +260,9 @@ struct stmmac_safety_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
+/* MSI defines */
+#define STMMAC_MSI_VEC_MAX 32
+
/* PCS status and mask defines */
#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
#define PCS_LINK_IRQ BIT(1) /* PCS Link */
@@ -303,12 +313,37 @@ enum dma_irq_status {
handle_tx = 0x8,
};
+enum dma_irq_dir {
+ DMA_DIR_RX = 0x1,
+ DMA_DIR_TX = 0x2,
+ DMA_DIR_RXTX = 0x3,
+};
+
+enum request_irq_err {
+ REQ_IRQ_ERR_ALL,
+ REQ_IRQ_ERR_TX,
+ REQ_IRQ_ERR_RX,
+ REQ_IRQ_ERR_SFTY_UE,
+ REQ_IRQ_ERR_SFTY_CE,
+ REQ_IRQ_ERR_LPI,
+ REQ_IRQ_ERR_WOL,
+ REQ_IRQ_ERR_MAC,
+ REQ_IRQ_ERR_NO,
+};
+
/* EEE and LPI defines */
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
+/* FPE defines */
+#define FPE_EVENT_UNKNOWN 0
+#define FPE_EVENT_TRSP BIT(0)
+#define FPE_EVENT_TVER BIT(1)
+#define FPE_EVENT_RRSP BIT(2)
+#define FPE_EVENT_RVER BIT(3)
+
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
@@ -382,6 +417,8 @@ struct dma_features {
unsigned int estsel;
unsigned int fpesel;
unsigned int tbssel;
+ /* Number of Auxiliary Snapshot Inputs */
+ unsigned int aux_snapshot_n;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 08c76636c164..dfbaea06d108 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(gmac))
return PTR_ERR(gmac);
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
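This one-line change repeats across every dwmac glue driver below; it follows from stmmac_probe_config_dt() now taking a u8 MAC buffer that it fills in (via the new of_get_mac_address()) instead of a const char ** out-pointer. The assumed before/after prototypes:

/* after: caller passes a writable ETH_ALEN buffer */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);

/* before: the helper handed back a pointer into DT data */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);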
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 27254b27d7ed..bc91fd867dcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -438,7 +438,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index fad503820e04..fbfda55b4c52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev)
return ret;
if (pdev->dev.of_node) {
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 223f69da7e95..84651207a1de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -90,6 +90,32 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
return ret;
}
+static int imx_dwmac_clks_config(void *priv, bool enabled)
+{
+ struct imx_priv_data *dwmac = priv;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(dwmac->clk_mem);
+ if (ret) {
+ dev_err(dwmac->dev, "mem clock enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret) {
+ dev_err(dwmac->dev, "tx clock enable failed\n");
+ clk_disable_unprepare(dwmac->clk_mem);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_mem);
+ }
+
+ return ret;
+}
+
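Centralising the gating in imx_dwmac_clks_config() lets the stmmac core toggle the mem/tx clocks itself via the new plat->clks_config hook (wired up further down), rather than the glue layer only touching them in init/exit; probe calls it directly to keep the enable/disable paths symmetric. How such a hook is consumed (a sketch; the real call sites sit in the stmmac core PM paths):

static int example_suspend_clks(struct stmmac_priv *priv)
{
	if (priv->plat->clks_config)
		return priv->plat->clks_config(priv->plat->bsp_priv, false);
	return 0;
}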
static int imx_dwmac_init(struct platform_device *pdev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
@@ -98,39 +124,18 @@ static int imx_dwmac_init(struct platform_device *pdev, void *priv)
plat_dat = dwmac->plat_dat;
- ret = clk_prepare_enable(dwmac->clk_mem);
- if (ret) {
- dev_err(&pdev->dev, "mem clock enable failed\n");
- return ret;
- }
-
- ret = clk_prepare_enable(dwmac->clk_tx);
- if (ret) {
- dev_err(&pdev->dev, "tx clock enable failed\n");
- goto clk_tx_en_failed;
- }
-
if (dwmac->ops->set_intf_mode) {
ret = dwmac->ops->set_intf_mode(plat_dat);
if (ret)
- goto intf_mode_failed;
+ return ret;
}
return 0;
-
-intf_mode_failed:
- clk_disable_unprepare(dwmac->clk_tx);
-clk_tx_en_failed:
- clk_disable_unprepare(dwmac->clk_mem);
- return ret;
}
static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
{
- struct imx_priv_data *dwmac = priv;
-
- clk_disable_unprepare(dwmac->clk_tx);
- clk_disable_unprepare(dwmac->clk_mem);
+ /* nothing to do now */
}
static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
@@ -226,7 +231,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -249,10 +254,15 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->addr64 = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
+ plat_dat->clks_config = imx_dwmac_clks_config;
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
+ ret = imx_dwmac_clks_config(dwmac, true);
+ if (ret)
+ goto err_clks_config;
+
ret = imx_dwmac_init(pdev, dwmac);
if (ret)
goto err_dwmac_init;
@@ -263,9 +273,11 @@ static int imx_dwmac_probe(struct platform_device *pdev)
return 0;
-err_dwmac_init:
err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_dwmac_init:
+ imx_dwmac_clks_config(dwmac, false);
+err_clks_config:
err_parse_dt:
err_match_data:
stmmac_remove_config_dt(pdev, plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 6c19fcc76c6f..06d287f104be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -85,7 +85,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 0b64f7710d17..80728a4c0e3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -8,9 +8,28 @@
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+#define INTEL_MGBE_ADHOC_ADDR 0x15
+#define INTEL_MGBE_XPCS_ADDR 0x16
+
+/* PTP clock frequency selection for PSE and PCH GbE */
+#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_256MHZ (0)
+#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_200MHZ (0)
+
+/* Cross-timestamping defines */
+#define ART_CPUID_LEAF 0x15
+#define EHL_PSE_ART_MHZ 19200000
struct intel_priv_data {
int mdio_adhoc_addr; /* MDIO address for SerDes etc. */
+ unsigned long crossts_adj;
+ bool is_pse;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -134,6 +153,11 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
return data;
}
+ /* PSE only - ungate SGMII PHY Rx Clock */
+ if (intel_priv->is_pse)
+ mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
+ 0, SERDES_PHY_RX_CLK);
+
return 0;
}
@@ -149,6 +173,11 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ /* PSE only - gate SGMII PHY Rx Clock */
+ if (intel_priv->is_pse)
+ mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
+ SERDES_PHY_RX_CLK, 0);
+
/* move power state to P3 */
data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
@@ -201,6 +230,161 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
+/* Program the PTP clock frequency for the different Intel mGBE
+ * variants, which have slightly different GPO mappings.
+ */
+static void intel_mgbe_ptp_clk_freq_config(void *npriv)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
+ struct intel_priv_data *intel_priv;
+ u32 gpio_value;
+
+ intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
+
+ gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
+
+ if (intel_priv->is_pse) {
+ /* For PSE GbE, use 200MHz */
+ gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
+ gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
+ } else {
+ /* For PCH GbE, use 200MHz */
+ gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
+ gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
+ }
+
+ writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
+}
+
+static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
+ u64 *art_time)
+{
+ u64 ns;
+
+ ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
+ ns <<= GMAC4_ART_TIME_SHIFT;
+ ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
+ ns <<= GMAC4_ART_TIME_SHIFT;
+ ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
+ ns <<= GMAC4_ART_TIME_SHIFT;
+ ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
+
+ *art_time = ns;
+}
+
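Each PMC_ART_VALUE register holds one 16-bit slice of the 64-bit ART counter, read here from most to least significant; assuming GMAC4_ART_TIME_SHIFT is 16, the helper is equivalent to:

/* v3 = PMC_ART_VALUE3 (top word), ..., v0 = PMC_ART_VALUE0 */
u64 art = ((u64)v3 << 48) | ((u64)v2 << 32) |
	  ((u64)v1 << 16) |  (u64)v0;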
+static int intel_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct intel_priv_data *intel_priv;
+
+ struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
+ void __iomem *ptpaddr = priv->ptpaddr;
+ void __iomem *ioaddr = priv->hw->pcsr;
+ unsigned long flags;
+ u64 art_time = 0;
+ u64 ptp_time = 0;
+ u32 num_snapshot;
+ u32 gpio_value;
+ u32 acr_value;
+ int ret;
+ u32 v;
+ int i;
+
+ if (!boot_cpu_has(X86_FEATURE_ART))
+ return -EOPNOTSUPP;
+
+ intel_priv = priv->plat->bsp_priv;
+
+ /* Internal cross-timestamping and externally triggered event
+ * timestamping cannot run concurrently.
+ */
+ if (priv->plat->ext_snapshot_en)
+ return -EBUSY;
+
+ mutex_lock(&priv->aux_ts_lock);
+ /* Enable Internal snapshot trigger */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value &= ~PTP_ACR_MASK;
+ switch (priv->plat->int_snapshot_num) {
+ case AUX_SNAPSHOT0:
+ acr_value |= PTP_ACR_ATSEN0;
+ break;
+ case AUX_SNAPSHOT1:
+ acr_value |= PTP_ACR_ATSEN1;
+ break;
+ case AUX_SNAPSHOT2:
+ acr_value |= PTP_ACR_ATSEN2;
+ break;
+ case AUX_SNAPSHOT3:
+ acr_value |= PTP_ACR_ATSEN3;
+ break;
+ default:
+ mutex_unlock(&priv->aux_ts_lock);
+ return -EINVAL;
+ }
+ writel(acr_value, ptpaddr + PTP_ACR);
+
+ /* Clear FIFO */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value |= PTP_ACR_ATSFC;
+ writel(acr_value, ptpaddr + PTP_ACR);
+ /* Release the mutex */
+ mutex_unlock(&priv->aux_ts_lock);
+
+ /* Trigger the internal snapshot signal by creating a rising edge:
+ * toggle GPO1 low and then back to high.
+ */
+ gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
+ gpio_value &= ~GMAC_GPO1;
+ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
+ gpio_value |= GMAC_GPO1;
+ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
+
+ /* Poll for time sync operation done */
+ ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
+ (v & GMAC_INT_TSIE), 100, 10000);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: Wait for time sync operation timeout\n", __func__);
+ return ret;
+ }
+
+ num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
+ GMAC_TIMESTAMP_ATSNS_MASK) >>
+ GMAC_TIMESTAMP_ATSNS_SHIFT;
+
+ /* Drain all queued snapshots; keep the pair from the last FIFO segment */
+ for (i = 0; i < num_snapshot; i++) {
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
+ *device = ns_to_ktime(ptp_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
+ *system = convert_art_to_tsc(art_time);
+ }
+
+ system->cycles *= intel_priv->crossts_adj;
+
+ return 0;
+}
+
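A crosststamp callback like this is ultimately driven through the timekeeping core's get_device_system_crosststamp(), which correlates the device time with the system clock using the ART value returned in system->cycles (scaled above by crossts_adj). The assumed plumbing on the PTP side (a sketch; the actual stmmac wrapper may differ):

static int example_getcrosststamp(struct ptp_clock_info *ptp,
				  struct system_device_crosststamp *xts)
{
	struct stmmac_priv *priv =
		container_of(ptp, struct stmmac_priv, ptp_clock_ops);

	return get_device_system_crosststamp(priv->plat->crosststamp,
					     priv, NULL, xts);
}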
+static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
+ int base)
+{
+ if (boot_cpu_has(X86_FEATURE_ART)) {
+ unsigned int art_freq;
+
+ /* On systems that support ART, the ART frequency can be obtained
+ * from the ECX register of CPUID leaf 0x15.
+ */
+ art_freq = cpuid_ecx(ART_CPUID_LEAF);
+ do_div(art_freq, base);
+ intel_priv->crossts_adj = art_freq;
+ }
+}
+
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -263,6 +447,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Disable Priority config by default */
plat->tx_queues_cfg[i].use_prio = false;
+ /* Default: TX Q0 uses TSO, the remaining TX queues use TBS */
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
}
/* FIFO size is 4096 bytes for 1 tx/rx queue */
@@ -284,6 +471,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->dma_cfg->fixed_burst = 0;
plat->dma_cfg->mixed_burst = 0;
plat->dma_cfg->aal = 0;
+ plat->dma_cfg->dche = true;
plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
GFP_KERNEL);
@@ -319,6 +507,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
return ret;
}
+ plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
+
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -333,6 +523,30 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Use the last Rx queue */
plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+ /* Intel mgbe SGMII interface uses pcs-xpcs */
+ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ plat->mdio_bus_data->has_xpcs = true;
+ plat->mdio_bus_data->xpcs_an_inband = true;
+ }
+
+ /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
+ plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
+ plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
+
+ plat->int_snapshot_num = AUX_SNAPSHOT1;
+ plat->ext_snapshot_num = AUX_SNAPSHOT0;
+
+ plat->has_crossts = true;
+ plat->crosststamp = intel_crosststamp;
+
+ /* Setup MSI vector offset specific to Intel mGbE controller */
+ plat->msi_mac_vec = 29;
+ plat->msi_lpi_vec = 28;
+ plat->msi_sfty_ce_vec = 27;
+ plat->msi_sfty_ue_vec = 26;
+ plat->msi_rx_base_vec = 0;
+ plat->msi_tx_base_vec = 1;
+
return 0;
}
@@ -378,8 +592,14 @@ static struct stmmac_pci_info ehl_rgmii1g_info = {
static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ intel_priv->is_pse = true;
plat->bus_id = 2;
plat->addr64 = 32;
+
+ intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
+
return ehl_common_data(pdev, plat);
}
@@ -410,8 +630,14 @@ static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ intel_priv->is_pse = true;
plat->bus_id = 3;
plat->addr64 = 32;
+
+ intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
+
return ehl_common_data(pdev, plat);
}
@@ -609,6 +835,79 @@ static const struct stmmac_pci_info quark_info = {
.setup = quark_default_data,
};
+static int stmmac_config_single_msi(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
+ __func__);
+ return ret;
+ }
+
+ res->irq = pci_irq_vector(pdev, 0);
+ res->wol_irq = res->irq;
+ plat->multi_msi_en = 0;
+ dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
+ __func__);
+
+ return 0;
+}
+
+static int stmmac_config_multi_msi(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int ret;
+ int i;
+
+ if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
+ plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
+ dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
+ __func__);
+ return -1;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* For RX MSI */
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ res->rx_irq[i] = pci_irq_vector(pdev,
+ plat->msi_rx_base_vec + i * 2);
+ }
+
+ /* For TX MSI */
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ res->tx_irq[i] = pci_irq_vector(pdev,
+ plat->msi_tx_base_vec + i * 2);
+ }
+
+ if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
+ res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
+ if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
+ res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
+ if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
+ res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
+ if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
+ res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
+ if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
+ res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
+
+ plat->multi_msi_en = 1;
+ dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
+
+ return 0;
+}
+
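pci_alloc_irq_vectors() allocates between min and max vectors, returning the number obtained or a negative errno, and pci_irq_vector() then maps a vector index to a Linux IRQ number; the probe path below tries the multi-vector helper first and falls back to the single-vector one. The idiom in isolation (a sketch):

int nvec = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				 PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nvec < 0)	/* fall back to one vector of any type */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);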
/**
* intel_eth_pci_probe
*
@@ -650,7 +949,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -664,20 +963,27 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
plat->bsp_priv = intel_priv;
- intel_priv->mdio_adhoc_addr = 0x15;
+ intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
+ intel_priv->crossts_adj = 1;
+
+ /* Initialize all MSI vectors to invalid so that they can be set
+ * according to the platform data settings below.
+ * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX)
+ */
+ plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
ret = info->setup(pdev, plat);
if (ret)
return ret;
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0)
- return ret;
-
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- res.wol_irq = pci_irq_vector(pdev, 0);
- res.irq = pci_irq_vector(pdev, 0);
if (plat->eee_usecs_rate > 0) {
u32 tx_lpi_usec;
@@ -686,13 +992,28 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
}
+ ret = stmmac_config_multi_msi(pdev, plat, &res);
+ if (ret) {
+ ret = stmmac_config_single_msi(pdev, plat, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
+ __func__);
+ goto err_alloc_irq;
+ }
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
- pci_free_irq_vectors(pdev);
- clk_disable_unprepare(plat->stmmac_clk);
- clk_unregister_fixed_rate(plat->stmmac_clk);
+ goto err_dvr_probe;
}
+ return 0;
+
+err_dvr_probe:
+ pci_free_irq_vectors(pdev);
+err_alloc_irq:
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_unregister_fixed_rate(plat->stmmac_clk);
return ret;
}
@@ -710,13 +1031,9 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
- pci_free_irq_vectors(pdev);
-
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
pcim_iounmap_regions(pdev, BIT(0));
-
- pci_disable_device(pdev);
}
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
@@ -732,7 +1049,6 @@ static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
if (ret)
return ret;
- pci_disable_device(pdev);
pci_wake_from_d3(pdev, true);
return 0;
}
@@ -745,7 +1061,7 @@ static int __maybe_unused intel_eth_pci_resume(struct device *dev)
pci_restore_state(pdev);
pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
@@ -757,41 +1073,41 @@ static int __maybe_unused intel_eth_pci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
intel_eth_pci_resume);
-#define PCI_DEVICE_ID_INTEL_QUARK_ID 0x0937
-#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID 0x4b30
-#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID 0x4b31
-#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID 0x4b32
+#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
+#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC
* which are named PSE0 and PSE1
*/
-#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID 0x4ba0
-#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID 0x4ba1
-#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID 0x4ba2
-#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
-#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
-#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
-#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
-#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
-#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
-#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID 0x7aac
-#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID 0x7aad
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2
+#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
static const struct pci_device_id intel_eth_pci_id_table[] = {
- { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
- { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index e723096c0b15..542acb8ce467 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -14,6 +14,7 @@
/* SERDES defines */
#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */
+#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */
#define SERDES_RST BIT(2) /* Serdes Reset */
#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/
#define SERDES_PWR_ST_SHIFT 4
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index bf3250e0e59c..28dd0ed85a82 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -255,7 +255,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
if (val)
return val;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = gmac;
plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
plat_dat->multicast_filter_bins = 0;
+ plat_dat->tx_fifo_size = 8192;
+ plat_dat->rx_fifo_size = 8192;
err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (err)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 3d3f43d91b98..9d77c647badd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 9e4b83832938..58c0feaa8131 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -407,7 +407,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index bbc16b5a410a..16fb66a0ca72 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -52,7 +52,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 848e5c37746b..c7a6588d9398 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -398,7 +398,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index 8551ea878ba5..adfeb8d3293d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -118,7 +118,7 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index bfc4a92f1d92..84382fc5cc4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -461,7 +461,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
@@ -477,7 +477,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ethqos->rgmii_base)) {
- dev_err(&pdev->dev, "Can't get rgmii base\n");
ret = PTR_ERR(ethqos->rgmii_base);
goto err_mem;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6ef30252bfe0..8d28a536e1bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1396,7 +1396,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 70d41783329d..85208128f135 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -398,7 +398,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index e1b63df6f96f..710d7435733e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -325,7 +325,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 5d4df4c5254e..2b38a499a404 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -371,7 +371,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e62efd166ec8..4422baeed3d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -239,6 +239,22 @@ static const struct emac_variant emac_variant_h6 = {
#define EMAC_RX_EARLY_INT BIT(13)
#define EMAC_RGMII_STA_INT BIT(16)
+#define EMAC_INT_MSK_COMMON EMAC_RGMII_STA_INT
+#define EMAC_INT_MSK_TX (EMAC_TX_INT | \
+ EMAC_TX_DMA_STOP_INT | \
+ EMAC_TX_BUF_UA_INT | \
+ EMAC_TX_TIMEOUT_INT | \
+ EMAC_TX_UNDERFLOW_INT | \
+ EMAC_TX_EARLY_INT | \
+ EMAC_INT_MSK_COMMON)
+#define EMAC_INT_MSK_RX (EMAC_RX_INT | \
+ EMAC_RX_BUF_UA_INT | \
+ EMAC_RX_DMA_STOP_INT | \
+ EMAC_RX_TIMEOUT_INT | \
+ EMAC_RX_OVERFLOW_INT | \
+ EMAC_RX_EARLY_INT | \
+ EMAC_INT_MSK_COMMON)
+
#define MAC_ADDR_TYPE_DST BIT(31)
/* H3 specific bits for EPHY */
@@ -412,13 +428,19 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
}
static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan)
+ struct stmmac_extra_stats *x, u32 chan,
+ u32 dir)
{
u32 v;
int ret = 0;
v = readl(ioaddr + EMAC_INT_STA);
+ if (dir == DMA_DIR_RX)
+ v &= EMAC_INT_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ v &= EMAC_INT_MSK_TX;
+
if (v & EMAC_TX_INT) {
ret |= handle_tx;
x->tx_normal_irq_n++;
@@ -1199,7 +1221,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return -EINVAL;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
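
The EMAC_INT_MSK_RX/EMAC_INT_MSK_TX masks and the new dir argument let a single combined status register serve separate RX and TX service paths: each caller keeps only its own direction's bits (plus the shared EMAC_INT_MSK_COMMON bits) before dispatching, so one path cannot consume and acknowledge the other's events. A minimal, self-contained sketch of that pattern, with invented bit values standing in for the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only; the real masks live in dwmac-sun8i.c. */
#define DEMO_RX_INT	(1u << 0)
#define DEMO_TX_INT	(1u << 1)
#define DEMO_COMMON	(1u << 7)	/* shared status summary bit */
#define DEMO_MSK_RX	(DEMO_RX_INT | DEMO_COMMON)
#define DEMO_MSK_TX	(DEMO_TX_INT | DEMO_COMMON)

enum { DEMO_DIR_RX, DEMO_DIR_TX };
enum { HANDLE_RX = 1, HANDLE_TX = 2 };

static int demo_dma_interrupt(uint32_t status, int dir)
{
	int ret = 0;

	/* Keep only the events this service path is responsible for. */
	if (dir == DEMO_DIR_RX)
		status &= DEMO_MSK_RX;
	else if (dir == DEMO_DIR_TX)
		status &= DEMO_MSK_TX;

	if (status & DEMO_RX_INT)
		ret |= HANDLE_RX;
	if (status & DEMO_TX_INT)
		ret |= HANDLE_TX;
	return ret;
}

int main(void)
{
	/* Both events pending, but the RX path only schedules RX work (1). */
	printf("%d\n", demo_dma_interrupt(DEMO_RX_INT | DEMO_TX_INT, DEMO_DIR_RX));
	return 0;
}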
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 0e1ca2cba3c7..527077c98ebc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -108,7 +108,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index d23be45a64e5..d046e33b8a29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -208,7 +208,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 2bac49b49f73..90383abafa66 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -255,7 +255,7 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
- u32 number_chan)
+ u32 queue)
{
writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 82df91c130f7..462ca7ed095a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -42,6 +42,7 @@
#define GMAC_HW_FEATURE3 0x00000128
#define GMAC_MDIO_ADDR 0x00000200
#define GMAC_MDIO_DATA 0x00000204
+#define GMAC_GPIO_STATUS 0x0000020C
#define GMAC_ARP_ADDR 0x00000210
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
@@ -49,6 +50,7 @@
#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30)
#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30)
#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30)
+#define GMAC_TIMESTAMP_STATUS 0x00000b20
/* RX Queues Routing */
#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
@@ -143,6 +145,7 @@
#define GMAC_INT_PCS_PHYIS BIT(3)
#define GMAC_INT_PMT_EN BIT(4)
#define GMAC_INT_LPI_EN BIT(5)
+#define GMAC_INT_TSIE BIT(12)
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
@@ -259,6 +262,7 @@ enum power_event {
#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28)
#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24)
#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
@@ -278,6 +282,12 @@ enum power_event {
#define GMAC_HW_FEAT_DVLAN BIT(5)
#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
+/* GMAC GPIO Status reg */
+#define GMAC_GPO0 BIT(16)
+#define GMAC_GPO1 BIT(17)
+#define GMAC_GPO2 BIT(18)
+#define GMAC_GPO3 BIT(19)
+
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
#define GMAC_HI_DCS_SHIFT 16
@@ -298,6 +308,11 @@ enum power_event {
#define GMAC_L4DP0_SHIFT 16
#define GMAC_L4SP0 GENMASK(15, 0)
+/* MAC Timestamp Status */
+#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2)
+#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25)
+#define GMAC_TIMESTAMP_ATSNS_SHIFT 25
+
/* MTL registers */
#define MTL_OPERATION_MODE 0x00000c00
#define MTL_FRPE BIT(15)
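
GMAC_TIMESTAMP_ATSNS_MASK/_SHIFT describe a five-bit count of pending auxiliary timestamp snapshots inside the new GMAC_TIMESTAMP_STATUS register; the handler added to stmmac_hwtstamp.c further down extracts it with the usual mask-then-shift idiom. A standalone check of that extraction (the register value is an invented sample, not hardware output):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define DEMO_ATSNS_MASK		GENMASK32(29, 25)
#define DEMO_ATSNS_SHIFT	25

int main(void)
{
	uint32_t ts_status = 0x06000004;	/* sample: ATSNS = 3, AUXTSTRIG set */
	uint32_t num_snapshot = (ts_status & DEMO_ATSNS_MASK) >> DEMO_ATSNS_SHIFT;

	printf("pending auxiliary snapshots: %u\n", num_snapshot);
	return 0;
}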
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 29f765a246a0..95864f014ffa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -53,6 +53,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
+ /* Enable FPE interrupt */
+ if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
+ value |= GMAC_INT_FPE_EN;
+
writel(value, ioaddr + GMAC_INT_EN);
}
@@ -1245,6 +1249,8 @@ const struct stmmac_ops dwmac410_ops = {
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
+ .fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
@@ -1294,6 +1300,8 @@ const struct stmmac_ops dwmac510_ops = {
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
+ .fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 62aa0e95beb7..a602d16b9e53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -161,6 +161,19 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
value |= DMA_SYS_BUS_EAME;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+ value = readl(ioaddr + DMA_BUS_MODE);
+
+ if (dma_cfg->multi_msi_en) {
+ value &= ~DMA_BUS_MODE_INTM_MASK;
+ value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
+ }
+
+ if (dma_cfg->dche)
+ value |= DMA_BUS_MODE_DCHE;
+
+ writel(value, ioaddr + DMA_BUS_MODE);
+
}
static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
@@ -210,12 +223,9 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
}
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
{
- u32 chan;
-
- for (chan = 0; chan < number_chan; chan++)
- writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}
static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
@@ -415,6 +425,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* IEEE 1588-2002 */
dma_cap->time_stamp = 0;
+ /* Number of Auxiliary Snapshot Inputs */
+ dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28;
/* MAC HW feature3 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);
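
The DMA_BUS_MODE change above is a read-modify-write of a two-bit field: clear INTM, then write mode 1. Per the DesignWare databook (stated here as an assumption worth checking for your core), INTM mode 1 signals TX and RX completions on separate interrupt lines, which is what the per-direction MSI vectors added later in this patch expect. The same update, runnable in isolation with a plain variable standing in for the register:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INTM_MASK	(3u << 16)	/* DMA_BUS_MODE_INTM_MASK: bits 17:16 */
#define DEMO_INTM_SHIFT	16
#define DEMO_INTM_MODE1	0x1
#define DEMO_DCHE	(1u << 19)	/* descriptor cache enable */

static uint32_t demo_bus_mode_update(uint32_t value, int multi_msi, int dche)
{
	if (multi_msi) {
		value &= ~DEMO_INTM_MASK;			/* clear the old mode */
		value |= DEMO_INTM_MODE1 << DEMO_INTM_SHIFT;	/* select mode 1 */
	}
	if (dche)
		value |= DEMO_DCHE;
	return value;
}

int main(void)
{
	/* INTM was 3; after the update: mode 1 plus DCHE -> 0x00090000 */
	printf("0x%08x\n", demo_bus_mode_update(0x00030000, 1, 1));
	return 0;
}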
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 8391ca63d943..9321879b599c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -25,6 +25,10 @@
#define DMA_TBS_CTRL 0x00001050
/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_DCHE BIT(19)
+#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16)
+#define DMA_BUS_MODE_INTM_SHIFT 16
+#define DMA_BUS_MODE_INTM_MODE1 0x1
#define DMA_BUS_MODE_SFT_RESET BIT(0)
/* DMA SYS Bus Mode bitmap */
@@ -149,6 +153,25 @@
#define DMA_CHAN_STATUS_TPS BIT(1)
#define DMA_CHAN_STATUS_TI BIT(0)
+#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \
+ DMA_CHAN_STATUS_AIS | \
+ DMA_CHAN_STATUS_CDE | \
+ DMA_CHAN_STATUS_FBE)
+
+#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \
+ DMA_CHAN_STATUS_ERI | \
+ DMA_CHAN_STATUS_RWT | \
+ DMA_CHAN_STATUS_RPS | \
+ DMA_CHAN_STATUS_RBU | \
+ DMA_CHAN_STATUS_RI | \
+ DMA_CHAN_STATUS_MSK_COMMON)
+
+#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \
+ DMA_CHAN_STATUS_TBU | \
+ DMA_CHAN_STATUS_TPS | \
+ DMA_CHAN_STATUS_TI | \
+ DMA_CHAN_STATUS_MSK_COMMON)
+
/* Interrupt enable bits per channel */
#define DMA_CHAN_INTR_ENA_NIE BIT(16)
#define DMA_CHAN_INTR_ENA_AIE BIT(15)
@@ -206,7 +229,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac4_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan);
+ struct stmmac_extra_stats *x, u32 chan, u32 dir);
void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 71e50751ef2d..e63270267578 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -135,12 +135,17 @@ void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan)
+ struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
int ret = 0;
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_CHAN_STATUS_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_CHAN_STATUS_MSK_TX;
+
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
@@ -161,20 +166,19 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
}
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
+ if (likely(intr_status & DMA_CHAN_STATUS_NIS))
x->normal_irq_n++;
- if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- x->rx_normal_irq_n++;
- ret |= handle_rx;
- }
- if (likely(intr_status & (DMA_CHAN_STATUS_TI |
- DMA_CHAN_STATUS_TBU))) {
- x->tx_normal_irq_n++;
- ret |= handle_tx;
- }
- if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
- x->rx_early_irq++;
+ if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & (DMA_CHAN_STATUS_TI |
+ DMA_CHAN_STATUS_TBU))) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
}
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+ x->rx_early_irq++;
writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan));
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 8f7ac24545ef..d8c6ff725237 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -192,6 +192,7 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
/* 1. Enable Safety Features */
value = readl(ioaddr + MTL_ECC_CONTROL);
+ value |= MEEAO; /* MTL ECC Error Addr Status Override */
value |= TSOEE; /* TSO ECC */
value |= MRXPEE; /* MTL RX Parser ECC */
value |= MESTEE; /* MTL EST ECC */
@@ -595,9 +596,95 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
ctrl &= ~EEST;
writel(ctrl, ioaddr + MTL_EST_CONTROL);
+
+ /* Configure EST interrupt */
+ if (cfg->enable)
+ ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
+ else
+ ctrl = 0;
+
+ writel(ctrl, ioaddr + MTL_EST_INT_EN);
+
return 0;
}
+void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt)
+{
+ u32 status, value, feqn, hbfq, hbfs, btrl;
+ u32 txqcnt_mask = (1 << txqcnt) - 1;
+
+ status = readl(ioaddr + MTL_EST_STATUS);
+
+ value = (CGCE | HLBS | HLBF | BTRE | SWLC);
+
+ /* Return if there is no error */
+ if (!(status & value))
+ return;
+
+ if (status & CGCE) {
+ /* Clear Interrupt */
+ writel(CGCE, ioaddr + MTL_EST_STATUS);
+
+ x->mtl_est_cgce++;
+ }
+
+ if (status & HLBS) {
+ value = readl(ioaddr + MTL_EST_SCH_ERR);
+ value &= txqcnt_mask;
+
+ x->mtl_est_hlbs++;
+
+ /* Clear Interrupt */
+ writel(value, ioaddr + MTL_EST_SCH_ERR);
+
+ /* Collect info to show all the queues that have the HLBS
+ * issue. The only way to clear this is to clear the
+ * statistic.
+ */
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
+ }
+
+ if (status & HLBF) {
+ value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
+ feqn = value & txqcnt_mask;
+
+ value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
+ hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
+ hbfs = value & SZ_CAP_HBFS_MASK;
+
+ x->mtl_est_hlbf++;
+
+ /* Clear Interrupt */
+ writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);
+
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
+ hbfq, hbfs);
+ }
+
+ if (status & BTRE) {
+ if ((status & BTRL) == BTRL_MAX)
+ x->mtl_est_btrlm++;
+ else
+ x->mtl_est_btre++;
+
+ btrl = (status & BTRL) >> BTRL_SHIFT;
+
+ if (net_ratelimit())
+ netdev_info(dev, "EST: BTR Error Loop Count %u\n",
+ btrl);
+
+ writel(BTRE, ioaddr + MTL_EST_STATUS);
+ }
+
+ if (status & SWLC) {
+ writel(SWLC, ioaddr + MTL_EST_STATUS);
+ netdev_info(dev, "EST: SWOL has been switched\n");
+ }
+}
+
void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
bool enable)
{
@@ -621,3 +708,52 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
value |= EFPE;
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
+
+int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+{
+ u32 value;
+ int status;
+
+ status = FPE_EVENT_UNKNOWN;
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (value & TRSP) {
+ status |= FPE_EVENT_TRSP;
+ netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ }
+
+ if (value & TVER) {
+ status |= FPE_EVENT_TVER;
+ netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ }
+
+ if (value & RRSP) {
+ status |= FPE_EVENT_RRSP;
+ netdev_info(dev, "FPE: Respond mPacket is received\n");
+ }
+
+ if (value & RVER) {
+ status |= FPE_EVENT_RVER;
+ netdev_info(dev, "FPE: Verify mPacket is received\n");
+ }
+
+ return status;
+}
+
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
+{
+ u32 value;
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (type == MPACKET_VERIFY) {
+ value &= ~SRSP;
+ value |= SVER;
+ } else {
+ value &= ~SVER;
+ value |= SRSP;
+ }
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+}
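
dwmac5_fpe_send_mpacket() clears the opposite trigger bit before setting the requested one because SVER and SRSP are one-shot commands that should never be latched together. The mPackets they emit drive the IEEE 802.3br verify/response handshake; a toy model of one endpoint's reaction to each received mPacket (names and strings are illustrative, not the driver's FPE_EVENT_* encodings):

#include <stdio.h>

enum mpacket { MPKT_VERIFY, MPKT_RESPONSE };

/* One side of the handshake: answer a received verify with a response,
 * and treat a received response as confirmation from the link partner.
 */
static const char *demo_fpe_event(enum mpacket rcvd, enum mpacket *reply)
{
	if (rcvd == MPKT_VERIFY) {
		*reply = MPKT_RESPONSE;	/* peer asked: send a respond mPacket */
		return "verify received, responding";
	}
	return "response received, FPE handshake complete";
}

int main(void)
{
	enum mpacket reply;

	puts(demo_fpe_event(MPKT_VERIFY, &reply));
	puts(demo_fpe_event(MPKT_RESPONSE, &reply));
	return 0;
}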
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 56b0762c1276..6b2fd37b29ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -12,6 +12,12 @@
#define TMOUTEN BIT(0)
#define MAC_FPE_CTRL_STS 0x00000234
+#define TRSP BIT(19)
+#define TVER BIT(18)
+#define RRSP BIT(17)
+#define RVER BIT(16)
+#define SRSP BIT(2)
+#define SVER BIT(1)
#define EFPE BIT(0)
#define MAC_PPS_CONTROL 0x00000b70
@@ -38,6 +44,36 @@
#define PTOV_SHIFT 24
#define SSWL BIT(1)
#define EEST BIT(0)
+
+#define MTL_EST_STATUS 0x00000c58
+#define BTRL GENMASK(11, 8)
+#define BTRL_SHIFT 8
+#define BTRL_MAX (0xF << BTRL_SHIFT)
+#define SWOL BIT(7)
+#define SWOL_SHIFT 7
+#define CGCE BIT(4)
+#define HLBS BIT(3)
+#define HLBF BIT(2)
+#define BTRE BIT(1)
+#define SWLC BIT(0)
+
+#define MTL_EST_SCH_ERR 0x00000c60
+#define MTL_EST_FRM_SZ_ERR 0x00000c64
+#define MTL_EST_FRM_SZ_CAP 0x00000c68
+#define SZ_CAP_HBFS_MASK GENMASK(14, 0)
+#define SZ_CAP_HBFQ_SHIFT 16
+#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \
+ ((val) > 4 ? GENMASK(18, 16) : \
+ (val) > 2 ? GENMASK(17, 16) : \
+ BIT(16)); })
+
+#define MTL_EST_INT_EN 0x00000c70
+#define IECGCE CGCE
+#define IEHS HLBS
+#define IEHF HLBF
+#define IEBE BTRE
+#define IECC SWLC
+
#define MTL_EST_GCL_CONTROL 0x00000c80
#define BTR_LOW 0x0
#define BTR_HIGH 0x1
@@ -62,6 +98,7 @@
#define ADDR GENMASK(15, 0)
#define MTL_RXP_IACC_DATA 0x00000cb4
#define MTL_ECC_CONTROL 0x00000cc0
+#define MEEAO BIT(8)
#define TSOEE BIT(4)
#define MRXPEE BIT(3)
#define MESTEE BIT(2)
@@ -98,6 +135,8 @@
#define GMAC_RXQCTRL_VFFQ_SHIFT 17
#define GMAC_RXQCTRL_VFFQE BIT(16)
+#define GMAC_INT_FPE_EN BIT(17)
+
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
@@ -111,7 +150,12 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
u32 sub_second_inc, u32 systime_flags);
int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate);
+void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
bool enable);
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
+ enum stmmac_mpacket_type type);
+int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
#endif /* __DWMAC5_H__ */
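
SZ_CAP_HBFQ_MASK() is a GCC statement-expression whose result depends on the configured TX queue count: the head-of-line-blocking queue field in MTL_EST_FRM_SZ_CAP is only as wide as needed to encode the queue number, so the macro selects one, two, or three bits starting at bit 16. A small check of what it evaluates to, rebuilt for a user-space compile (GENMASK re-derived locally; needs GCC or Clang for the statement-expression):

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define DEMO_HBFQ_MASK(_val) ({ __typeof__(_val) (val) = (_val); \
				((val) > 4 ? GENMASK32(18, 16) : \
				 (val) > 2 ? GENMASK32(17, 16) : \
				 BIT(16)); })

int main(void)
{
	/* 2 queues -> 1 bit, 4 queues -> 2 bits, 8 queues -> 3 bits */
	printf("0x%05x 0x%05x 0x%05x\n",
	       DEMO_HBFQ_MASK(2u), DEMO_HBFQ_MASK(4u), DEMO_HBFQ_MASK(8u));
	return 0;
}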
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e5dbd0bc257e..1914ad698cab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -128,6 +128,26 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
+ DMA_STATUS_AIS | \
+ DMA_STATUS_FBI)
+
+#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
+ DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | \
+ DMA_STATUS_RU | \
+ DMA_STATUS_RI | \
+ DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON)
+
+#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
+ DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | \
+ DMA_STATUS_TU | \
+ DMA_STATUS_TPS | \
+ DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON)
+
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
@@ -139,7 +159,7 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
- u32 chan);
+ u32 chan, u32 dir);
int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 57a53a600aa5..d1c31200bb91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -155,7 +155,7 @@ static void show_rx_process_state(unsigned int status)
#endif
int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan)
+ struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
int ret = 0;
/* read the status register (CSR5) */
@@ -167,6 +167,12 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
show_tx_process_state(intr_status);
show_rx_process_state(intr_status);
#endif
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_STATUS_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_STATUS_MSK_TX;
+
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
if (unlikely(intr_status & DMA_STATUS_UNF)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 6c3b8a950f58..1913385df685 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -412,6 +412,12 @@
#define XGMAC_TI BIT(0)
#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
+#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE)
+#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \
+ XGMAC_DMA_STATUS_MSK_COMMON)
+#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \
+ XGMAC_DMA_STATUS_MSK_COMMON)
+
/* Descriptors */
#define XGMAC_TDES0_LTV BIT(31)
#define XGMAC_TDES0_LT GENMASK(7, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 77308c5c5d29..906e985441a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -323,12 +323,18 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
}
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan)
+ struct stmmac_extra_stats *x, u32 chan,
+ u32 dir)
{
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
+ if (dir == DMA_DIR_RX)
+ intr_status &= XGMAC_DMA_STATUS_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= XGMAC_DMA_STATUS_MSK_TX;
+
/* ABNORMAL interrupts */
if (unlikely(intr_status & XGMAC_AIS)) {
if (unlikely(intr_status & XGMAC_RBU)) {
@@ -441,12 +447,9 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
}
-static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
+static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
{
- u32 i;
-
- for (i = 0; i < nchan; i++)
- writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
+ writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
}
static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 979ac9fca23c..2cc91759b91f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -201,12 +201,12 @@ struct stmmac_dma_ops {
void (*start_rx)(void __iomem *ioaddr, u32 chan);
void (*stop_rx)(void __iomem *ioaddr, u32 chan);
int (*dma_interrupt) (void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan);
+ struct stmmac_extra_stats *x, u32 chan, u32 dir);
/* If supported then get the optional core features */
void (*get_hw_feature)(void __iomem *ioaddr,
struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
- void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
@@ -280,7 +280,6 @@ struct stmmac_dma_ops {
struct mac_device_info;
struct net_device;
struct rgmii_adv;
-struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
@@ -393,8 +392,13 @@ struct stmmac_ops {
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate);
+ void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
bool enable);
+ void (*fpe_send_mpacket)(void __iomem *ioaddr,
+ enum stmmac_mpacket_type type);
+ int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
};
#define stmmac_core_init(__priv, __args...) \
@@ -491,8 +495,16 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
#define stmmac_est_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, est_configure, __args)
+#define stmmac_est_irq_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, est_irq_status, __args)
#define stmmac_fpe_configure(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
+#define stmmac_fpe_send_mpacket(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
+#define stmmac_fpe_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+
+struct stmmac_priv;
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -504,6 +516,8 @@ struct stmmac_hwtimestamp {
int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
int add_sub, int gmac4);
void (*get_systime) (void __iomem *ioaddr, u64 *systime);
+ void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time);
+ void (*timestamp_interrupt)(struct stmmac_priv *priv);
};
#define stmmac_config_hw_tstamping(__priv, __args...) \
@@ -518,6 +532,10 @@ struct stmmac_hwtimestamp {
stmmac_do_callback(__priv, ptp, adjust_systime, __args)
#define stmmac_get_systime(__priv, __args...) \
stmmac_do_void_callback(__priv, ptp, get_systime, __args)
+#define stmmac_get_ptptime(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, get_ptptime, __args)
+#define stmmac_timestamp_interrupt(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
/* Helpers to manage the descriptors for chain and ring modes */
struct stmmac_mode_ops {
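
The new stmmac_est_irq_status/stmmac_fpe_send_mpacket/stmmac_fpe_irq_status wrappers follow hwif.h's existing convention: a macro dispatches to an ops-table member only when the backend installed it, so cores that lack the feature fall through harmlessly. A minimal sketch of that optional-callback idiom (the macro body is a guess at the general shape, not hwif.h's actual stmmac_do_callback definition):

#include <stdio.h>

struct demo_ops {
	int (*fpe_irq_status)(void *ioaddr);	/* may be NULL on older cores */
};

/* Call the op if present, otherwise report an "unsupported" style error. */
#define demo_do_callback(ops, name, ...) \
	((ops)->name ? (ops)->name(__VA_ARGS__) : -1)

static int demo_fpe_irq_status(void *ioaddr)
{
	(void)ioaddr;
	return 0x3;	/* pretend two FPE events are pending */
}

int main(void)
{
	struct demo_ops with    = { .fpe_irq_status = demo_fpe_irq_status };
	struct demo_ops without = { 0 };

	printf("%d %d\n",
	       demo_do_callback(&with, fpe_irq_status, NULL),
	       demo_do_callback(&without, fpe_irq_status, NULL));
	return 0;
}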
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e553b9a1f785..b6cd43eda7ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,10 +26,21 @@
struct stmmac_resources {
void __iomem *addr;
- const char *mac;
+ u8 mac[ETH_ALEN];
int wol_irq;
int lpi_irq;
int irq;
+ int sfty_ce_irq;
+ int sfty_ue_irq;
+ int rx_irq[MTL_MAX_RX_QUEUES];
+ int tx_irq[MTL_MAX_TX_QUEUES];
+};
+
+enum stmmac_txbuf_type {
+ STMMAC_TXBUF_T_SKB,
+ STMMAC_TXBUF_T_XDP_TX,
+ STMMAC_TXBUF_T_XDP_NDO,
+ STMMAC_TXBUF_T_XSK_TX,
};
struct stmmac_tx_info {
@@ -38,6 +49,7 @@ struct stmmac_tx_info {
unsigned len;
bool last_segment;
bool is_jumbo;
+ enum stmmac_txbuf_type buf_type;
};
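
Because a TX ring can now hold plain skbs, XDP_TX frames, ndo_xdp_xmit frames, and XSK descriptors side by side, every slot carries a buf_type tag and the cleanup path switches on it (see the stmmac_free_tx_buffer() changes in stmmac_main.c below). A compact model of that dispatch, with stand-in types and print statements in place of the real free routines:

#include <stdio.h>

enum demo_txbuf_type { DEMO_T_SKB, DEMO_T_XDP_TX, DEMO_T_XDP_NDO, DEMO_T_XSK_TX };

static void demo_free_tx_slot(enum demo_txbuf_type t, unsigned int *xsk_done)
{
	switch (t) {
	case DEMO_T_SKB:
		puts("dev_kfree_skb-style release");	/* owned by the stack */
		break;
	case DEMO_T_XDP_TX:
	case DEMO_T_XDP_NDO:
		puts("xdp_return_frame-style release");	/* owned by XDP */
		break;
	case DEMO_T_XSK_TX:
		(*xsk_done)++;	/* completion is counted back to the XSK pool */
		break;
	}
}

int main(void)
{
	unsigned int xsk_done = 0;

	demo_free_tx_slot(DEMO_T_SKB, &xsk_done);
	demo_free_tx_slot(DEMO_T_XSK_TX, &xsk_done);
	printf("xsk frames done: %u\n", xsk_done);
	return 0;
}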
#define STMMAC_TBS_AVAIL BIT(0)
@@ -53,8 +65,13 @@ struct stmmac_tx_queue {
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
struct dma_edesc *dma_entx;
struct dma_desc *dma_tx;
- struct sk_buff **tx_skbuff;
+ union {
+ struct sk_buff **tx_skbuff;
+ struct xdp_frame **xdpf;
+ };
struct stmmac_tx_info *tx_skbuff_dma;
+ struct xsk_buff_pool *xsk_pool;
+ u32 xsk_frames_done;
unsigned int cur_tx;
unsigned int dirty_tx;
dma_addr_t dma_tx_phy;
@@ -63,15 +80,23 @@ struct stmmac_tx_queue {
};
struct stmmac_rx_buffer {
- struct page *page;
+ union {
+ struct {
+ struct page *page;
+ dma_addr_t addr;
+ __u32 page_offset;
+ };
+ struct xdp_buff *xdp;
+ };
struct page *sec_page;
- dma_addr_t addr;
dma_addr_t sec_addr;
};
struct stmmac_rx_queue {
u32 rx_count_frames;
u32 queue_index;
+ struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
struct page_pool *page_pool;
struct stmmac_rx_buffer *buf_pool;
struct stmmac_priv *priv_data;
@@ -79,6 +104,7 @@ struct stmmac_rx_queue {
struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
unsigned int cur_rx;
unsigned int dirty_rx;
+ unsigned int buf_alloc_num;
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
@@ -93,6 +119,7 @@ struct stmmac_rx_queue {
struct stmmac_channel {
struct napi_struct rx_napi ____cacheline_aligned_in_smp;
struct napi_struct tx_napi ____cacheline_aligned_in_smp;
+ struct napi_struct rxtx_napi ____cacheline_aligned_in_smp;
struct stmmac_priv *priv_data;
spinlock_t lock;
u32 index;
@@ -147,20 +174,21 @@ struct stmmac_flow_entry {
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
- u32 tx_coal_frames;
- u32 tx_coal_timer;
- u32 rx_coal_frames;
+ u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
int sph;
+ int sph_cap;
u32 sarc_type;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
- u32 rx_riwt;
+ u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
void __iomem *ioaddr;
@@ -222,9 +250,24 @@ struct stmmac_priv {
int use_riwt;
int irq_wake;
spinlock_t ptp_lock;
+ /* Protects auxiliary snapshot registers from concurrent access. */
+ struct mutex aux_ts_lock;
+
void __iomem *mmcaddr;
void __iomem *ptpaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int sfty_ce_irq;
+ int sfty_ue_irq;
+ int rx_irq[MTL_MAX_RX_QUEUES];
+ int tx_irq[MTL_MAX_TX_QUEUES];
+ /* irq name */
+ char int_name_mac[IFNAMSIZ + 9];
+ char int_name_wol[IFNAMSIZ + 9];
+ char int_name_lpi[IFNAMSIZ + 9];
+ char int_name_sfty_ce[IFNAMSIZ + 10];
+ char int_name_sfty_ue[IFNAMSIZ + 10];
+ char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
@@ -234,6 +277,12 @@ struct stmmac_priv {
struct workqueue_struct *wq;
struct work_struct service_task;
+ /* Workqueue for handling FPE hand-shaking */
+ unsigned long fpe_task_state;
+ struct workqueue_struct *fpe_wq;
+ struct work_struct fpe_task;
+ char wq_name[IFNAMSIZ + 4];
+
/* TC Handling */
unsigned int tc_entries_max;
unsigned int tc_off_max;
@@ -246,6 +295,10 @@ struct stmmac_priv {
/* Receive Side Scaling */
struct stmmac_rss rss;
+
+ /* XDP BPF Program */
+ unsigned long *af_xdp_zc_qps;
+ struct bpf_prog *xdp_prog;
};
enum stmmac_state {
@@ -262,6 +315,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_open(struct net_device *dev);
+int stmmac_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
int stmmac_dvr_remove(struct device *dev);
@@ -272,6 +327,27 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+
+static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
+{
+ return !!priv->xdp_prog;
+}
+
+static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5642985ef95..61b11639ee0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -158,6 +158,12 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
/* TSO */
STMMAC_STAT(tx_tso_frames),
STMMAC_STAT(tx_tso_nfrags),
+ /* EST */
+ STMMAC_STAT(mtl_est_cgce),
+ STMMAC_STAT(mtl_est_hlbs),
+ STMMAC_STAT(mtl_est_hlbf),
+ STMMAC_STAT(mtl_est_btre),
+ STMMAC_STAT(mtl_est_btrlm),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -756,28 +762,75 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
return (riwt * 256) / (clk / 1000000);
}
-static int stmmac_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *ec)
+static int __stmmac_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 max_cnt;
+ u32 rx_cnt;
+ u32 tx_cnt;
- ec->tx_coalesce_usecs = priv->tx_coal_timer;
- ec->tx_max_coalesced_frames = priv->tx_coal_frames;
+ rx_cnt = priv->plat->rx_queues_to_use;
+ tx_cnt = priv->plat->tx_queues_to_use;
+ max_cnt = max(rx_cnt, tx_cnt);
- if (priv->use_riwt) {
- ec->rx_max_coalesced_frames = priv->rx_coal_frames;
- ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= max_cnt)
+ return -EINVAL;
+
+ if (queue < tx_cnt) {
+ ec->tx_coalesce_usecs = priv->tx_coal_timer[queue];
+ ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue];
+ } else {
+ ec->tx_coalesce_usecs = 0;
+ ec->tx_max_coalesced_frames = 0;
+ }
+
+ if (priv->use_riwt && queue < rx_cnt) {
+ ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue];
+ ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue],
+ priv);
+ } else {
+ ec->rx_max_coalesced_frames = 0;
+ ec->rx_coalesce_usecs = 0;
}
return 0;
}
-static int stmmac_set_coalesce(struct net_device *dev,
+static int stmmac_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
+ return __stmmac_get_coalesce(dev, ec, -1);
+}
+
+static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __stmmac_get_coalesce(dev, ec, queue);
+}
+
+static int __stmmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 rx_cnt = priv->plat->rx_queues_to_use;
+ bool all_queues = false;
unsigned int rx_riwt;
+ u32 max_cnt;
+ u32 rx_cnt;
+ u32 tx_cnt;
+
+ rx_cnt = priv->plat->rx_queues_to_use;
+ tx_cnt = priv->plat->tx_queues_to_use;
+ max_cnt = max(rx_cnt, tx_cnt);
+
+ if (queue < 0)
+ all_queues = true;
+ else if (queue >= max_cnt)
+ return -EINVAL;
if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
@@ -785,8 +838,23 @@ static int stmmac_set_coalesce(struct net_device *dev,
if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
return -EINVAL;
- priv->rx_riwt = rx_riwt;
- stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
+ if (all_queues) {
+ int i;
+
+ for (i = 0; i < rx_cnt; i++) {
+ priv->rx_riwt[i] = rx_riwt;
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ rx_riwt, i);
+ priv->rx_coal_frames[i] =
+ ec->rx_max_coalesced_frames;
+ }
+ } else if (queue < rx_cnt) {
+ priv->rx_riwt[queue] = rx_riwt;
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ rx_riwt, queue);
+ priv->rx_coal_frames[queue] =
+ ec->rx_max_coalesced_frames;
+ }
}
if ((ec->tx_coalesce_usecs == 0) &&
@@ -797,13 +865,37 @@ static int stmmac_set_coalesce(struct net_device *dev,
(ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
return -EINVAL;
- /* Only copy relevant parameters, ignore all others. */
- priv->tx_coal_frames = ec->tx_max_coalesced_frames;
- priv->tx_coal_timer = ec->tx_coalesce_usecs;
- priv->rx_coal_frames = ec->rx_max_coalesced_frames;
+ if (all_queues) {
+ int i;
+
+ for (i = 0; i < tx_cnt; i++) {
+ priv->tx_coal_frames[i] =
+ ec->tx_max_coalesced_frames;
+ priv->tx_coal_timer[i] =
+ ec->tx_coalesce_usecs;
+ }
+ } else if (queue < tx_cnt) {
+ priv->tx_coal_frames[queue] =
+ ec->tx_max_coalesced_frames;
+ priv->tx_coal_timer[queue] =
+ ec->tx_coalesce_usecs;
+ }
+
return 0;
}
+static int stmmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ return __stmmac_set_coalesce(dev, ec, -1);
+}
+
+static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __stmmac_set_coalesce(dev, ec, queue);
+}
+
static int stmmac_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
@@ -1001,6 +1093,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
+ .get_per_queue_coalesce = stmmac_get_per_queue_coalesce,
+ .set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
.get_tunable = stmmac_get_tunable,
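
__stmmac_get_coalesce()/__stmmac_set_coalesce() treat queue -1 as "legacy, apply to every ring", so the old ethtool entry points keep working while the new per-queue ops target a single ring. The RIWT watchdog counts in units of 256 CSR clock cycles, which makes the usec/riwt mapping lossy; a standalone round-trip check at an assumed 250 MHz CSR clock (demo_usec2riwt is written as the inverse of the stmmac_riwt2usec() conversion shown above and is an assumption, not a copy of the driver's helper):

#include <stdio.h>

static unsigned int demo_riwt2usec(unsigned int riwt, unsigned long clk_hz)
{
	return (riwt * 256) / (clk_hz / 1000000);
}

static unsigned int demo_usec2riwt(unsigned int usec, unsigned long clk_hz)
{
	return (usec * (clk_hz / 1000000)) / 256;
}

int main(void)
{
	unsigned long clk = 250000000UL;	/* assumed 250 MHz CSR clock */
	unsigned int riwt = demo_usec2riwt(100, clk);

	/* 100 us quantizes to riwt 97, which reads back as 99 us. */
	printf("100 us -> riwt %u -> %u us\n", riwt, demo_riwt2usec(riwt, clk));
	return 0;
}

With a recent ethtool, the per-queue path is exercised with something like "ethtool --per-queue eth0 queue_mask 0x2 --coalesce rx-usecs 100" (flag spelling per the ethtool man page; worth verifying against the installed version).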
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index d291612eeafb..074e2cdfb0fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -12,8 +12,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
+#include <linux/ptp_clock_kernel.h>
#include "common.h"
#include "stmmac_ptp.h"
+#include "dwmac4.h"
+#include "stmmac.h"
static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
{
@@ -153,6 +156,51 @@ static void get_systime(void __iomem *ioaddr, u64 *systime)
*systime = ns;
}
+static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
+{
+ u64 ns;
+
+ ns = readl(ptpaddr + PTP_ATNR);
+ ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+
+ *ptp_time = ns;
+}
+
+static void timestamp_interrupt(struct stmmac_priv *priv)
+{
+ u32 num_snapshot, ts_status, tsync_int;
+ struct ptp_clock_event event;
+ unsigned long flags;
+ u64 ptp_time;
+ int i;
+
+ tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
+
+ if (!tsync_int)
+ return;
+
+ /* Read timestamp status to clear interrupt from either external
+ * timestamp or start/end of PPS.
+ */
+ ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
+
+ if (!priv->plat->ext_snapshot_en)
+ return;
+
+ num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+ GMAC_TIMESTAMP_ATSNS_SHIFT;
+
+ for (i = 0; i < num_snapshot; i++) {
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ get_ptptime(priv->ptpaddr, &ptp_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp_time;
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
+}
+
const struct stmmac_hwtimestamp stmmac_ptp = {
.config_hw_tstamping = config_hw_tstamping,
.init_systime = init_systime,
@@ -160,4 +208,6 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
.config_addend = config_addend,
.adjust_systime = adjust_systime,
.get_systime = get_systime,
+ .get_ptptime = get_ptptime,
+ .timestamp_interrupt = timestamp_interrupt,
};
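
Each auxiliary snapshot that timestamp_interrupt() forwards via ptp_clock_event() reaches user space as an external-timestamp event on the PTP character device. A hedged user-space consumer built on the standard linux/ptp_clock.h ABI (the /dev/ptp0 path and channel index 0 are assumptions for illustration; the right device depends on which PTP clock the MAC registered):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req = { .index = 0, .flags = PTP_ENABLE_FEATURE };
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* Each auxiliary snapshot raised by the MAC arrives as one event. */
	if (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("extts: %lld.%09u\n", (long long)ev.t.sec, ev.t.nsec);
	return 0;
}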
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4749bd0af160..a9a984c57d78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,6 +28,7 @@
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
@@ -37,9 +38,12 @@
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
+#include <linux/bpf_trace.h>
#include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
+#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
@@ -66,6 +70,16 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+/* Limit to make sure XDP TX and slow path can coexist */
+#define STMMAC_XSK_TX_BUDGET_MAX 256
+#define STMMAC_TX_XSK_AVAIL 16
+#define STMMAC_RX_FILL_BATCH 16
+
+#define STMMAC_XDP_PASS 0
+#define STMMAC_XDP_CONSUMED BIT(0)
+#define STMMAC_XDP_TX BIT(1)
+#define STMMAC_XDP_REDIRECT BIT(2)
+
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
@@ -104,6 +118,13 @@ module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+/* For MSI interrupts handling */
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
@@ -113,6 +134,38 @@ static void stmmac_exit_fs(struct net_device *dev);
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(priv->plat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(priv->plat->pclk);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ return ret;
+ }
+ if (priv->plat->clks_config) {
+ ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ if (priv->plat->clks_config)
+ priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
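
On the enable path stmmac_bus_clks_config() unwinds in strict reverse order: a pclk failure releases stmmac_clk, and a platform clks_config failure releases both. The nested-if form above is equivalent to the goto-unwind shape more common in kernel code; a compile-and-run sketch of that equivalence with stub clocks (for comparison only, not a proposed rewrite):

#include <stdio.h>

static int enable_a(void)   { puts("A on");     return 0; }
static int enable_b(void)   { puts("B failed"); return -1; }	/* simulated */
static void disable_a(void) { puts("A off"); }

static int demo_clks_enable(void)
{
	int ret;

	ret = enable_a();
	if (ret)
		return ret;
	ret = enable_b();
	if (ret)
		goto err_a;	/* undo only what succeeded, newest first */
	return 0;

err_a:
	disable_a();
	return ret;
}

int main(void)
{
	return demo_clks_enable() ? 1 : 0;
}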
+
/**
* stmmac_verify_args - verify the driver parameters.
* Description: it checks the driver parameters and set a default in case of
@@ -134,11 +187,7 @@ static void stmmac_verify_args(void)
eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
-/**
- * stmmac_disable_all_queues - Disable all queues
- * @priv: driver private structure
- */
-static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
@@ -148,6 +197,12 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_disable(&ch->rxtx_napi);
+ continue;
+ }
+
if (queue < rx_queues_cnt)
napi_disable(&ch->rx_napi);
if (queue < tx_queues_cnt)
@@ -156,6 +211,28 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
}
/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_rx_queue *rx_q;
+ u32 queue;
+
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ rx_q = &priv->rx_queue[queue];
+ if (rx_q->xsk_pool) {
+ synchronize_rcu();
+ break;
+ }
+ }
+
+ __stmmac_disable_all_queues(priv);
+}
+
+/**
* stmmac_enable_all_queues - Enable all queues
* @priv: driver private structure
*/
@@ -169,6 +246,12 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_enable(&ch->rxtx_napi);
+ continue;
+ }
+
if (queue < rx_queues_cnt)
napi_enable(&ch->rx_napi);
if (queue < tx_queues_cnt)
@@ -433,6 +516,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
{
struct skb_shared_hwtstamps shhwtstamp;
bool found = false;
+ s64 adjust = 0;
u64 ns = 0;
if (!priv->hwts_tx_en)
@@ -451,6 +535,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
}
if (found) {
+ /* Correct the clock domain crossing (CDC) error */
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
+ adjust += -(2 * (NSEC_PER_SEC /
+ priv->plat->clk_ptp_rate));
+ ns += adjust;
+ }
+
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -474,6 +565,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
struct dma_desc *desc = p;
+ u64 adjust = 0;
u64 ns = 0;
if (!priv->hwts_rx_en)
@@ -485,6 +577,13 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
/* Check if timestamp is available */
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+
+ /* Correct the clock domain crossing (CDC) error */
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
+ adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
+ ns -= adjust;
+ }
+
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
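
Both the TX and RX hunks apply the same clock-domain-crossing compensation: a stamp captured through the GMAC4 CDC FIFO arrives two PTP clock periods late, so two periods are subtracted from the raw value in each direction (the TX path adds a negative adjust, the RX path subtracts a positive one). A quick numeric check at an assumed 250 MHz PTP clock:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long clk_ptp_rate = 250000000L;	/* assumed 250 MHz PTP clock */

	/* Two periods of a 4 ns clock: the stamp is corrected by -8 ns. */
	printf("CDC adjust: -%ld ns\n", 2 * (NSEC_PER_SEC / clk_ptp_rate));
	return 0;
}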
@@ -922,6 +1021,21 @@ static void stmmac_mac_an_restart(struct phylink_config *config)
/* Not Supported */
}
+static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (is_up && *hs_enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_EVENT_UNKNOWN;
+ *lp_state = FPE_EVENT_UNKNOWN;
+ }
+}
+
static void stmmac_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -932,6 +1046,9 @@ static void stmmac_mac_link_down(struct phylink_config *config,
priv->tx_lpi_enabled = false;
stmmac_eee_init(priv);
stmmac_set_eee_pls(priv, priv->hw, false);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -1030,6 +1147,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
}
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, true);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
@@ -1117,6 +1237,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.pcs_poll = true;
+ priv->phylink_config.ovr_an_inband =
+ priv->plat->mdio_bus_data->xpcs_an_inband;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1304,11 +1426,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->page)
- return -ENOMEM;
+ if (!buf->page) {
+ buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->page)
+ return -ENOMEM;
+ buf->page_offset = stmmac_rx_offset(priv);
+ }
- if (priv->sph) {
+ if (priv->sph && !buf->sec_page) {
buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
if (!buf->sec_page)
return -ENOMEM;
@@ -1320,7 +1445,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
}
- buf->addr = page_pool_get_dma_addr(buf->page);
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
@@ -1358,7 +1484,8 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- if (tx_q->tx_skbuff_dma[i].buf) {
+ if (tx_q->tx_skbuff_dma[i].buf &&
+ tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
if (tx_q->tx_skbuff_dma[i].map_as_page)
dma_unmap_page(priv->device,
tx_q->tx_skbuff_dma[i].buf,
@@ -1371,84 +1498,227 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
DMA_TO_DEVICE);
}
- if (tx_q->tx_skbuff[i]) {
+ if (tx_q->xdpf[i] &&
+ (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
+ xdp_return_frame(tx_q->xdpf[i]);
+ tx_q->xdpf[i] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff[i] &&
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
dev_kfree_skb_any(tx_q->tx_skbuff[i]);
tx_q->tx_skbuff[i] = NULL;
- tx_q->tx_skbuff_dma[i].buf = 0;
- tx_q->tx_skbuff_dma[i].map_as_page = false;
}
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
}
/**
- * init_dma_rx_desc_rings - init the RX descriptor rings
- * @dev: net device structure
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, queue, i);
+}
+
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
+ gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct dma_desc *p;
+ int ret;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
+ return ret;
+
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (!buf->xdp)
+ continue;
+
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ }
+}
+
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf;
+ dma_addr_t dma_addr;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + i);
+ else
+ p = rx_q->dma_rx + i;
+
+ buf = &rx_q->buf_pool[i];
+
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp)
+ return -ENOMEM;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, p, dma_addr);
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
+{
+ if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, queue);
+}
+
+/**
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 rx_count = priv->plat->rx_queues_to_use;
- int ret = -ENOMEM;
- int queue;
- int i;
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int ret;
- /* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
- "SKB addresses:\nskb\t\tskb data\tdma data\n");
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ stmmac_clear_rx_descriptors(priv, queue);
- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_rx_phy=0x%08x\n", __func__,
- (u32)rx_q->dma_rx_phy);
+ xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
- stmmac_clear_rx_descriptors(priv, queue);
+ rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct dma_desc *p;
+ if (rx_q->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx_q->page_pool));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ }
- if (priv->extend_desc)
- p = &((rx_q->dma_erx + i)->basic);
- else
- p = rx_q->dma_rx + i;
+ if (rx_q->xsk_pool) {
+ /* The RX XDP ZC buffer pool may not be populated, e.g.
+ * when xdpsock runs in TX-only mode.
+ */
+ stmmac_alloc_rx_buffers_zc(priv, queue);
+ } else {
+ ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ if (ret < 0)
+ return -ENOMEM;
+ }
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
- queue);
- if (ret)
- goto err_init_rx_buffers;
- }
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
-
- /* Setup the chained descriptor addresses */
- if (priv->mode == STMMAC_CHAIN_MODE) {
- if (priv->extend_desc)
- stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
- else
- stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
- }
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
+ else
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
+ }
+
+ return 0;
+}
+
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ int queue;
+ int ret;
+
+ /* RX INITIALIZATION */
+ netif_dbg(priv, probe, priv->dev,
+ "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ if (ret)
+ goto err_init_rx_buffers;
}
return 0;
err_init_rx_buffers:
while (queue >= 0) {
- while (--i >= 0)
- stmmac_free_rx_buffer(priv, queue, i);
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, queue);
+ else
+ dma_free_rx_skbufs(priv, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
if (queue == 0)
break;
- i = priv->dma_rx_size;
queue--;
}
@@ -1456,63 +1726,75 @@ err_init_rx_buffers:
}
/**
- * init_dma_tx_desc_rings - init the TX descriptor rings
- * @dev: net device structure.
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
int i;
- for (queue = 0; queue < tx_queue_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);
- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_tx_phy=0x%08x\n", __func__,
- (u32)tx_q->dma_tx_phy);
-
- /* Setup the chained descriptor addresses */
- if (priv->mode == STMMAC_CHAIN_MODE) {
- if (priv->extend_desc)
- stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
- else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
- stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
- }
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
+ }
- for (i = 0; i < priv->dma_tx_size; i++) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((tx_q->dma_etx + i)->basic);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- p = &((tx_q->dma_entx + i)->basic);
- else
- p = tx_q->dma_tx + i;
+ tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- stmmac_clear_desc(priv, p);
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ struct dma_desc *p;
- tx_q->tx_skbuff_dma[i].buf = 0;
- tx_q->tx_skbuff_dma[i].map_as_page = false;
- tx_q->tx_skbuff_dma[i].len = 0;
- tx_q->tx_skbuff_dma[i].last_segment = false;
- tx_q->tx_skbuff[i] = NULL;
- }
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
+ stmmac_clear_desc(priv, p);
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
}
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+
+ return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt;
+ u32 queue;
+
+ tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ __init_dma_tx_desc_rings(priv, queue);
+
return 0;
}
@@ -1544,29 +1826,25 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
}
/**
- * dma_free_rx_skbufs - free RX dma buffers
- * @priv: private structure
- * @queue: RX queue index
- */
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
-{
- int i;
-
- for (i = 0; i < priv->dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
-}
-
-/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
* @queue: TX queue index
*/
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
int i;
+ tx_q->xsk_frames_done = 0;
+
for (i = 0; i < priv->dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i);
+
+ if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+ tx_q->xsk_frames_done = 0;
+ tx_q->xsk_pool = NULL;
+ }
}
/**
@@ -1583,137 +1861,186 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
}
/**
- * free_dma_rx_desc_resources - free RX dma desc resources
+ * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure
+ * @queue: RX queue index
*/
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ /* Release the DMA RX socket buffers */
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, queue);
+ else
+ dma_free_rx_skbufs(priv, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ rx_q->dma_rx, rx_q->dma_rx_phy);
+ else
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ rx_q->dma_erx, rx_q->dma_rx_phy);
+
+ if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_q->xdp_rxq);
+
+ kfree(rx_q->buf_pool);
+ if (rx_q->page_pool)
+ page_pool_destroy(rx_q->page_pool);
+}
+
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- /* Release the DMA RX socket buffers */
- dma_free_rx_skbufs(priv, queue);
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- rx_q->dma_rx, rx_q->dma_rx_phy);
- else
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- rx_q->dma_erx, rx_q->dma_rx_phy);
-
- kfree(rx_q->buf_pool);
- if (rx_q->page_pool)
- page_pool_destroy(rx_q->page_pool);
- }
+ for (queue = 0; queue < rx_count; queue++)
+ __free_dma_rx_desc_resources(priv, queue);
}
/**
- * free_dma_tx_desc_resources - free TX dma desc resources
+ * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure
+ * @queue: TX queue index
*/
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
- u32 tx_count = priv->plat->tx_queues_to_use;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
- /* Free TX queue resources */
- for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- size_t size;
- void *addr;
+ /* Release the DMA TX socket buffers */
+ dma_free_tx_skbufs(priv, queue);
+
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
- /* Release the DMA TX socket buffers */
- dma_free_tx_skbufs(priv, queue);
+ size *= priv->dma_tx_size;
- if (priv->extend_desc) {
- size = sizeof(struct dma_extended_desc);
- addr = tx_q->dma_etx;
- } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
- size = sizeof(struct dma_edesc);
- addr = tx_q->dma_entx;
- } else {
- size = sizeof(struct dma_desc);
- addr = tx_q->dma_tx;
- }
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
- size *= priv->dma_tx_size;
+ kfree(tx_q->tx_skbuff_dma);
+ kfree(tx_q->tx_skbuff);
+}
- dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
- kfree(tx_q->tx_skbuff_dma);
- kfree(tx_q->tx_skbuff);
- }
+ /* Free TX queue resources */
+ for (queue = 0; queue < tx_count; queue++)
+ __free_dma_tx_desc_resources(priv, queue);
}
/**
- * alloc_dma_rx_desc_resources - alloc RX resources.
+ * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure
+ * @queue: RX queue index
* Description: according to which descriptor type is in use (extended or
* basic), this function allocates the resources for the TX and RX paths.
* In case of reception, for example, it pre-allocates the RX socket
* buffers in order to allow the zero-copy mechanism.
*/
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ bool xdp_prog = stmmac_xdp_is_enabled(priv);
+ struct page_pool_params pp_params = { 0 };
+ unsigned int num_pages;
+ unsigned int napi_id;
+ int ret;
+
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = priv->dma_rx_size;
+ num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.order = ilog2(num_pages);
+ pp_params.nid = dev_to_node(priv->device);
+ pp_params.dev = priv->device;
+ pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ pp_params.offset = stmmac_rx_offset(priv);
+ pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+
+ rx_q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_q->page_pool)) {
+ ret = PTR_ERR(rx_q->page_pool);
+ rx_q->page_pool = NULL;
+ return ret;
+ }
+
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
+ if (!rx_q->buf_pool)
+ return -ENOMEM;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_erx)
+ return -ENOMEM;
+
+ } else {
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ return -ENOMEM;
+ }
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps))
+ napi_id = ch->rxtx_napi.napi_id;
+ else
+ napi_id = ch->rx_napi.napi_id;
+
+ ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
+ rx_q->queue_index,
+ napi_id);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to register xdp rxq info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
u32 rx_count = priv->plat->rx_queues_to_use;
- int ret = -ENOMEM;
u32 queue;
+ int ret;
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
-
- rx_q->queue_index = queue;
- rx_q->priv_data = priv;
-
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = priv->dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
- pp_params.nid = dev_to_node(priv->device);
- pp_params.dev = priv->device;
- pp_params.dma_dir = DMA_FROM_DEVICE;
-
- rx_q->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(rx_q->page_pool)) {
- ret = PTR_ERR(rx_q->page_pool);
- rx_q->page_pool = NULL;
- goto err_dma;
- }
-
- rx_q->buf_pool = kcalloc(priv->dma_rx_size,
- sizeof(*rx_q->buf_pool),
- GFP_KERNEL);
- if (!rx_q->buf_pool)
+ ret = __alloc_dma_rx_desc_resources(priv, queue);
+ if (ret)
goto err_dma;
-
- if (priv->extend_desc) {
- rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
- if (!rx_q->dma_erx)
- goto err_dma;
-
- } else {
- rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
- sizeof(struct dma_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
- if (!rx_q->dma_rx)
- goto err_dma;
- }
}
return 0;
@@ -1725,60 +2052,70 @@ err_dma:
}
/**
- * alloc_dma_tx_desc_resources - alloc TX resources.
+ * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure
+ * @queue: TX queue index
* Description: according to which descriptor type is in use (extended or
* basic), this function allocates the resources for the TX and RX paths.
* In case of reception, for example, it pre-allocates the RX socket
* buffers in order to allow the zero-copy mechanism.
*/
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
- u32 tx_count = priv->plat->tx_queues_to_use;
- int ret = -ENOMEM;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
- /* TX queues buffers and DMA */
- for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- size_t size;
- void *addr;
+ tx_q->queue_index = queue;
+ tx_q->priv_data = priv;
- tx_q->queue_index = queue;
- tx_q->priv_data = priv;
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff_dma)
+ return -ENOMEM;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
- sizeof(*tx_q->tx_skbuff_dma),
- GFP_KERNEL);
- if (!tx_q->tx_skbuff_dma)
- goto err_dma;
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff)
+ return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!tx_q->tx_skbuff)
- goto err_dma;
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
- if (priv->extend_desc)
- size = sizeof(struct dma_extended_desc);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- size = sizeof(struct dma_edesc);
- else
- size = sizeof(struct dma_desc);
+ size *= priv->dma_tx_size;
- size *= priv->dma_tx_size;
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
- addr = dma_alloc_coherent(priv->device, size,
- &tx_q->dma_tx_phy, GFP_KERNEL);
- if (!addr)
- goto err_dma;
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
- if (priv->extend_desc)
- tx_q->dma_etx = addr;
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- tx_q->dma_entx = addr;
- else
- tx_q->dma_tx = addr;
+ return 0;
+}
+
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* TX queues buffers and DMA */
+ for (queue = 0; queue < tx_count; queue++) {
+ ret = __alloc_dma_tx_desc_resources(priv, queue);
+ if (ret)
+ goto err_dma;
}
return 0;
@@ -1815,11 +2152,13 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
*/
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
- /* Release the DMA RX socket buffers */
- free_dma_rx_desc_resources(priv);
-
/* Release the DMA TX socket buffers */
free_dma_tx_desc_resources(priv);
+
+ /* Release the DMA RX socket buffers later
+ * to ensure all pending XDP_TX buffers are returned.
+ */
+ free_dma_rx_desc_resources(priv);
}
/**
@@ -1976,12 +2315,24 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ u32 buf_size;
+
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
rxfifosz, qmode);
- stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
- chan);
+
+ if (rx_q->xsk_pool) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ chan);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ chan);
+ }
}
for (chan = 0; chan < tx_channels_count; chan++) {
@@ -1992,6 +2343,101 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
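+/* Transmit frames from the XSK TX ring, up to @budget descriptors.
+ * Returns true when the budget was not exhausted and no frames are left
+ * pending, i.e. the TX interrupt may be re-enabled.
+ */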
+static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct xsk_buff_pool *pool = tx_q->xsk_pool;
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc = NULL;
+ struct xdp_desc xdp_desc;
+ bool work_done = true;
+
+ /* Avoid TX time-outs, as we share this queue with the slow path */
+ nq->trans_start = jiffies;
+
+ budget = min(budget, stmmac_tx_avail(priv, queue));
+
+ while (budget-- > 0) {
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+ /* We share this ring with the slow path, so stop XSK TX
+ * descriptor submission when the available TX ring space
+ * drops below the threshold.
+ */
+ if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
+ !netif_carrier_ok(priv->dev)) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
+
+ /* To return the XDP buffer to the XSK pool, we simply call
+ * xsk_tx_completed(), so there is no need to fill up 'buf'
+ * and 'xdpf'.
+ */
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->xdpf[entry] = NULL;
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ tx_q->tx_count_frames++;
+
+ if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
+ true, priv->mode, true, true,
+ xdp_desc.len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ entry = tx_q->cur_tx;
+ }
+
+ if (tx_desc) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ xsk_tx_release(pool);
+ }
+
+ /* Return true only if both of the following conditions are met:
+ * a) TX budget is still available, and
+ * b) work_done == true, i.e. the XSK TX desc peek came back empty
+ * (no more pending XSK TX frames to transmit).
+ */
+ return !!budget && work_done;
+}
+
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
@@ -2003,18 +2449,35 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry, count = 0;
+ unsigned int entry, xmits = 0, count = 0;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
+ tx_q->xsk_frames_done = 0;
+
entry = tx_q->dirty_tx;
- while ((entry != tx_q->cur_tx) && (count < budget)) {
- struct sk_buff *skb = tx_q->tx_skbuff[entry];
+
+ /* Try to clean all completed TX frames in one shot */
+ while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
struct dma_desc *p;
int status;
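+ /* Work out whether this ring entry carries an XDP frame or an skb */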
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdpf = tx_q->xdpf[entry];
+ skb = NULL;
+ } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ xdpf = NULL;
+ skb = tx_q->tx_skbuff[entry];
+ } else {
+ xdpf = NULL;
+ skb = NULL;
+ }
+
if (priv->extend_desc)
p = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -2044,10 +2507,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
}
- stmmac_get_tx_hwtstamp(priv, p, skb);
+ if (skb)
+ stmmac_get_tx_hwtstamp(priv, p, skb);
}
- if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
+ if (likely(tx_q->tx_skbuff_dma[entry].buf &&
+ tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
if (tx_q->tx_skbuff_dma[entry].map_as_page)
dma_unmap_page(priv->device,
tx_q->tx_skbuff_dma[entry].buf,
@@ -2068,11 +2533,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
tx_q->tx_skbuff_dma[entry].last_segment = false;
tx_q->tx_skbuff_dma[entry].is_jumbo = false;
- if (likely(skb != NULL)) {
- pkts_compl++;
- bytes_compl += skb->len;
- dev_consume_skb_any(skb);
- tx_q->tx_skbuff[entry] = NULL;
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ if (likely(skb)) {
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_consume_skb_any(skb);
+ tx_q->tx_skbuff[entry] = NULL;
+ }
}
stmmac_release_tx_desc(priv, p, priv->mode);
@@ -2093,6 +2575,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
}
+ if (tx_q->xsk_pool) {
+ bool work_done;
+
+ if (tx_q->xsk_frames_done)
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_q->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_q->xsk_pool);
+
+ /* For XSK TX, we try to send as many frames as possible.
+ * If the XSK work is done (XSK TX desc ring empty and budget
+ * still available), return "budget - 1" to re-enable the TX
+ * IRQ. Else, return "budget" to make NAPI continue polling.
+ */
+ work_done = stmmac_xdp_xmit_zc(priv, queue,
+ STMMAC_XSK_TX_BUDGET_MAX);
+ if (work_done)
+ xmits = budget - 1;
+ else
+ xmits = budget;
+ }
+
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
stmmac_enable_eee_mode(priv);
@@ -2101,12 +2605,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
- return count;
+ /* Combine decisions from TX clean and XSK TX */
+ return max(count, xmits);
}
/**
@@ -2184,28 +2690,35 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
return false;
}
-static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
- &priv->xstats, chan);
+ &priv->xstats, chan, dir);
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan];
+ struct napi_struct *rx_napi;
+ struct napi_struct *tx_napi;
unsigned long flags;
+ rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
+ tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
- if (napi_schedule_prep(&ch->rx_napi)) {
+ if (napi_schedule_prep(rx_napi)) {
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule(&ch->rx_napi);
+ __napi_schedule(rx_napi);
}
}
if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
- if (napi_schedule_prep(&ch->tx_napi)) {
+ if (napi_schedule_prep(tx_napi)) {
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule(&ch->tx_napi);
+ __napi_schedule(tx_napi);
}
}
@@ -2233,7 +2746,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
channels_to_check = ARRAY_SIZE(status);
for (chan = 0; chan < channels_to_check; chan++)
- status[chan] = stmmac_napi_check(priv, chan);
+ status[chan] = stmmac_napi_check(priv, chan,
+ DMA_DIR_RXTX);
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2361,7 +2875,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
rx_q->dma_rx_phy, chan);
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (priv->dma_rx_size *
+ (rx_q->buf_alloc_num *
sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
rx_q->rx_tail_addr, chan);
@@ -2386,7 +2900,8 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
}
@@ -2401,16 +2916,18 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
struct stmmac_priv *priv = tx_q->priv_data;
struct stmmac_channel *ch;
+ struct napi_struct *napi;
ch = &priv->channel[tx_q->queue_index];
+ napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
- if (likely(napi_schedule_prep(&ch->tx_napi))) {
+ if (likely(napi_schedule_prep(napi))) {
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
- __napi_schedule(&ch->tx_napi);
+ __napi_schedule(napi);
}
return HRTIMER_NORESTART;
@@ -2427,18 +2944,21 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
u32 chan;
- priv->tx_coal_frames = STMMAC_TX_FRAMES;
- priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- priv->rx_coal_frames = STMMAC_RX_FRAMES;
-
for (chan = 0; chan < tx_channel_count; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
+ priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
+
hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tx_q->txtimer.function = stmmac_tx_timer;
}
+
+ for (chan = 0; chan < rx_channel_count; chan++)
+ priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}
static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2655,6 +3175,26 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
+static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
+{
+ char *name;
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+
+ name = priv->wq_name;
+ sprintf(name, "%s-fpe", priv->dev->name);
+
+ priv->fpe_wq = create_singlethread_workqueue(name);
+ if (!priv->fpe_wq) {
+ netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
+
+ return -ENOMEM;
+ }
+ netdev_info(priv->dev, "FPE workqueue start");
+
+ return 0;
+}
+
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -2673,6 +3213,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
u32 tx_cnt = priv->plat->tx_queues_to_use;
+ bool sph_en;
u32 chan;
int ret;
@@ -2743,10 +3284,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = eee_timer * 1000;
if (priv->use_riwt) {
- if (!priv->rx_riwt)
- priv->rx_riwt = DEF_DMA_RIWT;
+ u32 queue;
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
+ for (queue = 0; queue < rx_cnt; queue++) {
+ if (!priv->rx_riwt[queue])
+ priv->rx_riwt[queue] = DEF_DMA_RIWT;
+
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ priv->rx_riwt[queue], queue);
+ }
}
if (priv->hw->pcs)
@@ -2757,15 +3303,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Enable TSO */
if (priv->tso) {
- for (chan = 0; chan < tx_cnt; chan++)
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ /* TSO and TBS cannot co-exist */
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ continue;
+
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
+ }
}
/* Enable Split Header */
- if (priv->sph && priv->hw->rx_csum) {
- for (chan = 0; chan < rx_cnt; chan++)
- stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
- }
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
/* VLAN Tag Insertion */
if (priv->dma_cap.vlins)
@@ -2786,6 +3339,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ if (priv->dma_cap.fpesel) {
+ stmmac_fpe_start_wq(priv);
+
+ if (priv->plat->fpe_cfg->enable)
+ stmmac_fpe_handshake(priv, true);
+ }
+
return 0;
}
@@ -2796,6 +3356,271 @@ static void stmmac_hw_teardown(struct net_device *dev)
clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
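+/* Free the IRQs requested so far. @irq_err records how far the request
+ * sequence got, so the switch falls through the cases in reverse order
+ * of allocation.
+ */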
+static void stmmac_free_irq(struct net_device *dev,
+ enum request_irq_err irq_err, int irq_idx)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int j;
+
+ switch (irq_err) {
+ case REQ_IRQ_ERR_ALL:
+ irq_idx = priv->plat->tx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_TX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->tx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->tx_irq[j], NULL);
+ free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ }
+ }
+ irq_idx = priv->plat->rx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_RX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->rx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->rx_irq[j], NULL);
+ free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ }
+ }
+
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
+ free_irq(priv->sfty_ue_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_UE:
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
+ free_irq(priv->sfty_ce_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_CE:
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
+ free_irq(priv->lpi_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_LPI:
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_WOL:
+ free_irq(dev->irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_MAC:
+ case REQ_IRQ_ERR_NO:
+ /* If the MAC IRQ request failed, there are no more IRQs to free */
+ break;
+ }
+}
+
+static int stmmac_request_irq_multi_msi(struct net_device *dev)
+{
+ enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ cpumask_t cpu_mask;
+ int irq_idx = 0;
+ char *int_name;
+ int ret;
+ int i;
+
+ /* For common interrupt */
+ int_name = priv->int_name_mac;
+ sprintf(int_name, "%s:%s", dev->name, "mac");
+ ret = request_irq(dev->irq, stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc mac MSI %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+ /* Request the Wake IRQ in case another line
+ * is used for WoL
+ */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ int_name = priv->int_name_wol;
+ sprintf(int_name, "%s:%s", dev->name, "wol");
+ ret = request_irq(priv->wol_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc wol MSI %d (error: %d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+ /* Request the LPI IRQ in case another line
+ * is used for LPI
+ */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ int_name = priv->int_name_lpi;
+ sprintf(int_name, "%s:%s", dev->name, "lpi");
+ ret = request_irq(priv->lpi_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc lpi MSI %d (error: %d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ /* Request the Safety Feature Correctable Error line in
+ * case another line is used
+ */
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ce;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ce");
+ ret = request_irq(priv->sfty_ce_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ce MSI %d (error: %d)\n",
+ __func__, priv->sfty_ce_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_CE;
+ goto irq_error;
+ }
+ }
+
+ /* Request the Safety Feature Uncorrectable Error line in
+ * case another line is used
+ */
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ue;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ue");
+ ret = request_irq(priv->sfty_ue_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ue MSI %d (error: %d)\n",
+ __func__, priv->sfty_ue_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_UE;
+ goto irq_error;
+ }
+ }
+
+ /* Request Rx MSI irq */
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (priv->rx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_rx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
+ ret = request_irq(priv->rx_irq[i],
+ stmmac_msi_intr_rx,
+ 0, int_name, &priv->rx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc rx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->rx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_RX;
+ irq_idx = i;
+ goto irq_error;
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ }
+
+ /* Request Tx MSI irq */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (priv->tx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_tx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
+ ret = request_irq(priv->tx_irq[i],
+ stmmac_msi_intr_tx,
+ 0, int_name, &priv->tx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc tx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->tx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_TX;
+ irq_idx = i;
+ goto irq_error;
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, irq_idx);
+ return ret;
+}
+
+static int stmmac_request_irq_single(struct net_device *dev)
+{
+ enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ return ret;
+ }
+
+ /* Request the Wake IRQ in case another line
+ * is used for WoL
+ */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ ret = request_irq(priv->wol_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ return ret;
+ }
+ }
+
+ /* Request the IRQ lines */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, 0);
+ return ret;
+}
+
+static int stmmac_request_irq(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ /* Request the IRQ lines */
+ if (priv->plat->multi_msi_en)
+ ret = stmmac_request_irq_multi_msi(dev);
+ else
+ ret = stmmac_request_irq_single(dev);
+
+ return ret;
+}
+
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -2805,22 +3630,28 @@ static void stmmac_hw_teardown(struct net_device *dev)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_open(struct net_device *dev)
+int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
int bfsize = 0;
u32 chan;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
- priv->hw->xpcs == NULL) {
+ priv->hw->xpcs_args.an_mode != DW_AN_C73) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
"%s: Cannot attach to PHY (error: %d)\n",
__func__, ret);
- return ret;
+ goto init_phy_error;
}
}
@@ -2850,9 +3681,8 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+ /* Set up the per-TXQ TBS flag before TX descriptor allocation */
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
- tx_q->tbs &= ~STMMAC_TBS_AVAIL;
}
ret = alloc_dma_desc_resources(priv);
@@ -2881,50 +3711,15 @@ static int stmmac_open(struct net_device *dev)
/* We may have called phylink_speed_down before */
phylink_speed_up(priv->phylink);
- /* Request the IRQ lines */
- ret = request_irq(dev->irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: ERROR: allocating the IRQ %d (error: %d)\n",
- __func__, dev->irq, ret);
+ ret = stmmac_request_irq(dev);
+ if (ret)
goto irq_error;
- }
-
- /* Request the Wake IRQ in case of another line is used for WoL */
- if (priv->wol_irq != dev->irq) {
- ret = request_irq(priv->wol_irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
- __func__, priv->wol_irq, ret);
- goto wolirq_error;
- }
- }
-
- /* Request the IRQ lines */
- if (priv->lpi_irq > 0) {
- ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
- dev->name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
- __func__, priv->lpi_irq, ret);
- goto lpiirq_error;
- }
- }
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
return 0;
-lpiirq_error:
- if (priv->wol_irq != dev->irq)
- free_irq(priv->wol_irq, dev);
-wolirq_error:
- free_irq(dev->irq, dev);
irq_error:
phylink_stop(priv->phylink);
@@ -2936,16 +3731,28 @@ init_error:
free_dma_desc_resources(priv);
dma_desc_error:
phylink_disconnect_phy(priv->phylink);
+init_phy_error:
+ pm_runtime_put(priv->device);
return ret;
}
+static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+{
+ set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ if (priv->fpe_wq)
+ destroy_workqueue(priv->fpe_wq);
+
+ netdev_info(priv->dev, "FPE workqueue stop");
+}
+
/**
* stmmac_release - close entry point of the driver
* @dev : device pointer.
* Description:
* This is the stop entry point of the driver.
*/
-static int stmmac_release(struct net_device *dev)
+int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
@@ -2962,11 +3769,7 @@ static int stmmac_release(struct net_device *dev)
hrtimer_cancel(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */
- free_irq(dev->irq, dev);
- if (priv->wol_irq != dev->irq)
- free_irq(priv->wol_irq, dev);
- if (priv->lpi_irq > 0)
- free_irq(priv->lpi_irq, dev);
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -2986,6 +3789,11 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
+ pm_runtime_put(priv->device);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_stop_wq(priv);
+
return 0;
}
@@ -3071,6 +3879,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
}
}
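+/* Publish prepared TX descriptors to the hardware: a barrier orders the
+ * own-bit writes, then the TX tail pointer is advanced past the last
+ * queued descriptor.
+ */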
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int desc_size;
+
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ /* The own bit must be the latest setting done when preparing the
+ * descriptor, and then a barrier is needed to make sure that
+ * all is coherent before granting control to the DMA engine.
+ */
+ wmb();
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+}
+
/**
* stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
* @skb : the socket buffer
@@ -3102,10 +3932,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int desc_size, tmp_pay_len = 0, first_tx;
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
@@ -3187,6 +4017,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
if (priv->dma_cap.addr64 <= 32) {
first->des0 = cpu_to_le32(des);
@@ -3222,12 +4054,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
}
tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
/* Manage tx mitigation */
tx_packets = (tx_q->cur_tx + 1) - first_tx;
@@ -3235,11 +4069,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
set_ic = true;
- else if (!priv->tx_coal_frames)
+ else if (!priv->tx_coal_frames[queue])
set_ic = false;
- else if (tx_packets > priv->tx_coal_frames)
+ else if (tx_packets > priv->tx_coal_frames[queue])
set_ic = true;
- else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
set_ic = true;
else
set_ic = false;
@@ -3302,12 +4137,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_tx_owner(priv, mss_desc);
}
- /* The own bit must be the latest setting done when prepare the
- * descriptor and then barrier is needed to make sure that
- * all is coherent before granting the DMA engine.
- */
- wmb();
-
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
@@ -3318,13 +4147,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- if (tx_q->tbs & STMMAC_TBS_AVAIL)
- desc_size = sizeof(struct dma_edesc);
- else
- desc_size = sizeof(struct dma_desc);
-
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+ stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@@ -3354,10 +4177,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
struct dma_edesc *tbs_desc = NULL;
- int entry, desc_size, first_tx;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
+ int entry, first_tx;
dma_addr_t des;
tx_q = &priv->tx_queue[queue];
@@ -3445,6 +4268,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[entry].map_as_page = true;
tx_q->tx_skbuff_dma[entry].len = len;
tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
/* Prepare the descriptor and set the own bit too */
stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
@@ -3453,6 +4277,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[entry] = skb;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
/* According to the coalesce parameter the IC bit for the latest
* segment is reset and the timer re-started to clean the tx status.
@@ -3464,11 +4289,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
set_ic = true;
- else if (!priv->tx_coal_frames)
+ else if (!priv->tx_coal_frames[queue])
set_ic = false;
- else if (tx_packets > priv->tx_coal_frames)
+ else if (tx_packets > priv->tx_coal_frames[queue])
set_ic = true;
- else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
set_ic = true;
else
set_ic = false;
@@ -3530,6 +4356,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
stmmac_set_desc_addr(priv, first, des);
@@ -3558,25 +4386,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_tx_owner(priv, first);
- /* The own bit must be the latest setting done when prepare the
- * descriptor and then barrier is needed to make sure that
- * all is coherent before granting the DMA engine.
- */
- wmb();
-
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- if (likely(priv->extend_desc))
- desc_size = sizeof(struct dma_extended_desc);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- desc_size = sizeof(struct dma_edesc);
- else
- desc_size = sizeof(struct dma_desc);
-
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+ stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@@ -3619,11 +4433,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- int len, dirty = stmmac_rx_dirty(priv, queue);
+ int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
- len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
-
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
struct dma_desc *p;
@@ -3646,18 +4458,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-
- dma_sync_single_for_device(priv->device, buf->sec_addr,
- len, DMA_FROM_DEVICE);
}
- buf->addr = page_pool_get_dma_addr(buf->page);
-
- /* Sync whole allocation to device. This will invalidate old
- * data.
- */
- dma_sync_single_for_device(priv->device, buf->addr, len,
- DMA_FROM_DEVICE);
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->sph)
@@ -3667,11 +4470,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
stmmac_refill_desc3(priv, rx_q, p);
rx_q->rx_count_frames++;
- rx_q->rx_count_frames += priv->rx_coal_frames;
- if (rx_q->rx_count_frames > priv->rx_coal_frames)
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
rx_q->rx_count_frames = 0;
- use_rx_wd = !priv->rx_coal_frames;
+ use_rx_wd = !priv->rx_coal_frames[queue];
use_rx_wd |= rx_q->rx_count_frames > 0;
if (!priv->use_riwt)
use_rx_wd = false;
@@ -3736,6 +4539,487 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
return plen - len;
}
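+/* Queue a single XDP frame for transmission. @dma_map selects between
+ * ndo_xdp_xmit (the frame must be DMA-mapped here) and XDP_TX (the page
+ * is already mapped by the RX page pool and only needs a device sync).
+ */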
+static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc;
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+ if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
+ return STMMAC_XDP_CONSUMED;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
+ if (dma_map) {
+ dma_addr = dma_map_single(priv->device, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr))
+ return STMMAC_XDP_CONSUMED;
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
+ xdpf->headroom;
+ dma_sync_single_for_device(priv->device, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+ }
+
+ tx_q->tx_skbuff_dma[entry].buf = dma_addr;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdpf->len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ tx_q->xdpf[entry] = xdpf;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
+ true, priv->mode, true, true,
+ xdpf->len);
+
+ tx_q->tx_count_frames++;
+
+ if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ return STMMAC_XDP_TX;
+}
+
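+/* Pick the TX queue for XDP transmission by folding the current CPU id
+ * into the range of configured TX queues.
+ */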
+static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
+ int cpu)
+{
+ int index = cpu;
+
+ if (unlikely(index < 0))
+ index = 0;
+
+ while (index >= priv->plat->tx_queues_to_use)
+ index -= priv->plat->tx_queues_to_use;
+
+ return index;
+}
+
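+/* XDP_TX path: convert the XDP buffer to an xdp_frame and send it on the
+ * TX queue selected for the current CPU, under the netdev TX queue lock.
+ */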
+static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int queue;
+ int res;
+
+ if (unlikely(!xdpf))
+ return STMMAC_XDP_CONSUMED;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoid TX time-outs, as we share this queue with the slow path */
+ nq->trans_start = jiffies;
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
+ if (res == STMMAC_XDP_TX)
+ stmmac_flush_tx_descriptors(priv, queue);
+
+ __netif_tx_unlock(nq);
+
+ return res;
+}
+
+/* This function assumes rcu_read_lock() is held by the caller. */
+static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ u32 act;
+ int res;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ res = STMMAC_XDP_PASS;
+ break;
+ case XDP_TX:
+ res = stmmac_xdp_xmit_back(priv, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
+ res = STMMAC_XDP_CONSUMED;
+ else
+ res = STMMAC_XDP_REDIRECT;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ res = STMMAC_XDP_CONSUMED;
+ break;
+ }
+
+ return res;
+}
+
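+/* Run the attached XDP program, if any, under the RCU read lock and
+ * encode the verdict as an ERR_PTR for the caller to unpack.
+ */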
+static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ res = STMMAC_XDP_PASS;
+ goto unlock;
+ }
+
+ res = __stmmac_xdp_run_prog(priv, prog, xdp);
+unlock:
+ rcu_read_unlock();
+ return ERR_PTR(-res);
+}
+
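+/* At the end of an RX cycle, arm the TX timer if XDP_TX frames were
+ * queued and flush any maps touched by XDP_REDIRECT.
+ */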
+static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
+ int xdp_status)
+{
+ int cpu = smp_processor_id();
+ int queue;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+
+ if (xdp_status & STMMAC_XDP_TX)
+ stmmac_tx_timer_arm(priv, queue);
+
+ if (xdp_status & STMMAC_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
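+/* XSK RX buffers must be recycled to the pool, so build the skb by
+ * copying the frame data (and metadata) out of the XDP buffer.
+ */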
+static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ return skb;
+}
+
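+/* Hand a zero-copy RX frame up the stack: build an skb from the XDP
+ * buffer, recover timestamp/VLAN/checksum/hash from the descriptors and
+ * feed it to GRO on the rx-tx NAPI context.
+ */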
+static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ struct dma_desc *p, struct dma_desc *np,
+ struct xdp_buff *xdp)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int len = xdp->data_end - xdp->data;
+ enum pkt_hash_types hash_type;
+ int coe = priv->hw->rx_csum;
+ struct sk_buff *skb;
+ u32 hash;
+
+ skb = stmmac_construct_skb_zc(ch, xdp);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ return;
+ }
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rxtx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+}
+
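+/* Refill the RX ring with buffers taken from the XSK pool. Returns false
+ * if the pool runs dry before all dirty entries are refilled.
+ */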
+static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int entry = rx_q->dirty_rx;
+ struct dma_desc *rx_desc = NULL;
+ bool ret = true;
+
+ budget = min(budget, stmmac_rx_dirty(priv, queue));
+
+ while (budget-- > 0 && entry != rx_q->cur_rx) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ dma_addr_t dma_addr;
+ bool use_rx_wd;
+
+ if (!buf->xdp) {
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp) {
+ ret = false;
+ break;
+ }
+ }
+
+ if (priv->extend_desc)
+ rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ rx_desc = rx_q->dma_rx + entry;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, rx_desc, dma_addr);
+ stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
+ stmmac_refill_desc3(priv, rx_q, rx_desc);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ }
+
+ if (rx_desc) {
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ }
+
+ return ret;
+}
+
+static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int next_entry = rx_q->cur_rx;
+ unsigned int desc_size;
+ struct bpf_prog *prog;
+ bool failure = false;
+ int xdp_status = 0;
+ int status = 0;
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ struct stmmac_rx_buffer *buf;
+ unsigned int buf1_len = 0;
+ struct dma_desc *np, *p;
+ int entry;
+ int res;
+
+ if (!count && rx_q->state_saved) {
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ error = 0;
+ len = 0;
+ }
+
+ if (count >= limit)
+ break;
+
+read_again:
+ buf1_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (dirty >= STMMAC_RX_FILL_BATCH) {
+ failure = failure ||
+ !stmmac_rx_refill_zc(priv, queue, dirty);
+ dirty = 0;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+		/* check if the descriptor is still owned by the DMA, otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ /* Prefetch the next RX descriptor */
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats,
+ rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ count++;
+ continue;
+ }
+
+		/* Ensure a valid XSK buffer before proceeding */
+ if (!buf->xdp)
+ break;
+
+ /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
+ if (likely(status & rx_not_ls)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ goto read_again;
+ }
+
+		/* XDP ZC frames only support primary buffers for now */
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (likely(!(status & rx_not_ls)) &&
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+		/* RX buffer is good and fits into an XSK pool buffer */
+ buf->xdp->data_end = buf->xdp->data + buf1_len;
+ xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+
+ rcu_read_lock();
+ prog = READ_ONCE(priv->xdp_prog);
+ res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
+ rcu_read_unlock();
+
+ switch (res) {
+ case STMMAC_XDP_PASS:
+ stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
+ xsk_buff_free(buf->xdp);
+ break;
+ case STMMAC_XDP_CONSUMED:
+ xsk_buff_free(buf->xdp);
+ priv->dev->stats.rx_dropped++;
+ break;
+ case STMMAC_XDP_TX:
+ case STMMAC_XDP_REDIRECT:
+ xdp_status |= res;
+ break;
+ }
+
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
+ if (failure || stmmac_rx_dirty(priv, queue) > 0)
+ xsk_set_rx_need_wakeup(rx_q->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
+
+ return (int)count;
+ }
+
+ return failure ? limit : (int)count;
+}
+
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
@@ -3751,8 +5035,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
unsigned int next_entry = rx_q->cur_rx;
+ enum dma_data_direction dma_dir;
unsigned int desc_size;
struct sk_buff *skb = NULL;
+ struct xdp_buff xdp;
+ int xdp_status = 0;
+ int buf_sz;
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3870,6 +5161,64 @@ read_again:
}
if (!skb) {
+ unsigned int pre_len, sync_len;
+
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+
+ xdp.data = page_address(buf->page) + buf->page_offset;
+ xdp.data_end = xdp.data + buf1_len;
+ xdp.data_hard_start = page_address(buf->page);
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.frame_sz = buf_sz;
+ xdp.rxq = &rx_q->xdp_rxq;
+
+ pre_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ skb = stmmac_xdp_run_prog(priv, &xdp);
+			/* Due to xdp_adjust_tail: the DMA sync for_device must
+			 * cover the maximum length the CPU touched
+			 */
+ sync_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ sync_len = max(sync_len, pre_len);
+
+			/* For verdicts other than XDP_PASS */
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & STMMAC_XDP_CONSUMED) {
+ page_pool_put_page(rx_q->page_pool,
+ virt_to_head_page(xdp.data),
+ sync_len, true);
+ buf->page = NULL;
+ priv->dev->stats.rx_dropped++;
+
+ /* Clear skb as it was set as
+ * status by XDP program.
+ */
+ skb = NULL;
+
+ if (unlikely((status & rx_not_ls)))
+ goto read_again;
+
+ count++;
+ continue;
+ } else if (xdp_res & (STMMAC_XDP_TX |
+ STMMAC_XDP_REDIRECT)) {
+ xdp_status |= xdp_res;
+ buf->page = NULL;
+ skb = NULL;
+ count++;
+ continue;
+ }
+ }
+ }
+
+ if (!skb) {
+ /* XDP program may expand or reduce tail */
+ buf1_len = xdp.data_end - xdp.data;
+
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
priv->dev->stats.rx_dropped++;
@@ -3877,10 +5226,8 @@ read_again:
goto drain_data;
}
- dma_sync_single_for_cpu(priv->device, buf->addr,
- buf1_len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(buf->page),
- buf1_len);
+ /* XDP program may adjust header */
+ skb_copy_to_linear_data(skb, xdp.data, buf1_len);
skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
@@ -3888,9 +5235,9 @@ read_again:
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
- buf1_len, DMA_FROM_DEVICE);
+ buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->page, 0, buf1_len,
+ buf->page, buf->page_offset, buf1_len,
priv->dma_buf_sz);
/* Data payload appended into SKB */
@@ -3900,7 +5247,7 @@ read_again:
if (buf2_len) {
dma_sync_single_for_cpu(priv->device, buf->sec_addr,
- buf2_len, DMA_FROM_DEVICE);
+ buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_buf_sz);
@@ -3946,6 +5293,8 @@ drain_data:
rx_q->state.len = len;
}
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
stmmac_rx_refill(priv, queue);
priv->xstats.rx_pkt_n += count;
@@ -3985,7 +5334,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
- work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
+ work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -3999,6 +5348,42 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
return work_done;
}
+static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rxtx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int rx_done, tx_done;
+ u32 chan = ch->index;
+
+ priv->xstats.napi_poll++;
+
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = min(tx_done, budget);
+
+ rx_done = stmmac_rx_zc(priv, budget, chan);
+
+	/* If either TX or RX work is not complete, return budget
+	 * and keep polling
+	 */
+ if (tx_done >= budget || rx_done >= budget)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (napi_complete_done(napi, rx_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+		/* Both RX and TX work are complete,
+		 * so enable both RX & TX IRQs.
+		 */
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return min(rx_done, budget - 1);
+}
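/* Note on the NAPI contract above: returning the full budget tells the
 * core to poll again, so napi_complete_done() must not be called in that
 * case; once napi_complete_done() has run, the return value must stay
 * below budget, hence min(rx_done, budget - 1).
 */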
+
/**
* stmmac_tx_timeout
* @dev : Pointer to net device structure
@@ -4058,6 +5443,11 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
+ if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
+ netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
+ return -EINVAL;
+ }
+
new_mtu = STMMAC_ALIGN(new_mtu);
/* If condition true, FIFO is too small or MTU too large */
@@ -4119,27 +5509,57 @@ static int stmmac_set_features(struct net_device *netdev,
stmmac_rx_ipc(priv, priv->hw);
sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
return 0;
}
-/**
- * stmmac_interrupt - main ISR
- * @irq: interrupt number.
- * @dev_id: to pass the net device pointer (must be valid).
- * Description: this is the main driver interrupt service routine.
- * It can call:
- * o DMA service routine (to manage incoming frame reception and transmission
- * status)
- * o Core interrupts to manage: remote wake-up, management counter, LPI
- * interrupts.
- */
-static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
+ return;
+
+ /* If LP has sent verify mPacket, LP is FPE capable */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
+ if (*lp_state < FPE_STATE_CAPABLE)
+ *lp_state = FPE_STATE_CAPABLE;
+
+		/* If user has requested FPE enable, respond quickly */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_RESPONSE);
+ }
+
+ /* If Local has sent verify mPacket, Local is FPE capable */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
+ if (*lo_state < FPE_STATE_CAPABLE)
+ *lo_state = FPE_STATE_CAPABLE;
+ }
+
+ /* If LP has sent response mPacket, LP is entering FPE ON */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ *lp_state = FPE_STATE_ENTERING_ON;
+
+ /* If Local has sent response mPacket, Local is entering FPE ON */
+ if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
+ *lo_state = FPE_STATE_ENTERING_ON;
+
+ if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
+ !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
+ priv->fpe_wq) {
+ queue_work(priv->fpe_wq, &priv->fpe_task);
+ }
+}
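/* A rough sketch of the mPacket handshake driven here and completed by
 * stmmac_fpe_lp_task() below (event names as used in this driver):
 *
 *	local                           link partner
 *	  |----- verify mPacket ---------->|  TVER: local becomes CAPABLE
 *	  |<---- verify mPacket -----------|  RVER: partner CAPABLE, respond
 *	  |----- response mPacket -------->|  TRSP: local ENTERING_ON
 *	  |<---- response mPacket ---------|  RRSP: partner ENTERING_ON
 *
 * Once both sides reach ENTERING_ON, the worker calls
 * stmmac_fpe_configure() and both move to FPE_STATE_ON.
 */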
+
+static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queues_count;
@@ -4152,12 +5572,16 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
- /* Check if adapter is up */
- if (test_bit(STMMAC_DOWN, &priv->state))
- return IRQ_HANDLED;
- /* Check if a fatal error happened */
- if (stmmac_safety_feat_interrupt(priv))
- return IRQ_HANDLED;
+ if (priv->dma_cap.estsel)
+ stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ &priv->xstats, tx_cnt);
+
+ if (priv->dma_cap.fpesel) {
+ int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
+ priv->dev);
+
+ stmmac_fpe_event_status(priv, status);
+ }
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || xmac) {
@@ -4189,11 +5613,41 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* PCS link status */
if (priv->hw->pcs) {
if (priv->xstats.pcs_link)
- netif_carrier_on(dev);
+ netif_carrier_on(priv->dev);
else
- netif_carrier_off(dev);
+ netif_carrier_off(priv->dev);
}
+
+ stmmac_timestamp_interrupt(priv, priv);
}
+}
+
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ * status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ * interrupts.
+ */
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ if (stmmac_safety_feat_interrupt(priv))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
/* To handle DMA interrupts */
stmmac_dma_interrupt(priv);
@@ -4201,15 +5655,136 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ stmmac_safety_feat_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ int chan = tx_q->queue_index;
+ struct stmmac_priv *priv;
+ int status;
+
+ priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+
+ if (unlikely(!data)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
+
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+ tc <= 256) {
+ tc += 64;
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv,
+ tc,
+ tc,
+ chan);
+ else
+ stmmac_set_dma_operation_mode(priv,
+ tc,
+ SF_DMA_MODE,
+ chan);
+ priv->xstats.threshold = tc;
+ }
+ } else if (unlikely(status == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+{
+ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ int chan = rx_q->queue_index;
+ struct stmmac_priv *priv;
+
+ priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+
+ if (unlikely(!data)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ stmmac_napi_check(priv, chan, DMA_DIR_RX);
+
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
* to allow network I/O with interrupts disabled.
*/
static void stmmac_poll_controller(struct net_device *dev)
{
- disable_irq(dev->irq);
- stmmac_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ /* If adapter is down, do nothing */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ if (priv->plat->multi_msi_en) {
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++)
+ stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ } else {
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+ }
}
#endif
@@ -4258,7 +5833,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return ret;
- stmmac_disable_all_queues(priv);
+ __stmmac_disable_all_queues(priv);
switch (type) {
case TC_SETUP_CLSU32:
@@ -4622,6 +6197,12 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4655,10 +6236,222 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
if (priv->hw->num_vlan) {
ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
if (ret)
- return ret;
+ goto del_vlan_error;
+ }
+
+ ret = stmmac_vlan_update(priv, is_double);
+
+del_vlan_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
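/* Hedged userspace sketch (assumes libbpf >= 1.0 and that "eth0" is an
 * stmmac port): loading and attaching an XDP object lands in the
 * .ndo_bpf hook above with the XDP_SETUP_PROG command.
 */
#include <bpf/libbpf.h>
#include <net/if.h>
#include <stdio.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_verdict.o", NULL);
	int ifindex = if_nametoindex("eth0");
	int prog_fd;

	if (!obj || !ifindex || bpf_object__load(obj))
		return 1;

	prog_fd = bpf_program__fd(bpf_object__next_program(obj, NULL));
	if (bpf_xdp_attach(ifindex, prog_fd, 0, NULL))
		return 1;

	printf("XDP program attached to eth0\n");
	return 0;
}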
+
+static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int i, nxmit = 0;
+ int queue;
+
+ if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+	/* Avoids TX time-out as we are sharing with the slow path */
+ nq->trans_start = jiffies;
+
+ for (i = 0; i < num_frames; i++) {
+ int res;
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
+ if (res == STMMAC_XDP_CONSUMED)
+ break;
+
+ nxmit++;
}
- return stmmac_vlan_update(priv, is_double);
+ if (flags & XDP_XMIT_FLUSH) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_rx_dma(priv, queue);
+ __free_dma_rx_desc_resources(priv, queue);
+}
+
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ u32 buf_size;
+ int ret;
+
+ ret = __alloc_dma_rx_desc_resources(priv, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc RX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ if (ret) {
+ __free_dma_rx_desc_resources(priv, queue);
+ netdev_err(priv->dev, "Failed to init RX desc.\n");
+ return;
+ }
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, rx_q->queue_index);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, rx_q->queue_index);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_start_rx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_tx_dma(priv, queue);
+ __free_dma_tx_desc_resources(priv, queue);
+}
+
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ int ret;
+
+ ret = __alloc_dma_tx_desc_resources(priv, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc TX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_tx_desc_rings(priv, queue);
+ if (ret) {
+ __free_dma_tx_desc_resources(priv, queue);
+ netdev_err(priv->dev, "Failed to init TX desc.\n");
+ return;
+ }
+
+ stmmac_clear_tx_descriptors(priv, queue);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, tx_q->queue_index);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, tx_q->queue_index);
+
+ stmmac_start_tx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ struct stmmac_channel *ch;
+
+ if (test_bit(STMMAC_DOWN, &priv->state) ||
+ !netif_carrier_ok(priv->dev))
+ return -ENETDOWN;
+
+ if (!stmmac_xdp_is_enabled(priv))
+ return -ENXIO;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ rx_q = &priv->rx_queue[queue];
+ tx_q = &priv->tx_queue[queue];
+ ch = &priv->channel[queue];
+
+ if (!rx_q->xsk_pool && !tx_q->xsk_pool)
+ return -ENXIO;
+
+ if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
+		/* EQoS does not have a per-DMA channel SW interrupt,
+		 * so we schedule the RX NAPI straight away.
+		 */
+ if (likely(napi_schedule_prep(&ch->rxtx_napi)))
+ __napi_schedule(&ch->rxtx_napi);
+ }
+
+ return 0;
}
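/* Hedged userspace sketch: with the XDP_USE_NEED_WAKEUP bind flag, an
 * AF_XDP application kicks the kernel itself, which is what reaches the
 * .ndo_xsk_wakeup hook above. xsk_fd is assumed to be an AF_XDP socket
 * already bound to one of this device's queues.
 */
#include <sys/socket.h>
#include <stdio.h>
#include <poll.h>
#include <errno.h>

static void xsk_kick_tx(int xsk_fd)
{
	/* the canonical TX kick is a zero-length send */
	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY && errno != ENETDOWN)
		perror("sendto");	/* hard error, e.g. -ENXIO above */
}

static int xsk_wait_rx(int xsk_fd)
{
	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };

	/* poll() schedules the driver's rxtx NAPI when need_wakeup is set */
	return poll(&pfd, 1, -1);
}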
static const struct net_device_ops stmmac_netdev_ops = {
@@ -4679,6 +6472,9 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_mac_address = stmmac_set_mac_address,
.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+ .ndo_bpf = stmmac_bpf,
+ .ndo_xdp_xmit = stmmac_xdp_xmit,
+ .ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4837,6 +6633,12 @@ static void stmmac_napi_add(struct net_device *dev)
stmmac_napi_poll_tx,
NAPI_POLL_WEIGHT);
}
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_add(dev, &ch->rxtx_napi,
+ stmmac_napi_poll_rxtx,
+ NAPI_POLL_WEIGHT);
+ }
}
}
@@ -4854,6 +6656,10 @@ static void stmmac_napi_del(struct net_device *dev)
netif_napi_del(&ch->rx_napi);
if (queue < priv->plat->tx_queues_to_use)
netif_napi_del(&ch->tx_napi);
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_del(&ch->rxtx_napi);
+ }
}
}
@@ -4895,6 +6701,68 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
+#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
+static void stmmac_fpe_lp_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ fpe_task);
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+ bool *enable = &fpe_cfg->enable;
+ int retries = 20;
+
+ while (retries-- > 0) {
+ /* Bail out immediately if FPE handshake is OFF */
+ if (*lo_state == FPE_STATE_OFF || !*hs_enable)
+ break;
+
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+
+ netdev_info(priv->dev, "configured FPE\n");
+
+ *lo_state = FPE_STATE_ON;
+ *lp_state = FPE_STATE_ON;
+ netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
+ break;
+ }
+
+ if ((*lo_state == FPE_STATE_CAPABLE ||
+ *lo_state == FPE_STATE_ENTERING_ON) &&
+ *lp_state != FPE_STATE_ON) {
+			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+ msleep(500);
+ }
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+}
+
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+{
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+ priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
+ }
+
+ priv->plat->fpe_cfg->hs_enable = enable;
+ }
+}
+
/**
* stmmac_dvr_probe
* @device: device pointer
@@ -4930,12 +6798,19 @@ int stmmac_dvr_probe(struct device *device,
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
+ priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
-
- if (!IS_ERR_OR_NULL(res->mac))
+ priv->sfty_ce_irq = res->sfty_ce_irq;
+ priv->sfty_ue_irq = res->sfty_ue_irq;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ priv->rx_irq[i] = res->rx_irq[i];
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ priv->tx_irq[i] = res->tx_irq[i];
+
+ if (!is_zero_ether_addr(res->mac))
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
dev_set_drvdata(device, priv->dev);
@@ -4943,6 +6818,10 @@ int stmmac_dvr_probe(struct device *device,
/* Verify driver arguments */
stmmac_verify_args();
+ priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
+ if (!priv->af_xdp_zc_qps)
+ return -ENOMEM;
+
/* Allocate workqueue */
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
@@ -4952,6 +6831,9 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
+ /* Initialize Link Partner FPE workqueue */
+ INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -4973,6 +6855,11 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch */
+ if (priv->synopsys_id < DWMAC_CORE_5_20)
+ priv->plat->dma_cfg->dche = false;
+
stmmac_check_ether_addr(priv);
ndev->netdev_ops = &stmmac_netdev_ops;
@@ -4995,7 +6882,8 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.sphen) {
ndev->hw_features |= NETIF_F_GRO;
- priv->sph = true;
+ priv->sph_cap = true;
+ priv->sph = priv->sph_cap;
dev_info(priv->device, "SPH feature enabled\n");
}
@@ -5097,6 +6985,10 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
+ pm_runtime_get_noresume(device);
+ pm_runtime_set_active(device);
+ pm_runtime_enable(device);
+
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
@@ -5134,6 +7026,11 @@ int stmmac_dvr_probe(struct device *device,
stmmac_init_fs(ndev);
#endif
+ /* Let pm_runtime_put() disable the clocks.
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
+ */
+ pm_runtime_put(device);
+
return ret;
error_serdes_powerup:
@@ -5148,6 +7045,8 @@ error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
+ stmmac_bus_clks_config(priv, false);
+ bitmap_free(priv->af_xdp_zc_qps);
return ret;
}
@@ -5183,13 +7082,14 @@ int stmmac_dvr_remove(struct device *dev)
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
+ bitmap_free(priv->af_xdp_zc_qps);
return 0;
}
@@ -5207,6 +7107,7 @@ int stmmac_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;
+ int ret;
if (!ndev || !netif_running(ndev))
return 0;
@@ -5250,11 +7151,24 @@ int stmmac_suspend(struct device *dev)
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ mutex_unlock(&priv->lock);
+ return ret;
+ }
}
+
mutex_unlock(&priv->lock);
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+ stmmac_fpe_handshake(priv, false);
+ }
+
priv->speed = SPEED_UNKNOWN;
return 0;
}
@@ -5317,8 +7231,9 @@ int stmmac_resume(struct device *dev)
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
- clk_prepare_enable(priv->plat->stmmac_clk);
- clk_prepare_enable(priv->plat->pclk);
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
if (priv->plat->clk_ptp_ref)
clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index d64116e0543e..b750074f8f9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -15,6 +15,7 @@
#include <linux/iopoll.h>
#include <linux/mii.h>
#include <linux/of_mdio.h>
+#include <linux/pm_runtime.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -87,21 +88,29 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
if (phyreg & MII_ADDR_C45) {
phyreg &= ~MII_ADDR_C45;
ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
if (ret)
- return ret;
+ goto err_disable_clks;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
- return ret;
+ goto err_disable_clks;
value |= MII_XGMAC_SADDR;
}
@@ -112,8 +121,10 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
/* Set the MII address register to read */
writel(addr, priv->ioaddr + mii_address);
@@ -121,11 +132,18 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
/* Read the data from the MII data register */
- return readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
+ ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return ret;
}
static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
@@ -138,21 +156,29 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
if (phyreg & MII_ADDR_C45) {
phyreg &= ~MII_ADDR_C45;
ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
if (ret)
- return ret;
+ goto err_disable_clks;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
- return ret;
+ goto err_disable_clks;
value |= MII_XGMAC_SADDR;
}
@@ -164,16 +190,23 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
/* Set the MII address register to write */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- return readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000);
+ ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000);
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return ret;
}
/**
@@ -196,6 +229,12 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data = 0;
u32 v;
+ data = pm_runtime_get_sync(priv->device);
+ if (data < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return data;
+ }
+
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
@@ -216,19 +255,26 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
}
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ 100, 10000)) {
+ data = -EBUSY;
+ goto err_disable_clks;
+ }
writel(data, priv->ioaddr + mii_data);
writel(value, priv->ioaddr + mii_address);
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ 100, 10000)) {
+ data = -EBUSY;
+ goto err_disable_clks;
+ }
/* Read the data from the MII data register */
data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
return data;
}
@@ -247,10 +293,16 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
+ int ret, data = phydata;
u32 value = MII_BUSY;
- int data = phydata;
u32 v;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
@@ -275,16 +327,23 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
/* Set the MII address register to write */
writel(data, priv->ioaddr + mii_data);
writel(value, priv->ioaddr + mii_address);
/* Wait until any existing MII operation is complete */
- return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000);
+ ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000);
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 272cb47af9f2..95e0e4d6f74d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -198,8 +198,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- pci_enable_msi(pdev);
-
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6dc9f10414e4..1e17a23d9118 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -394,7 +394,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
* set some private fields that will be used by the main at runtime.
*/
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
+stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
{
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
@@ -406,12 +406,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
if (!plat)
return ERR_PTR(-ENOMEM);
- *mac = of_get_mac_address(np);
- if (IS_ERR(*mac)) {
- if (PTR_ERR(*mac) == -EPROBE_DEFER)
- return ERR_CAST(*mac);
+ rc = of_get_mac_address(np, mac);
+ if (rc) {
+ if (rc == -EPROBE_DEFER)
+ return ERR_PTR(rc);
- *mac = NULL;
+ eth_zero_addr(mac);
}
plat->phy_interface = device_get_phy_mode(&pdev->dev);
@@ -627,7 +627,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
}
#else
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
+stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
{
return ERR_PTR(-EINVAL);
}
@@ -704,7 +704,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
-#ifdef CONFIG_PM_SLEEP
/**
* stmmac_pltfr_suspend
* @dev: device pointer
@@ -712,7 +711,7 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
* call the main suspend function and then, if required, on some platform, it
* can call an exit helper.
*/
-static int stmmac_pltfr_suspend(struct device *dev)
+static int __maybe_unused stmmac_pltfr_suspend(struct device *dev)
{
int ret;
struct net_device *ndev = dev_get_drvdata(dev);
@@ -733,7 +732,7 @@ static int stmmac_pltfr_suspend(struct device *dev)
* the main resume function, on some platforms, it can call own init helper
* if required.
*/
-static int stmmac_pltfr_resume(struct device *dev)
+static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -744,10 +743,29 @@ static int stmmac_pltfr_resume(struct device *dev)
return stmmac_resume(dev);
}
-#endif /* CONFIG_PM_SLEEP */
-SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
- stmmac_pltfr_resume);
+static int __maybe_unused stmmac_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_bus_clks_config(priv, false);
+
+ return 0;
+}
+
+static int __maybe_unused stmmac_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return stmmac_bus_clks_config(priv, true);
+}
+
+const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+};
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 3a4663b7b460..3fff3f59d73d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -12,7 +12,7 @@
#include "stmmac.h"
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0989e2bb6ee3..4e86cdf2bc9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -9,6 +9,7 @@
*******************************************************************************/
#include "stmmac.h"
#include "stmmac_ptp.h"
+#include "dwmac4.h"
/**
* stmmac_adjust_freq
@@ -134,7 +135,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
{
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ void __iomem *ptpaddr = priv->ptpaddr;
+ void __iomem *ioaddr = priv->hw->pcsr;
struct stmmac_pps_cfg *cfg;
+ u32 intr_value, acr_value;
int ret = -EOPNOTSUPP;
unsigned long flags;
@@ -158,6 +162,37 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
priv->systime_flags);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
break;
+ case PTP_CLK_REQ_EXTTS:
+ priv->plat->ext_snapshot_en = on;
+ mutex_lock(&priv->aux_ts_lock);
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value &= ~PTP_ACR_MASK;
+ if (on) {
+ /* Enable External snapshot trigger */
+ acr_value |= priv->plat->ext_snapshot_num;
+ acr_value |= PTP_ACR_ATSFC;
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
+ priv->plat->ext_snapshot_num >>
+ PTP_ACR_ATSEN_SHIFT);
+ /* Enable Timestamp Interrupt */
+ intr_value = readl(ioaddr + GMAC_INT_EN);
+ intr_value |= GMAC_INT_TSIE;
+ writel(intr_value, ioaddr + GMAC_INT_EN);
+
+ } else {
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
+ priv->plat->ext_snapshot_num >>
+ PTP_ACR_ATSEN_SHIFT);
+ /* Disable Timestamp Interrupt */
+ intr_value = readl(ioaddr + GMAC_INT_EN);
+ intr_value &= ~GMAC_INT_TSIE;
+ writel(intr_value, ioaddr + GMAC_INT_EN);
+ }
+ writel(acr_value, ptpaddr + PTP_ACR);
+ mutex_unlock(&priv->aux_ts_lock);
+ ret = 0;
+ break;
+
default:
break;
}
@@ -165,13 +200,43 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
return ret;
}
+/**
+ * stmmac_get_syncdevicetime
+ * @device: current device time
+ * @system: system counter value read synchronously with device time
+ * @ctx: context provided by timekeeping code
+ * Description: Read device and system clock simultaneously and return the
+ * corrected clock values in ns.
+ **/
+static int stmmac_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
+
+ if (priv->plat->crosststamp)
+ return priv->plat->crosststamp(device, system, ctx);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+
+ return get_device_system_crosststamp(stmmac_get_syncdevicetime,
+ priv, NULL, xtstamp);
+}
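/* Hedged userspace sketch: the getcrosststamp callback above services
 * the PTP_SYS_OFFSET_PRECISE ioctl. /dev/ptp0 is assumed to be this
 * MAC's clock; the call only succeeds when the platform supplies a
 * ->crosststamp hook.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) < 0)
		return 1;

	printf("device %lld.%09u system %lld.%09u\n",
	       (long long)off.device.sec, off.device.nsec,
	       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
	return 0;
}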
+
/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
.name = "stmmac ptp",
.max_adj = 62500000,
.n_alarm = 0,
- .n_ext_ts = 0,
+ .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */
.n_per_out = 0, /* will be overwritten in stmmac_ptp_register */
.n_pins = 0,
.pps = 0,
@@ -180,6 +245,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = stmmac_enable,
+ .getcrosststamp = stmmac_getcrosststamp,
};
/**
@@ -192,6 +258,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
{
int i;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
if (i >= STMMAC_PPS_MAX)
break;
@@ -202,8 +271,10 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+ stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
spin_lock_init(&priv->ptp_lock);
+ mutex_init(&priv->aux_ts_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
@@ -229,4 +300,6 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
}
+
+ mutex_destroy(&priv->aux_ts_lock);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 7abb1d47e7da..53172a439810 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -23,6 +23,9 @@
#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
#define PTP_TAR 0x18 /* Timestamp Addend Reg */
+#define PTP_ACR 0x40 /* Auxiliary Control Reg */
+#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */
+#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */
#define PTP_STNSUR_ADDSUB_SHIFT 31
#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
@@ -64,4 +67,25 @@
#define PTP_SSIR_SSINC_MASK 0xff
#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
+/* Auxiliary Control defines */
+#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */
+#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */
+#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */
+#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */
+#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */
+#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */
+#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */
+#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */
+#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */
+#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */
+#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */
+#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */
+
+enum aux_snapshot {
+ AUX_SNAPSHOT0 = 0x10,
+ AUX_SNAPSHOT1 = 0x20,
+ AUX_SNAPSHOT2 = 0x40,
+ AUX_SNAPSHOT3 = 0x80,
+};
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 44bb133c3000..4e70efc45458 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -254,6 +254,16 @@ static int tc_init(struct stmmac_priv *priv)
priv->flow_entries_max);
}
+ if (!priv->plat->fpe_cfg) {
+ priv->plat->fpe_cfg = devm_kzalloc(priv->device,
+ sizeof(*priv->plat->fpe_cfg),
+ GFP_KERNEL);
+ if (!priv->plat->fpe_cfg)
+ return -ENOMEM;
+ } else {
+ memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
+ }
+
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return 0;
@@ -297,6 +307,7 @@ static int tc_init(struct stmmac_priv *priv)
dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
priv->tc_entries_max, priv->tc_off_max);
+
return 0;
}
@@ -598,6 +609,87 @@ static int tc_del_flow(struct stmmac_priv *priv,
return ret;
}
+#define VLAN_PRIO_FULL_MASK (0x07)
+
+static int tc_add_vlan_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct flow_match_vlan match;
+
+	/* Not a VLAN rule; nothing for this handler to do */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
+ return -EINVAL;
+
+ if (tc < 0) {
+ netdev_err(priv->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_priority) {
+ u32 prio;
+
+ if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+ netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
+ return -EINVAL;
+ }
+
+ prio = BIT(match.key->vlan_priority);
+ stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+ }
+
+ return 0;
+}
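/* Hedged usage note: a flower rule of the shape handled above can be
 * installed with iproute2, e.g. (priority and TC numbers illustrative):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 ingress protocol 802.1Q flower \
 *		vlan_prio 3 hw_tc 2
 *
 * which steers VLAN priority 3 into traffic class 2 via
 * stmmac_rx_queue_prio().
 */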
+
+static int tc_del_vlan_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+
+	/* Not a VLAN rule; nothing for this handler to do */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
+ return -EINVAL;
+
+ if (tc < 0) {
+ netdev_err(priv->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+
+ return 0;
+}
+
+static int tc_add_flow_cls(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ int ret;
+
+ ret = tc_add_flow(priv, cls);
+ if (!ret)
+ return ret;
+
+ return tc_add_vlan_flow(priv, cls);
+}
+
+static int tc_del_flow_cls(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ int ret;
+
+ ret = tc_del_flow(priv, cls);
+ if (!ret)
+ return ret;
+
+ return tc_del_vlan_flow(priv, cls);
+}
+
static int tc_setup_cls(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
@@ -609,10 +701,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
switch (cls->command) {
case FLOW_CLS_REPLACE:
- ret = tc_add_flow(priv, cls);
+ ret = tc_add_flow_cls(priv, cls);
break;
case FLOW_CLS_DESTROY:
- ret = tc_del_flow(priv, cls);
+ ret = tc_del_flow_cls(priv, cls);
break;
default:
return -EOPNOTSUPP;
@@ -748,13 +840,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (fpe && !priv->dma_cap.fpesel)
return -EOPNOTSUPP;
- ret = stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, fpe);
- if (ret && fpe) {
- netdev_err(priv->dev, "failed to enable Frame Preemption\n");
- return ret;
- }
+	/* Actual FPE register configuration will be done after FPE handshake
+	 * is successful.
+	 */
+ priv->plat->fpe_cfg->enable = fpe;
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
@@ -764,12 +853,29 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
}
netdev_info(priv->dev, "configured EST\n");
+
+ if (fpe) {
+ stmmac_fpe_handshake(priv, true);
+ netdev_info(priv->dev, "start FPE handshake\n");
+ }
+
return 0;
disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+
+ priv->plat->fpe_cfg->enable = false;
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false);
+ netdev_info(priv->dev, "disabled FPE\n");
+
+ stmmac_fpe_handshake(priv, false);
+ netdev_info(priv->dev, "stop FPE handshake\n");
+
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
new file mode 100644
index 000000000000..105821b53020
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Intel Corporation. */
+
+#include <net/xdp_sock_drv.h>
+
+#include "stmmac.h"
+#include "stmmac_xdp.h"
+
+static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
+ struct xsk_buff_pool *pool, u16 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ bool need_update;
+ u32 frame_size;
+ int err;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+	/* XDP ZC does not span multiple frames; make sure the XSK pool
+	 * buffer size can at least store a Q-in-Q frame.
+	 */
+ if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2)
+ return -EOPNOTSUPP;
+
+ err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR);
+ if (err) {
+ netdev_err(priv->dev, "Failed to map xsk pool\n");
+ return err;
+ }
+
+ need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+ if (need_update) {
+ stmmac_disable_rx_queue(priv, queue);
+ stmmac_disable_tx_queue(priv, queue);
+ napi_disable(&ch->rx_napi);
+ napi_disable(&ch->tx_napi);
+ }
+
+ set_bit(queue, priv->af_xdp_zc_qps);
+
+ if (need_update) {
+ napi_enable(&ch->rxtx_napi);
+ stmmac_enable_rx_queue(priv, queue);
+ stmmac_enable_tx_queue(priv, queue);
+
+ err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ struct xsk_buff_pool *pool;
+ bool need_update;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(priv->dev, queue);
+ if (!pool)
+ return -EINVAL;
+
+ need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+ if (need_update) {
+ stmmac_disable_rx_queue(priv, queue);
+ stmmac_disable_tx_queue(priv, queue);
+ synchronize_rcu();
+ napi_disable(&ch->rxtx_napi);
+ }
+
+ xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
+
+ clear_bit(queue, priv->af_xdp_zc_qps);
+
+ if (need_update) {
+ napi_enable(&ch->rx_napi);
+ napi_enable(&ch->tx_napi);
+ stmmac_enable_rx_queue(priv, queue);
+ stmmac_enable_tx_queue(priv, queue);
+ }
+
+ return 0;
+}
+
+int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ return pool ? stmmac_xdp_enable_pool(priv, pool, queue) :
+ stmmac_xdp_disable_pool(priv, queue);
+}
+
+int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->dev;
+ struct bpf_prog *old_prog;
+ bool need_update;
+ bool if_running;
+
+ if_running = netif_running(dev);
+
+ if (prog && dev->mtu > ETH_DATA_LEN) {
+	/* For now, the driver doesn't support XDP functionality with
+	 * jumbo frames, so we return an error.
+	 */
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!priv->xdp_prog != !!prog;
+ if (if_running && need_update)
+ stmmac_release(dev);
+
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* Disable RX SPH for XDP operation */
+ priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
+
+ if (if_running && need_update)
+ stmmac_open(dev);
+
+ return 0;
+}
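The two entry points above are reached from the driver's .ndo_bpf hook. A minimal sketch of the expected dispatch (the stmmac_bpf() name and the exact switch shape are illustrative, not taken from this patch):

	/* Illustrative .ndo_bpf dispatcher; assumes the standard XDP_SETUP_*
	 * commands from include/linux/netdevice.h.
	 */
	static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	{
		struct stmmac_priv *priv = netdev_priv(dev);

		switch (bpf->command) {
		case XDP_SETUP_PROG:
			return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
		case XDP_SETUP_XSK_POOL:
			return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
						     bpf->xsk.queue_id);
		default:
			return -EOPNOTSUPP;
		}
	}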
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
new file mode 100644
index 000000000000..896dc987d4ef
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _STMMAC_XDP_H_
+#define _STMMAC_XDP_H_
+
+#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
+#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
+ u16 queue);
+int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+
+#endif /* _STMMAC_XDP_H_ */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9ff894ba8d3e..54f45d8c79a7 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1599,6 +1599,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
cas_phy_write(cp, MII_BMCR, val);
break;
}
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 58f142ee78a3..9790656cf970 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1674,8 +1674,8 @@ static void gem_init_phy(struct gem *gp)
if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
int i;
- /* Those delay sucks, the HW seem to love them though, I'll
- * serisouly consider breaking some locks here to be able
+ /* Those delays suck; the HW seems to love them though, I'll
+ * seriously consider breaking some locks here to be able
* to schedule instead
*/
for (i = 0; i < 3; i++) {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 638d7b03be4b..6a67b026df0b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1824,7 +1824,6 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
for_each_child_of_node(node, port_np) {
struct am65_cpsw_port *port;
- const void *mac_addr;
u32 port_id;
/* it is not a slave port node, continue */
@@ -1903,15 +1902,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return ret;
}
- mac_addr = of_get_mac_address(port_np);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(port->slave.mac_addr, mac_addr);
- } else if (am65_cpsw_am654_get_efuse_macid(port_np,
- port->port_id,
- port->slave.mac_addr) ||
- !is_valid_ether_addr(port->slave.mac_addr)) {
- random_ether_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ ret = of_get_mac_address(port_np, port->slave.mac_addr);
+ if (ret) {
+ am65_cpsw_am654_get_efuse_macid(port_np,
+ port->port_id,
+ port->slave.mac_addr);
+ if (!is_valid_ether_addr(port->slave.mac_addr)) {
+ random_ether_addr(port->slave.mac_addr);
+ dev_err(dev, "Use random MAC address\n");
+ }
}
}
of_node_put(node);
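This and the remaining of_get_mac_address() conversions below all follow one pattern: the helper now fills a caller-supplied ETH_ALEN buffer and returns 0 or a negative errno, replacing the old pointer return that had to be checked with IS_ERR(). A minimal sketch of a typical caller (hypothetical probe fragment):

	u8 addr[ETH_ALEN];
	int err;

	/* New convention: 0 on success, -errno (e.g. -ENODEV) on failure. */
	err = of_get_mac_address(np, addr);
	if (err)
		eth_random_addr(addr);	/* fall back to a random MAC */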
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index d93ffd8a08b0..23cfb91e9c4d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -385,7 +385,7 @@ static void am65_cpsw_switchdev_event_work(struct work_struct *work)
fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port_id);
- if (!fdb->added_by_user)
+ if (!fdb->added_by_user || fdb->is_local)
break;
if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port_id = HOST_PORT_NUM;
@@ -401,7 +401,7 @@ static void am65_cpsw_switchdev_event_work(struct work_struct *work)
fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port_id);
- if (!fdb->added_by_user)
+ if (!fdb->added_by_user || fdb->is_local)
break;
if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port_id = HOST_PORT_NUM;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index fd966567464c..c0cd7de88316 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1123,25 +1123,23 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
struct xdp_frame *xdpf;
- int i, drops = 0, port;
+ int i, nxmit = 0, port;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
for (i = 0; i < n; i++) {
xdpf = frames[i];
- if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- continue;
- }
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+ break;
port = priv->emac_port + cpsw->data.dual_emac;
if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
- drops++;
+ break;
+ nxmit++;
}
- return n - drops;
+ return nxmit;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1298,7 +1296,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
for_each_available_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
- const void *mac_addr = NULL;
int lenp;
const __be32 *parp;
@@ -1370,10 +1367,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
no_phy_slave:
- mac_addr = of_get_mac_address(slave_node);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(slave_data->mac_addr, mac_addr);
- } else {
+ ret = of_get_mac_address(slave_node, slave_data->mac_addr);
+ if (ret) {
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr);
if (ret)
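The cpsw_ndo_xdp_xmit() rework earlier in this file follows the tree-wide change to the .ndo_xdp_xmit contract: a driver stops at the first frame it cannot queue and returns the count actually transmitted; freeing the unsent frames is now the caller's job. A sketch of the caller side (modelled on the devmap bq_xmit_all() pattern, not code from this patch):

	/* Caller-side contract for .ndo_xdp_xmit() after this change. */
	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, frames, flags);
	if (sent < 0)
		sent = 0;	/* hard error: no frames were consumed */
	for (i = sent; i < cnt; i++)
		xdp_return_frame_rx_napi(frames[i]);	/* free the rest */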
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 58a64313ac00..69b7a4e0220a 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1093,24 +1093,22 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct xdp_frame *xdpf;
- int i, drops = 0;
+ int i, nxmit = 0;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
for (i = 0; i < n; i++) {
xdpf = frames[i];
- if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- continue;
- }
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+ break;
if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
- drops++;
+ break;
+ nxmit++;
}
- return n - drops;
+ return nxmit;
}
static int cpsw_get_port_parent_id(struct net_device *ndev,
@@ -1259,7 +1257,6 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
for_each_child_of_node(tmp_node, port_np) {
struct cpsw_slave_data *slave_data;
- const void *mac_addr;
u32 port_id;
ret = of_property_read_u32(port_np, "reg", &port_id);
@@ -1318,10 +1315,8 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
goto err_node_put;
}
- mac_addr = of_get_mac_address(port_np);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(slave_data->mac_addr, mac_addr);
- } else {
+ ret = of_get_mac_address(port_np, slave_data->mac_addr);
+ if (ret) {
ret = ti_cm_get_macid(dev, port_id - 1,
slave_data->mac_addr);
if (ret)
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index bb59e768915e..5862f0a4a975 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1305,19 +1305,15 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
dma, xdpf->len, port);
} else {
- if (sizeof(*xmeta) > xdpf->headroom) {
- xdp_return_frame_rx_napi(xdpf);
+ if (sizeof(*xmeta) > xdpf->headroom)
return -EINVAL;
- }
ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
xdpf->data, xdpf->len, port);
}
- if (ret) {
+ if (ret)
priv->ndev->stats.tx_dropped++;
- xdp_return_frame_rx_napi(xdpf);
- }
return ret;
}
@@ -1353,7 +1349,8 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
if (unlikely(!xdpf))
goto drop;
- cpsw_xdp_tx_frame(priv, xdpf, page, port);
+ if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
+ xdp_return_frame_rx_napi(xdpf);
break;
case XDP_REDIRECT:
if (xdp_do_redirect(ndev, xdp, prog))
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index a72bb570756f..05a64fb7a04f 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -395,7 +395,7 @@ static void cpsw_switchdev_event_work(struct work_struct *work)
fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port);
- if (!fdb->added_by_user)
+ if (!fdb->added_by_user || fdb->is_local)
break;
if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port = HOST_PORT_NUM;
@@ -411,7 +411,7 @@ static void cpsw_switchdev_event_work(struct work_struct *work)
fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port);
- if (!fdb->added_by_user)
+ if (!fdb->added_by_user || fdb->is_local)
break;
if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port = HOST_PORT_NUM;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index c7031e1960d4..f9417b44cae8 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC mac_status register */
#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
-#define EMAC_MACSTATUS_TXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000)
#define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
-#define EMAC_MACSTATUS_RXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_RXERRCH_MASK (0x700)
#define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
/* EMAC RX register masks */
@@ -1687,7 +1687,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
const struct of_device_id *match;
const struct emac_platform_data *auxdata;
struct emac_platform_data *pdata = NULL;
- const u8 *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
return dev_get_platdata(&pdev->dev);
@@ -1699,11 +1698,8 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
np = pdev->dev.of_node;
pdata->version = EMAC_VERSION_2;
- if (!is_valid_ether_addr(pdata->mac_addr)) {
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(pdata->mac_addr, mac_addr);
- }
+ if (!is_valid_ether_addr(pdata->mac_addr))
+ of_get_mac_address(np, pdata->mac_addr);
of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
&pdata->ctrl_reg_offset);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index d7a144b4a09f..9030e619e543 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1966,7 +1966,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
struct resource res;
void __iomem *efuse = NULL;
u32 efuse_mac = 0;
- const void *mac_addr;
u8 efuse_mac_addr[6];
u32 temp[2];
int ret = 0;
@@ -2036,10 +2035,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
} else {
- mac_addr = of_get_mac_address(node_interface);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
- else
+ ret = of_get_mac_address(node_interface, ndev->dev_addr);
+ if (ret)
eth_random_addr(ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index d5a75ef7e3ca..226a76633e65 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -146,7 +146,8 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
/* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
* interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us */
+ * in spider_net_read_phy. Should take about 50 us
+ */
do {
readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
} while (readvalue & SPIDER_NET_GPREXEC);
@@ -387,7 +388,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
(~(SPIDER_NET_RXBUF_ALIGN - 1));
/* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
+ * bit more
+ */
/* allocate an skb */
descr->skb = netdev_alloc_skb(card->netdev,
bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
@@ -488,7 +490,8 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
/* one context doing the refill (and a second context seeing that
* and omitting it) is ok. If called by NAPI, we'll be called again
* as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway. */
+ * interrupt calls us, the NAPI is about to clean up anyway.
+ */
if (!spin_trylock_irqsave(&chain->lock, flags))
return;
@@ -523,14 +526,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
/* Put at least one buffer into the chain. if this fails,
* we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function. */
+ * will do the rest at the end of this function.
+ */
if (spider_net_prepare_rx_descr(card, chain->head))
goto error;
else
chain->head = chain->head->next;
/* This will allocate the rest of the rx buffers;
- * if not, it's business as usual later on. */
+ * if not, it's business as usual later on.
+ */
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
return 0;
@@ -706,7 +711,8 @@ spider_net_set_low_watermark(struct spider_net_card *card)
int i;
/* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock. */
+ * need to be precise -- does not need a lock.
+ */
while (descr != card->tx_chain.head) {
status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
if (status == SPIDER_NET_DESCR_NOT_IN_USE)
@@ -786,7 +792,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED) */
+ * SPIDER_NET_DESCR_CARDOWNED)
+ */
fallthrough;
case SPIDER_NET_DESCR_RESPONSE_ERROR:
@@ -948,7 +955,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
skb_put(skb, hwdescr->valid_size);
/* the card seems to add 2 bytes of junk in front
- * of the ethernet frame */
+ * of the ethernet frame
+ */
#define SPIDER_MISALIGN 2
skb_pull(skb, SPIDER_MISALIGN);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1382,7 +1390,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
/* PHY read operation completed */
/* we don't use semaphores, as we poll for the completion
* of the read operation in spider_net_read_phy. Should take
- * about 50 us */
+ * about 50 us
+ */
show_error = 0;
break;
case SPIDER_NET_GPWFFINT:
@@ -1450,7 +1459,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
{
case SPIDER_NET_GTMFLLINT:
/* TX RAM full may happen on a usual case.
- * Logging is not needed. */
+ * Logging is not needed.
+ */
show_error = 0;
break;
case SPIDER_NET_GRFDFLLINT:
@@ -1694,7 +1704,8 @@ spider_net_enable_card(struct spider_net_card *card)
{
int i;
/* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list */
+ * that are set in this function. A register of 0 ends the list
+ */
u32 regs[][2] = {
{ SPIDER_NET_GRESUMINTNUM, 0 },
{ SPIDER_NET_GREINTNUM, 0 },
@@ -1757,7 +1768,8 @@ spider_net_enable_card(struct spider_net_card *card)
spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
/* set chain tail address for RX chains and
- * enable DMA */
+ * enable DMA
+ */
spider_net_enable_rxchtails(card);
spider_net_enable_rxdmac(card);
@@ -1995,7 +2007,8 @@ static void spider_net_link_phy(struct timer_list *t)
case BCM54XX_UNKNOWN:
/* copper, fiber with and without failed,
- * retry from beginning */
+ * retry from beginning
+ */
spider_net_setup_aneg(card);
card->medium = BCM54XX_COPPER;
break;
@@ -2263,7 +2276,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER */
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ */
/* MTU range: 64 - 2294 */
netdev->min_mtu = SPIDER_NET_MIN_MTU;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 7a6e5ff8e5d4..fedb2bf69261 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1914,7 +1914,8 @@ tc35815_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* With some (all?) 100MHalf HUB, controller will hang
- * if we enabled promiscuous mode before linkup... */
+ * if we enabled promiscuous mode before linkup...
+ */
struct tc35815_local *lp = netdev_priv(dev);
if (!lp->link)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index b65767f9e499..fecc4d7b00b0 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2525,7 +2525,7 @@ static int velocity_close(struct net_device *dev)
* @skb: buffer to transmit
* @dev: network device
*
- * Called by the networ layer to request a packet is queued to
+ * Called by the network layer to request a packet is queued to
* the velocity. Returns zero on success.
*/
static netdev_tx_t velocity_xmit(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 2b4126d2427d..2b84848dc26a 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -423,8 +423,14 @@ static int w5100_spi_probe(struct spi_device *spi)
const struct of_device_id *of_id;
const struct w5100_ops *ops;
kernel_ulong_t driver_data;
+ const void *mac = NULL;
+ u8 tmpmac[ETH_ALEN];
int priv_size;
- const void *mac = of_get_mac_address(spi->dev.of_node);
+ int ret;
+
+ ret = of_get_mac_address(spi->dev.of_node, tmpmac);
+ if (!ret)
+ mac = tmpmac;
if (spi->dev.of_node) {
of_id = of_match_device(w5100_of_match, &spi->dev);
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index c0d181a7f83a..ec5db481c9cd 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1157,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
INIT_WORK(&priv->setrx_work, w5100_setrx_work);
INIT_WORK(&priv->restart_work, w5100_restart_work);
- if (!IS_ERR_OR_NULL(mac_addr))
+ if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index c6eb7f2368aa..911b5ef9e680 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -18,12 +18,14 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
+ depends on HAS_IOMEM
select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
+ depends on HAS_IOMEM
select PHYLINK
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
@@ -31,6 +33,7 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on HAS_IOMEM
select PHYLIB
help
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 030185301014..a1f5f07f4ca9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
- ether_addr_copy(ndev->dev_addr, address);
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
temac_do_set_mac_address(ndev);
@@ -1351,7 +1351,7 @@ static int temac_probe(struct platform_device *pdev)
struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
- const void *addr;
+ u8 addr[ETH_ALEN];
__be32 *p;
bool little_endian;
int rc = 0;
@@ -1542,8 +1542,8 @@ static int temac_probe(struct platform_device *pdev)
if (temac_np) {
/* Retrieve the MAC address */
- addr = of_get_mac_address(temac_np);
- if (IS_ERR(addr)) {
+ rc = of_get_mac_address(temac_np, addr);
+ if (rc) {
dev_err(&pdev->dev, "could not find MAC address\n");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index aca7f82f6791..5b4d153b1492 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -376,6 +376,8 @@ struct axidma_bd {
struct sk_buff *skb;
} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
+#define XAE_NUM_MISC_CLOCKS 3
+
/**
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
@@ -385,7 +387,8 @@ struct axidma_bd {
* @phylink_config: phylink configuration settings
* @pcs_phy: Reference to PCS/PMA PHY if used
* @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
- * @clk: Clock for AXI bus
+ * @axi_clk: AXI4-Lite bus clock
+ * @misc_clks: Misc ethernet clocks (AXI4-Stream, Ref, MGT clocks)
* @mii_bus: Pointer to MII bus structure
* @mii_clk_div: MII bus clock divider value
* @regs_start: Resource start for axienet device addresses
@@ -434,7 +437,8 @@ struct axienet_local {
bool switch_x_sgmii;
- struct clk *clk;
+ struct clk *axi_clk;
+ struct clk_bulk_data misc_clks[XAE_NUM_MISC_CLOCKS];
struct mii_bus *mii_bus;
u8 mii_clk_div;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f8f8654ea728..b508c9453f40 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1835,8 +1835,8 @@ static int axienet_probe(struct platform_device *pdev)
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- const void *mac_addr;
struct resource *ethres;
+ u8 mac_addr[ETH_ALEN];
int addr_width = 32;
u32 value;
@@ -1863,22 +1863,39 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
- lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(lp->clk)) {
- ret = PTR_ERR(lp->clk);
+ lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+ if (!lp->axi_clk) {
+ /* For backward compatibility, if the named AXI clock is not present,
+ * treat the first clock specified as the AXI clock.
+ */
+ lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
+ }
+ if (IS_ERR(lp->axi_clk)) {
+ ret = PTR_ERR(lp->axi_clk);
goto free_netdev;
}
- ret = clk_prepare_enable(lp->clk);
+ ret = clk_prepare_enable(lp->axi_clk);
if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
+ dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
goto free_netdev;
}
+ lp->misc_clks[0].id = "axis_clk";
+ lp->misc_clks[1].id = "ref_clk";
+ lp->misc_clks[2].id = "mgt_clk";
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ if (ret)
+ goto cleanup_clk;
+
+ ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ if (ret)
+ goto cleanup_clk;
+
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (IS_ERR(lp->regs)) {
- dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = PTR_ERR(lp->regs);
goto cleanup_clk;
}
@@ -2045,13 +2062,14 @@ static int axienet_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
/* Retrieve the MAC address */
- mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (IS_ERR(mac_addr)) {
- dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
- PTR_ERR(mac_addr));
- mac_addr = NULL;
+ ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
+ if (!ret) {
+ axienet_set_mac_address(ndev, mac_addr);
+ } else {
+ dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
+ ret);
+ axienet_set_mac_address(ndev, NULL);
}
- axienet_set_mac_address(ndev, mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
@@ -2109,7 +2127,8 @@ cleanup_mdio:
of_node_put(lp->phy_node);
cleanup_clk:
- clk_disable_unprepare(lp->clk);
+ clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ clk_disable_unprepare(lp->axi_clk);
free_netdev:
free_netdev(ndev);
@@ -2132,7 +2151,8 @@ static int axienet_remove(struct platform_device *pdev)
axienet_mdio_teardown(lp);
- clk_disable_unprepare(lp->clk);
+ clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ clk_disable_unprepare(lp->axi_clk);
of_node_put(lp->phy_node);
lp->phy_node = NULL;
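The axienet clock changes above use the clk_bulk helpers so the three optional AXI4-Stream/Ref/MGT clocks are fetched and enabled in one call each. The general pattern, as a sketch (the function name and usage here are placeholders):

	/* Optional bulk clocks in a probe path; absent clocks come back
	 * as NULL and are skipped by the enable/disable helpers.
	 */
	static int example_enable_clks(struct device *dev,
				       struct clk_bulk_data *clks, int num)
	{
		int ret;

		ret = devm_clk_bulk_get_optional(dev, num, clks);
		if (ret)
			return ret;	/* a real error, not just "missing" */

		return clk_bulk_prepare_enable(num, clks);
	}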
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 9c014cee34b2..48f544f6c999 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -159,8 +159,8 @@ int axienet_mdio_enable(struct axienet_local *lp)
lp->mii_clk_div = 0;
- if (lp->clk) {
- host_clock = clk_get_rate(lp->clk);
+ if (lp->axi_clk) {
+ host_clock = clk_get_rate(lp->axi_clk);
} else {
struct device_node *np1;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 007840d4a807..d9d58a7dabee 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1115,7 +1115,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
struct net_device *ndev = NULL;
struct net_local *lp = NULL;
struct device *dev = &ofdev->dev;
- const void *mac_address;
int rc = 0;
@@ -1157,12 +1156,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->next_rx_buf_to_use = 0x0;
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
- mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (!IS_ERR(mac_address)) {
- /* Set the MAC address. */
- ether_addr_copy(ndev->dev_addr, mac_address);
- } else {
+ rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ if (rc) {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
}
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 3e337142b516..2049d76a0e68 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -798,8 +798,6 @@ xirc2ps_config(struct pcmcia_device * link)
goto config_error;
}
port_found:
- if (err)
- goto config_error;
/****************
* Now allocate an interrupt line. Note that this does not
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 7b83a6e5d894..468ffe3d1707 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -22,6 +22,7 @@ config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
select PHYLIB
+ select OF_MDIO if OF
select NET_PTP_CLASSIFY
help
Say Y here if you want to use built-in Ethernet ports
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 0152f1e70783..cb89323855d8 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
@@ -165,7 +166,6 @@ struct eth_regs {
};
struct port {
- struct resource *mem_res;
struct eth_regs __iomem *regs;
struct npe *npe;
struct net_device *netdev;
@@ -250,6 +250,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
+static struct device_node *mdio_bus_np;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
@@ -533,7 +534,8 @@ static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
mdio_bus->write = &ixp4xx_mdio_write;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
- if ((err = mdiobus_register(mdio_bus)))
+ err = of_mdiobus_register(mdio_bus, mdio_bus_np);
+ if (err)
mdiobus_free(mdio_bus);
return err;
}
@@ -1085,7 +1087,7 @@ static int init_queues(struct port *port)
int i;
if (!ports_open) {
- dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
+ dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
POOL_ALLOC_SIZE, 32, 0);
if (!dma_pool)
return -ENOMEM;
@@ -1358,19 +1360,118 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+#ifdef CONFIG_OF
+static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+ struct device_node *mdio_np;
+ struct eth_plat_info *plat;
+ int ret;
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return NULL;
+
+ ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
+ &npe_spec);
+ if (ret) {
+ dev_err(dev, "no NPE engine specified\n");
+ return NULL;
+ }
+ /* NPE ID 0x00, 0x10, 0x20... */
+ plat->npe = (npe_spec.args[0] << 4);
+
+ /* Check if this device has an MDIO bus */
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ plat->has_mdio = true;
+ mdio_bus_np = mdio_np;
+ /* DO NOT put mdio_np here; of_mdiobus_register() uses it later */
+ }
+
+ /* Get the rx queue as a resource from queue manager */
+ ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no rx queue phandle\n");
+ return NULL;
+ }
+ plat->rxq = queue_spec.args[0];
+
+ /* Get the txready queue as resource from queue manager */
+ ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no txready queue phandle\n");
+ return NULL;
+ }
+ plat->txreadyq = queue_spec.args[0];
+
+ return plat;
+}
+#else
+static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int ixp4xx_eth_probe(struct platform_device *pdev)
{
- char phy_id[MII_BUS_ID_SIZE + 3];
struct phy_device *phydev = NULL;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct eth_plat_info *plat;
- resource_size_t regs_phys;
struct net_device *ndev;
struct resource *res;
struct port *port;
int err;
- plat = dev_get_platdata(dev);
+ if (np) {
+ plat = ixp4xx_of_get_platdata(dev);
+ if (!plat)
+ return -ENODEV;
+ } else {
+ plat = dev_get_platdata(dev);
+ if (!plat)
+ return -ENODEV;
+ plat->npe = pdev->id;
+ switch (plat->npe) {
+ case IXP4XX_ETH_NPEA:
+ /* If the MDIO bus is not up yet, defer probe */
+ break;
+ case IXP4XX_ETH_NPEB:
+ /* On all except IXP43x, NPE-B is used for the MDIO bus.
+ * If there is no NPE-B in the feature set, bail out,
+ * else we have the MDIO bus here.
+ */
+ if (!cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEB_ETH0))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-B */
+ plat->has_mdio = true;
+ }
+ break;
+ case IXP4XX_ETH_NPEC:
+ /* IXP43x lacks NPE-B and uses NPE-C for the MDIO bus
+ * access; if there is no NPE-C there is no bus, nothing works,
+ * so bail out.
+ */
+ if (cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEC_ETH))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-C */
+ plat->has_mdio = true;
+ }
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
return -ENOMEM;
@@ -1378,75 +1479,42 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, dev);
port = netdev_priv(ndev);
port->netdev = ndev;
- port->id = pdev->id;
+ port->id = plat->npe;
/* Get the port resource and remap */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- regs_phys = res->start;
port->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(port->regs))
return PTR_ERR(port->regs);
- switch (port->id) {
- case IXP4XX_ETH_NPEA:
- /* If the MDIO bus is not up yet, defer probe */
- if (!mdio_bus)
- return -EPROBE_DEFER;
- break;
- case IXP4XX_ETH_NPEB:
- /*
- * On all except IXP43x, NPE-B is used for the MDIO bus.
- * If there is no NPE-B in the feature set, bail out, else
- * register the MDIO bus.
- */
- if (!cpu_is_ixp43x()) {
- if (!(ixp4xx_read_feature_bits() &
- IXP4XX_FEATURE_NPEB_ETH0))
- return -ENODEV;
- /* Else register the MDIO bus on NPE-B */
- if ((err = ixp4xx_mdio_register(port->regs)))
- return err;
- }
- if (!mdio_bus)
- return -EPROBE_DEFER;
- break;
- case IXP4XX_ETH_NPEC:
- /*
- * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access,
- * of there is no NPE-C, no bus, nothing works, so bail out.
- */
- if (cpu_is_ixp43x()) {
- if (!(ixp4xx_read_feature_bits() &
- IXP4XX_FEATURE_NPEC_ETH))
- return -ENODEV;
- /* Else register the MDIO bus on NPE-C */
- if ((err = ixp4xx_mdio_register(port->regs)))
- return err;
+ /* Register the MDIO bus if we have it */
+ if (plat->has_mdio) {
+ err = ixp4xx_mdio_register(port->regs);
+ if (err) {
+ dev_err(dev, "failed to register MDIO bus\n");
+ return err;
}
- if (!mdio_bus)
- return -EPROBE_DEFER;
- break;
- default:
- return -ENODEV;
}
+ /* If the instance with the MDIO bus has not yet appeared,
+ * defer probing until that instance has been probed.
+ */
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
ndev->netdev_ops = &ixp4xx_netdev_ops;
ndev->ethtool_ops = &ixp4xx_ethtool_ops;
ndev->tx_queue_len = 100;
+ /* Inherit the DMA masks from the platform device */
+ ndev->dev.dma_mask = dev->dma_mask;
+ ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
if (!(port->npe = npe_request(NPE_ID(port->id))))
return -EIO;
- port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name);
- if (!port->mem_res) {
- err = -EBUSY;
- goto err_npe_rel;
- }
-
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
@@ -1459,12 +1527,24 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- mdio_bus->id, plat->phy);
- phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phydev)) {
- err = PTR_ERR(phydev);
+ if (np) {
+ phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
+ } else {
+ phydev = mdiobus_get_phy(mdio_bus, plat->phy);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ dev_err(dev, "could not connect phydev (%d)\n", err);
+ goto err_free_mem;
+ }
+ err = phy_connect_direct(ndev, phydev, ixp4xx_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err)
+ goto err_free_mem;
+
+ }
+ if (!phydev) {
+ err = -ENODEV;
+ dev_err(dev, "no phydev\n");
goto err_free_mem;
}
@@ -1482,8 +1562,6 @@ err_phy_dis:
phy_disconnect(phydev);
err_free_mem:
npe_port_tab[NPE_ID(port->id)] = NULL;
- release_resource(port->mem_res);
-err_npe_rel:
npe_release(port->npe);
return err;
}
@@ -1499,12 +1577,21 @@ static int ixp4xx_eth_remove(struct platform_device *pdev)
ixp4xx_mdio_remove();
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
- release_resource(port->mem_res);
return 0;
}
+static const struct of_device_id ixp4xx_eth_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-ethernet",
+ },
+ { },
+};
+
static struct platform_driver ixp4xx_eth_driver = {
- .driver.name = DRV_NAME,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ixp4xx_eth_of_match),
+ },
.probe = ixp4xx_eth_probe,
.remove = ixp4xx_eth_remove,
};
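For reference, a device tree node matching what ixp4xx_of_get_platdata() parses might look as follows (the address, phandle targets, and PHY properties are illustrative; only compatible, intel,npe-handle, queue-rx, queue-txready, and the mdio child follow from the code above):

	ethernet@c8009000 {
		compatible = "intel,ixp4xx-ethernet";
		reg = <0xc8009000 0x1000>;
		queue-rx = <&qmgr 4>;
		queue-txready = <&qmgr 21>;
		intel,npe-handle = <&npe 1>;
		phy-mode = "mii";
		phy-handle = <&phy1>;

		mdio {
			#address-cells = <1>;
			#size-cells = <0>;

			phy1: ethernet-phy@1 {
				reg = <1>;
			};
		};
	};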
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index f722079dfb6a..846bf41c2717 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -38,22 +38,6 @@ config DEFXX
To compile this driver as a module, choose M here: the module
will be called defxx. If unsure, say N.
-config DEFXX_MMIO
- bool
- prompt "Use MMIO instead of PIO" if PCI || EISA
- depends on DEFXX
- default n if PCI || EISA
- default y
- help
- This instructs the driver to use EISA or PCI memory-mapped I/O
- (MMIO) as appropriate instead of programmed I/O ports (PIO).
- Enabling this gives an improvement in processing time in parts
- of the driver, but it may cause problems with EISA (DEFEA)
- adapters. TURBOchannel does not have the concept of I/O ports,
- so MMIO is always used for these (DEFTA) adapters.
-
- If unsure, say N.
-
config SKFP
tristate "SysKonnect FDDI PCI support"
depends on FDDI && PCI
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 077c68498f04..6d1e3f49a3d3 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -21,7 +21,7 @@
* LVS Lawrence V. Stefani <lstefani@yahoo.com>
*
* Maintainers:
- * macro Maciej W. Rozycki <macro@linux-mips.org>
+ * macro Maciej W. Rozycki <macro@orcam.me.uk>
*
* Credits:
* I'd like to thank Patricia Cross for helping me get started with
@@ -197,6 +197,7 @@
* 23 Oct 2006 macro Big-endian host support.
* 14 Dec 2006 macro TURBOchannel support.
* 01 Jul 2014 macro Fixes for DMA on 64-bit hosts.
+ * 10 Mar 2021 macro Dynamic MMIO vs port I/O.
*/
/* Include files */
@@ -225,8 +226,8 @@
/* Version information string should be updated prior to each new release! */
#define DRV_NAME "defxx"
-#define DRV_VERSION "v1.11"
-#define DRV_RELDATE "2014/07/01"
+#define DRV_VERSION "v1.12"
+#define DRV_RELDATE "2021/03/10"
static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -253,10 +254,10 @@ static const char version[] =
#define DFX_BUS_TC(dev) 0
#endif
-#ifdef CONFIG_DEFXX_MMIO
-#define DFX_MMIO 1
+#if defined(CONFIG_EISA) || defined(CONFIG_PCI)
+#define dfx_use_mmio bp->mmio
#else
-#define DFX_MMIO 0
+#define dfx_use_mmio true
#endif
/* Define module-wide (static) routines */
@@ -374,8 +375,6 @@ static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
struct device __maybe_unused *bdev = bp->bus_dev;
- int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
if (dfx_use_mmio)
dfx_writel(bp, offset, data);
@@ -398,8 +397,6 @@ static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
struct device __maybe_unused *bdev = bp->bus_dev;
- int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
if (dfx_use_mmio)
dfx_readl(bp, offset, data);
@@ -421,7 +418,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
* None
*
* Arguments:
- * bdev - pointer to device information
+ * bp - pointer to board information
* bar_start - pointer to store the start addresses
* bar_len - pointer to store the lengths of the areas
*
@@ -431,13 +428,13 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
* Side Effects:
* None
*/
-static void dfx_get_bars(struct device *bdev,
+static void dfx_get_bars(DFX_board_t *bp,
resource_size_t *bar_start, resource_size_t *bar_len)
{
+ struct device *bdev = bp->bus_dev;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
if (dfx_bus_pci) {
int num = dfx_use_mmio ? 0 : 1;
@@ -495,6 +492,13 @@ static const struct net_device_ops dfx_netdev_ops = {
.ndo_set_mac_address = dfx_ctl_set_mac_address,
};
+static void dfx_register_res_err(const char *print_name, bool mmio,
+ unsigned long start, unsigned long len)
+{
+ pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
+ print_name, mmio ? "MMIO" : "I/O", len, start);
+}
+
/*
* ================
* = dfx_register =
@@ -528,8 +532,6 @@ static int dfx_register(struct device *bdev)
static int version_disp;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
- int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
const char *print_name = dev_name(bdev);
struct net_device *dev;
DFX_board_t *bp; /* board pointer */
@@ -567,46 +569,48 @@ static int dfx_register(struct device *bdev)
bp->bus_dev = bdev;
dev_set_drvdata(bdev, dev);
- dfx_get_bars(bdev, bar_start, bar_len);
- if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
- pr_err("%s: Cannot use MMIO, no address set, aborting\n",
- print_name);
- pr_err("%s: Run ECU and set adapter's MMIO location\n",
- print_name);
- pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
- "\n", print_name);
- err = -ENXIO;
- goto err_out;
+ bp->mmio = true;
+
+ dfx_get_bars(bp, bar_start, bar_len);
+ if (bar_len[0] == 0 ||
+ (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
+ bp->mmio = false;
+ dfx_get_bars(bp, bar_start, bar_len);
}
- if (dfx_use_mmio)
+ if (dfx_use_mmio) {
region = request_mem_region(bar_start[0], bar_len[0],
- print_name);
- else
- region = request_region(bar_start[0], bar_len[0], print_name);
+ bdev->driver->name);
+ if (!region && (dfx_bus_eisa || dfx_bus_pci)) {
+ bp->mmio = false;
+ dfx_get_bars(bp, bar_start, bar_len);
+ }
+ }
+ if (!dfx_use_mmio)
+ region = request_region(bar_start[0], bar_len[0],
+ bdev->driver->name);
if (!region) {
- pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
- "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
- (long)bar_len[0], (long)bar_start[0]);
+ dfx_register_res_err(print_name, dfx_use_mmio,
+ bar_start[0], bar_len[0]);
err = -EBUSY;
goto err_out_disable;
}
if (bar_start[1] != 0) {
- region = request_region(bar_start[1], bar_len[1], print_name);
+ region = request_region(bar_start[1], bar_len[1],
+ bdev->driver->name);
if (!region) {
- pr_err("%s: Cannot reserve I/O resource "
- "0x%lx @ 0x%lx, aborting\n", print_name,
- (long)bar_len[1], (long)bar_start[1]);
+ dfx_register_res_err(print_name, 0,
+ bar_start[1], bar_len[1]);
err = -EBUSY;
goto err_out_csr_region;
}
}
if (bar_start[2] != 0) {
- region = request_region(bar_start[2], bar_len[2], print_name);
+ region = request_region(bar_start[2], bar_len[2],
+ bdev->driver->name);
if (!region) {
- pr_err("%s: Cannot reserve I/O resource "
- "0x%lx @ 0x%lx, aborting\n", print_name,
- (long)bar_len[2], (long)bar_start[2]);
+ dfx_register_res_err(print_name, 0,
+ bar_start[2], bar_len[2]);
err = -EBUSY;
goto err_out_bh_region;
}
@@ -721,7 +725,6 @@ static void dfx_bus_init(struct net_device *dev)
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
u8 val;
DBG_printk("In dfx_bus_init...\n");
@@ -1041,7 +1044,6 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
int alloc_size; /* total buffer size needed */
char *top_v, *curr_v; /* virtual addrs into memory block */
dma_addr_t top_p, curr_p; /* physical addrs into memory block */
@@ -3695,8 +3697,6 @@ static void dfx_unregister(struct device *bdev)
struct net_device *dev = dev_get_drvdata(bdev);
DFX_board_t *bp = netdev_priv(dev);
int dfx_bus_pci = dev_is_pci(bdev);
- int dfx_bus_tc = DFX_BUS_TC(bdev);
- int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
resource_size_t bar_start[3] = {0}; /* pointers to ports */
resource_size_t bar_len[3] = {0}; /* resource lengths */
int alloc_size; /* total buffer size used */
@@ -3716,7 +3716,7 @@ static void dfx_unregister(struct device *bdev)
dfx_bus_uninit(dev);
- dfx_get_bars(bdev, bar_start, bar_len);
+ dfx_get_bars(bp, bar_start, bar_len);
if (bar_start[2] != 0)
release_region(bar_start[2], bar_len[2]);
if (bar_start[1] != 0)
@@ -3748,7 +3748,7 @@ static const struct pci_device_id dfx_pci_table[] = {
MODULE_DEVICE_TABLE(pci, dfx_pci_table);
static struct pci_driver dfx_pci_driver = {
- .name = "defxx",
+ .name = DRV_NAME,
.id_table = dfx_pci_table,
.probe = dfx_pci_register,
.remove = dfx_pci_unregister,
@@ -3779,7 +3779,7 @@ MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
static struct eisa_driver dfx_eisa_driver = {
.id_table = dfx_eisa_table,
.driver = {
- .name = "defxx",
+ .name = DRV_NAME,
.bus = &eisa_bus_type,
.probe = dfx_dev_register,
.remove = dfx_dev_unregister,
@@ -3800,7 +3800,7 @@ MODULE_DEVICE_TABLE(tc, dfx_tc_table);
static struct tc_driver dfx_tc_driver = {
.id_table = dfx_tc_table,
.driver = {
- .name = "defxx",
+ .name = DRV_NAME,
.bus = &tc_bus_type,
.probe = dfx_dev_register,
.remove = dfx_dev_unregister,
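The defxx changes replace the build-time CONFIG_DEFXX_MMIO choice with a runtime decision: prefer MMIO, and fall back to port I/O when the MMIO BAR is absent, unset, or cannot be reserved. The resource logic in dfx_register() reduces to roughly this (a simplified sketch):

	/* Simplified shape of the new probing order. */
	bp->mmio = true;			/* prefer MMIO */
	dfx_get_bars(bp, bar_start, bar_len);
	if (bar_len[0] == 0 ||			/* no MMIO BAR decoded */
	    (dfx_bus_eisa && bar_start[0] == 0)) {	/* ECU left it unset */
		bp->mmio = false;		/* retry as port I/O */
		dfx_get_bars(bp, bar_start, bar_len);
	}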
diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h
index 9d30fde2ef3c..8193713e12db 100644
--- a/drivers/net/fddi/defxx.h
+++ b/drivers/net/fddi/defxx.h
@@ -16,7 +16,7 @@
* LVS Lawrence V. Stefani <lstefani@yahoo.com>
*
* Maintainers:
- * macro Maciej W. Rozycki <macro@linux-mips.org>
+ * macro Maciej W. Rozycki <macro@orcam.me.uk>
*
* Modification History:
* Date Name Description
@@ -27,6 +27,7 @@
* 04 Aug 2003 macro Converted to the DMA API.
* 23 Oct 2006 macro Big-endian host support.
* 14 Dec 2006 macro TURBOchannel support.
+ * 10 Mar 2021 macro Dynamic MMIO vs port I/O.
*/
#ifndef _DEFXX_H_
@@ -1776,6 +1777,8 @@ typedef struct DFX_board_tag
int port;
} base; /* base address */
struct device *bus_dev;
+ /* Whether to use MMIO or port I/O. */
+ bool mmio;
u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */
u32 req_ttrt; /* requested TTRT value (in 80ns units) */
u32 burst_size; /* adapter burst size (enumerated) */
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index eaf85db53a5e..14f07050b6b1 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -60,7 +60,7 @@
static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
-MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>");
MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 706fa619b703..3814a2ff64ae 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -228,7 +228,7 @@ struct s_phy {
u_char timer1_exp ;
u_char timer2_exp ;
u_char pcm_pad1[1] ;
- int cem_pst ; /* CEM privae state; used for dual homing */
+ int cem_pst ; /* CEM private state; used for dual homing */
struct lem_counter lem ;
#ifdef AMDPLC
struct s_plc plc ;
diff --git a/drivers/net/fddi/skfp/h/smt.h b/drivers/net/fddi/skfp/h/smt.h
index a0dbc0f57a55..b19c7a99d32a 100644
--- a/drivers/net/fddi/skfp/h/smt.h
+++ b/drivers/net/fddi/skfp/h/smt.h
@@ -411,7 +411,7 @@ struct smt_p_reason {
#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */
#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */
#define SMT_RDF_RANGE 0x8 /* out of range */
-#define SMT_RDF_AUTHOR 0x9 /* not autohorized */
+#define SMT_RDF_AUTHOR 0x9 /* not authorized */
#define SMT_RDF_LENGTH 0x0a /* length error */
#define SMT_RDF_TOOLONG 0x0b /* length error */
#define SMT_RDF_SBA 0x0d /* SBA denied */
@@ -450,7 +450,7 @@ struct smt_p_version {
struct smt_p_0015 {
struct smt_para para ; /* generic parameter header */
- u_int res_type ; /* recsource type */
+ u_int res_type ; /* resource type */
} ;
#define SYNC_BW 0x00000001L /* Synchronous Bandwidth */
@@ -489,7 +489,7 @@ struct smt_p_0017 {
struct smt_p_0018 {
struct smt_para para ; /* generic parameter header */
int sba_ov_req ; /* total sync bandwidth req for overhead*/
-} ; /* measuered in bytes per T_Neg */
+} ; /* measured in bytes per T_Neg */
/*
* P19 : SBA Allocation Address
@@ -562,7 +562,7 @@ struct smt_p_fsc {
#define FSC_TYPE2 2 /* Special A/C indicator forwarding */
/*
- * P00 21 : user defined authoriziation (see pmf.c)
+ * P00 21 : user defined authorization (see pmf.c)
*/
#define SMT_P_AUTHOR 0x0021
@@ -764,10 +764,8 @@ struct smt_sif_operation {
struct smt_p_setcount setcount ; /* Set Count mandatory */
#endif
/* must be last */
- struct smt_p_lem lem[1] ; /* phy lem status */
+ struct smt_p_lem lem[]; /* phy lem status */
} ;
-#define SIZEOF_SMT_SIF_OPERATION (sizeof(struct smt_sif_operation)- \
- sizeof(struct smt_p_lem))
/*
* ECF : echo frame
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 774a6e3b0a67..6b68a53f1b38 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -1063,9 +1063,9 @@ static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
#endif
if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY,
- SIZEOF_SMT_SIF_OPERATION+ports*sizeof(struct smt_p_lem))))
+ struct_size(sif, lem, ports))))
return ;
- sif = smtod(mb, struct smt_sif_operation *) ;
+ sif = smtod(mb, typeof(sif));
smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */
smt_fill_mac_status(smc,&sif->status) ; /* set mac status */
smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */
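The smt.h/smt.c hunks are a standard conversion from a one-element trailing array plus manual sizeof arithmetic to a C99 flexible array member sized with struct_size(). The idiom in general (a generic sketch, not skfp code):

	#include <linux/overflow.h>

	struct entry { u32 val; };

	struct report {
		u32 count;
		struct entry entries[];	/* was: entries[1] + SIZEOF_ macro */
	};

	/* struct_size(r, entries, n) == sizeof(*r) + n * sizeof(r->entries[0]),
	 * saturating at SIZE_MAX on overflow instead of silently wrapping.
	 */
	struct report *r = kzalloc(struct_size(r, entries, n), GFP_KERNEL);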
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 42f31c681846..1ab94b5f9bbf 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -461,6 +461,7 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
if (err < 0)
return ERR_PTR(err);
+ udp_allow_gso(sock->sk);
return sock;
}
@@ -891,7 +892,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ if (!pskb_inet_may_pull(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -988,7 +989,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ if (!pskb_inet_may_pull(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
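The switch from pskb_network_may_pull(skb, sizeof(struct iphdr)) to pskb_inet_may_pull() matters because the transmit path can be handed the other address family (e.g. an IPv6 packet egressing an IPv4-configured tunnel); the helper sizes the pull from skb->protocol rather than from the tunnel's own family. Conceptually it behaves like this (a simplified paraphrase of the helper in include/net/ip_tunnels.h):

	static inline bool pskb_inet_may_pull(struct sk_buff *skb)
	{
		int nhlen;

		switch (skb->protocol) {
		case htons(ETH_P_IPV6):
			nhlen = sizeof(struct ipv6hdr);
			break;
		case htons(ETH_P_IP):
			nhlen = sizeof(struct iphdr);
			break;
		default:
			nhlen = 0;	/* nothing to validate */
		}

		return pskb_network_may_pull(skb, nhlen);
	}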
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 59ac04a610ad..442c520ab8f3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -269,7 +269,7 @@ int rndis_filter_receive(struct net_device *ndev,
int rndis_filter_set_device_mac(struct netvsc_device *ndev,
const char *mac);
-void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
+int netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
@@ -1718,4 +1718,8 @@ struct rndis_message {
#define TRANSPORT_INFO_IPV6_TCP 0x10
#define TRANSPORT_INFO_IPV6_UDP 0x20
+#define RETRY_US_LO 5000
+#define RETRY_US_HI 10000
+#define RETRY_MAX 2000 /* >10 sec */
+
#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index c64cc7639c39..9d07c9ce4be2 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,12 +31,13 @@
* Switch the data path from the synthetic interface to the VF
* interface.
*/
-void netvsc_switch_datapath(struct net_device *ndev, bool vf)
+int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+ int ret, retry = 0;
/* Block sending traffic to VF if it's about to be gone */
if (!vf)
@@ -51,15 +52,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
init_pkt->msg.v4_msg.active_dp.active_datapath =
NVSP_DATAPATH_SYNTHETIC;
+again:
trace_nvsp_send(ndev, init_pkt);
- vmbus_sendpacket(dev->channel, init_pkt,
+ ret = vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
- (unsigned long)init_pkt,
- VM_PKT_DATA_INBAND,
+ (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ /* If we failed to switch to/from the VF, let data_path_is_vf stay false,
+ * so we use the synthetic path to send data.
+ */
+ if (ret) {
+ if (ret != -EAGAIN) {
+ netdev_err(ndev,
+ "Unable to send sw datapath msg, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (retry++ < RETRY_MAX) {
+ usleep_range(RETRY_US_LO, RETRY_US_HI);
+ goto again;
+ } else {
+ netdev_err(
+ ndev,
+ "Retry failed to send sw datapath msg, err: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
wait_for_completion(&nv_dev->channel_init_wait);
net_device_ctx->data_path_is_vf = vf;
+
+ return 0;
}
/* Worker to setup sub channels on initial setup
@@ -1017,6 +1044,26 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
}
/* RCU already held by caller */
+/* The batching/bouncing logic is designed to optimize
+ * performance.
+ *
+ * For small, non-LSO packets we copy the packet to a send buffer
+ * which is pre-registered with the Hyper-V side. This enables the
+ * hypervisor to avoid remapping the aperture to access the packet
+ * descriptor and data.
+ *
+ * If we already started using a buffer and the netdev is transmitting
+ * a burst of packets, keep on copying into the buffer until it is
+ * full or we are done collecting a burst. If there is an existing
+ * buffer with space for the RNDIS descriptor but not the packet, copy
+ * the RNDIS descriptor to the buffer, keeping the packet in place.
+ *
+ * If we do batching and send more than one packet using a single
+ * NetVSC message, free the SKBs of the packets copied, except for the
+ * last packet. This is done to streamline the handling of the case
+ * where the last packet only had the RNDIS descriptor copied to the
+ * send buffer, with the data pointers included in the NetVSC message.
+ */
int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
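The datapath-switch retry above is the usual bounded-retry shape: retry only on -EAGAIN, sleep between attempts, and give up after RETRY_MAX tries (the constants were moved into hyperv_net.h so netvsc.c can share them). Stripped of the driver specifics it is (try_send() is a placeholder):

	int retry = 0, ret;

	do {
		ret = try_send();
		if (ret != -EAGAIN)
			break;		/* success or a hard error */
		usleep_range(RETRY_US_LO, RETRY_US_HI);
	} while (++retry < RETRY_MAX);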
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 15f262b70489..f682a5572d84 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -38,9 +38,6 @@
#include "hyperv_net.h"
#define RING_SIZE_MIN 64
-#define RETRY_US_LO 5000
-#define RETRY_US_HI 10000
-#define RETRY_MAX 2000 /* >10 sec */
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
@@ -1612,34 +1609,23 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
- memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ ethtool_sprintf(&p, netvsc_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
- memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
+ ethtool_sprintf(&p, vf_stats[i].name);
for (i = 0; i < nvdev->num_chn; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_xdp_drop", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
}
for_each_present_cpu(cpu) {
- for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
- sprintf(p, pcpu_stats[i].name, cpu);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
+ ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
}
break;
@@ -2311,6 +2297,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
+ struct net_device *ndev;
struct pci_dev *pdev;
u32 serial;
@@ -2333,8 +2320,17 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
if (!ndev_ctx->vf_alloc)
continue;
- if (ndev_ctx->vf_serial == serial)
- return hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev_ctx->vf_serial != serial)
+ continue;
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev->addr_len != vf_netdev->addr_len ||
+ memcmp(ndev->perm_addr, vf_netdev->perm_addr,
+ ndev->addr_len) != 0)
+ continue;
+
+ return ndev;
}
netdev_notice(vf_netdev,
@@ -2413,6 +2409,7 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
bool vf_is_up = false;
+ int ret;
if (event != NETDEV_GOING_DOWN)
vf_is_up = netif_running(vf_netdev);
@@ -2429,9 +2426,17 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
- netvsc_switch_datapath(ndev, vf_is_up);
- netdev_info(ndev, "Data path switched %s VF: %s\n",
- vf_is_up ? "to" : "from", vf_netdev->name);
+ ret = netvsc_switch_datapath(ndev, vf_is_up);
+
+ if (ret) {
+ netdev_err(ndev,
+ "Data path failed to switch %s VF: %s, err: %d\n",
+ vf_is_up ? "to" : "from", vf_netdev->name, ret);
+ return NOTIFY_DONE;
+ } else {
+ netdev_info(ndev, "Data path switched %s VF: %s\n",
+ vf_is_up ? "to" : "from", vf_netdev->name);
+ }
return NOTIFY_OK;
}
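The error path above returns NOTIFY_DONE rather than NOTIFY_OK, leaving the
recorded data-path state unchanged. A small sketch of that convention, using
the notifier constants from include/linux/notifier.h (the helper name is
hypothetical):

#define NOTIFY_DONE	0x0000	/* event not (successfully) acted on */
#define NOTIFY_OK	0x0001	/* event handled */

/* Report NOTIFY_OK only when the data path actually switched. */
static int vf_changed_result_sketch(int switch_err)
{
	return switch_err ? NOTIFY_DONE : NOTIFY_OK;
}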
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index c0bf7d78276e..da9135231c07 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -268,7 +268,7 @@ static int hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy,
struct netlink_callback *cb, int flags)
{
void *hdr;
- int res = -EMSGSIZE;
+ int res;
hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
MAC802154_HWSIM_CMD_GET_RADIO);
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index b68f1289b89e..8f99cfa14680 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -1,6 +1,6 @@
config QCOM_IPA
tristate "Qualcomm IPA support"
- depends on 64BIT && NET && QCOM_SMEM
+ depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
select QCOM_MDT_LOADER if ARCH_QCOM
@@ -12,8 +12,7 @@ config QCOM_IPA
that is capable of generic hardware handling of IP packets,
including routing, filtering, and NAT. Currently the IPA
driver supports only basic transport of network traffic
- between the AP and modem, on the Qualcomm SDM845 and SC7180
- SoCs.
+ between the AP and modem.
Note that if selected, the selection type must match that
of QCOM_Q6V5_COMMON (Y or M).
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index afe5df1e6eee..1efe1a88104b 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -7,6 +7,8 @@ ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
- ipa_qmi.o ipa_qmi_msg.o
+ ipa_resource.o ipa_qmi.o ipa_qmi_msg.o
-ipa-y += ipa_data-sdm845.o ipa_data-sc7180.o
+ipa-y += ipa_data-v3.5.1.o ipa_data-v4.2.o \
+ ipa_data-v4.5.o ipa_data-v4.9.o \
+ ipa_data-v4.11.o
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 390d3403386a..9f06663cef26 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -198,7 +198,7 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}
-/* Turn off all GSI interrupts initially */
+/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */
static void gsi_irq_setup(struct gsi *gsi)
{
/* Disable all interrupt types */
@@ -217,12 +217,6 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
-/* Turn off all GSI interrupts when we're all done */
-static void gsi_irq_teardown(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
@@ -351,7 +345,7 @@ void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
- return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
+ return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}
/* Return the ring index of a 32-bit ring offset */
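The lower_32_bits()/upper_32_bits() helpers substituted throughout this patch
are the standard kernel macros, shown below for reference; upper_32_bits()
shifts in two 16-bit steps so it stays well-defined for 32-bit inputs.

#include <stdint.h>

typedef uint32_t u32;

#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))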
@@ -701,17 +695,16 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
- val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
+ val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = evt_ring->ring.addr & GENMASK(31, 0);
+ val = lower_32_bits(evt_ring->ring.addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
-
- val = evt_ring->ring.addr >> 32;
+ val = upper_32_bits(evt_ring->ring.addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
@@ -787,7 +780,7 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
}
}
-/* Program a channel for use */
+/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
@@ -802,24 +795,23 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
channel->tre_ring.index = 0;
/* We program all channels as GPI type/protocol */
- val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
+ val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
- val = u32_encode_bits(size, R_LENGTH_FMASK);
+ val = r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the channel ring,
* respectively.
*/
- val = channel->tre_ring.addr & GENMASK(31, 0);
+ val = lower_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
-
- val = channel->tre_ring.addr >> 32;
+ val = upper_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
/* Command channel gets low weighted round-robin priority */
@@ -829,14 +821,14 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
- /* We enable the doorbell engine for IPA v3.5.1 */
- if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
+ /* No need to use the doorbell engine starting at IPA v4.0 */
+ if (gsi->version < IPA_VERSION_4_0 && doorbell)
val |= USE_DB_ENG_FMASK;
/* v4.0 introduces an escape buffer for prefetch. We use it
* on all but the AP command channel.
*/
- if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
+ if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
/* If not otherwise set, prefetch buffers are used */
if (gsi->version < IPA_VERSION_4_5)
val |= USE_ESCAPE_BUF_ONLY_FMASK;
@@ -844,6 +836,9 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
PREFETCH_MODE_FMASK);
}
+ /* All channels set DB_IN_BYTES */
+ if (gsi->version >= IPA_VERSION_4_9)
+ val |= DB_IN_BYTES;
iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
@@ -873,11 +868,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* All done! */
}
-static void gsi_channel_deprogram(struct gsi_channel *channel)
-{
- /* Nothing to do */
-}
-
static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
struct gsi *gsi = channel->gsi;
@@ -975,7 +965,7 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
- if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
+ if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
gsi_channel_program(channel, doorbell);
@@ -1337,10 +1327,9 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
int ret;
ret = platform_get_irq_byname(pdev, "gsi");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+ if (ret <= 0)
return ret ? : -EINVAL;
- }
+
irq = ret;
ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
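The `return ret ? : -EINVAL;` above uses the GCC conditional-with-omitted-
operand extension, common in kernel code; it is equivalent to the portable
form sketched here (function name is illustrative):

/* platform_get_irq_byname() returns the IRQ number (> 0) on success,
 * a negative errno on failure, and may return 0, which is mapped to
 * -EINVAL here.
 */
static int irq_or_error(int ret)
{
	return ret ? ret : -EINVAL;
}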
@@ -1366,7 +1355,7 @@ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
u32 tre_index;
/* Event xfer_ptr records the TRE it's associated with */
- tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
+ tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
return gsi_channel_trans_mapped(channel, tre_index);
@@ -1439,20 +1428,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
- size_t size = count * GSI_RING_ELEMENT_SIZE;
+ u32 size = count * GSI_RING_ELEMENT_SIZE;
struct device *dev = gsi->dev;
dma_addr_t addr;
- /* Hardware requires a 2^n ring size, with alignment equal to size */
+ /* Hardware requires a 2^n ring size, with alignment equal to size.
+ * The DMA address returned by dma_alloc_coherent() is guaranteed to
+ * be a power-of-2 number of pages, which satisfies the requirement.
+ */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
- if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, addr);
- dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
- size);
- return -EINVAL; /* Not a good error value, but distinct */
- } else if (!ring->virt) {
+ if (!ring->virt)
return -ENOMEM;
- }
+
ring->addr = addr;
ring->count = count;
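The replaced error path used to verify `addr % size` explicitly. A userspace
sketch of why the new comment can rely on the allocator instead, assuming the
returned block is aligned to its own power-of-2 size (names are illustrative):

#include <assert.h>
#include <stdint.h>

/* If addr is aligned to a power-of-2 block at least as large as the
 * (power-of-2) ring size, it is necessarily aligned to the ring size.
 */
static void check_ring_alignment(uint64_t addr, uint32_t size)
{
	assert(size != 0 && (size & (size - 1)) == 0);	/* power of 2 */
	assert((addr & ((uint64_t)size - 1)) == 0);	/* size-aligned */
}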
@@ -1625,18 +1612,6 @@ static u32 gsi_event_bitmap_init(u32 evt_ring_max)
return event_bitmap;
}
-/* Setup function for event rings */
-static void gsi_evt_ring_setup(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
-/* Inverse of gsi_evt_ring_setup() */
-static void gsi_evt_ring_teardown(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
@@ -1686,7 +1661,6 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
netif_napi_del(&channel->napi);
- gsi_channel_deprogram(channel);
gsi_channel_de_alloc_command(gsi, channel_id);
gsi_evt_ring_reset_command(gsi, evt_ring_id);
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
@@ -1761,7 +1735,6 @@ static int gsi_channel_setup(struct gsi *gsi)
u32 mask;
int ret;
- gsi_evt_ring_setup(gsi);
gsi_irq_enable(gsi);
mutex_lock(&gsi->mutex);
@@ -1821,7 +1794,6 @@ err_unwind:
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
- gsi_evt_ring_teardown(gsi);
return ret;
}
@@ -1850,7 +1822,6 @@ static void gsi_channel_teardown(struct gsi *gsi)
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
- gsi_evt_ring_teardown(gsi);
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
@@ -1858,7 +1829,6 @@ int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
- int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1867,7 +1837,7 @@ int gsi_setup(struct gsi *gsi)
return -EIO;
}
- gsi_irq_setup(gsi);
+ gsi_irq_setup(gsi); /* No matching teardown required */
val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
@@ -1901,18 +1871,13 @@ int gsi_setup(struct gsi *gsi)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- ret = gsi_channel_setup(gsi);
- if (ret)
- gsi_irq_teardown(gsi);
-
- return ret;
+ return gsi_channel_setup(gsi);
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
- gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
@@ -1954,7 +1919,7 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-/* Init function for event rings */
+/* Init function for event rings; there is no gsi_evt_ring_exit() */
static void gsi_evt_ring_init(struct gsi *gsi)
{
u32 evt_ring_id = 0;
@@ -1966,12 +1931,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}
-/* Inverse of gsi_evt_ring_init() */
-static void gsi_evt_ring_exit(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
@@ -2116,7 +2075,7 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
- gsi_evt_ring_init(gsi);
+ gsi_evt_ring_init(gsi); /* No matching exit required */
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
@@ -2150,7 +2109,6 @@ err_unwind:
}
gsi_channel_exit_one(&gsi->channel[data->channel_id]);
}
- gsi_evt_ring_exit(gsi);
return ret;
}
@@ -2164,8 +2122,6 @@ static void gsi_channel_exit(struct gsi *gsi)
gsi_channel_exit_one(&gsi->channel[channel_id]);
while (channel_id--);
gsi->modem_channel_bitmap = 0;
-
- gsi_evt_ring_exit(gsi);
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index efc980f96109..d5996bdb20ef 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -16,8 +16,8 @@
#include "ipa_version.h"
/* Maximum number of channels and event rings supported by the driver */
-#define GSI_CHANNEL_COUNT_MAX 17
-#define GSI_EVT_RING_COUNT_MAX 13
+#define GSI_CHANNEL_COUNT_MAX 23
+#define GSI_EVT_RING_COUNT_MAX 20
/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX 64
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index 1785c9d3344d..ea333a244cf5 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -14,7 +14,7 @@ struct gsi_trans;
struct gsi_ring;
struct gsi_channel;
-#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
+#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
/* Return the entry that follows one provided in a transaction pool */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
@@ -100,7 +100,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel);
/**
* gsi_ring_virt() - Return virtual address for a ring entry
* @ring: Ring whose address is to be translated
- * @addr: Index (slot number) of entry
+ * @index: Index (slot number) of entry
*/
void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 1622d8cf8dea..b4ac0258d6e1 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -64,6 +64,21 @@
(0x0000c01c + 0x1000 * (ee))
/* All other register offsets are relative to gsi->virt */
+
+/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
+enum gsi_channel_type {
+ GSI_CHANNEL_TYPE_MHI = 0x0,
+ GSI_CHANNEL_TYPE_XHCI = 0x1,
+ GSI_CHANNEL_TYPE_GPI = 0x2,
+ GSI_CHANNEL_TYPE_XDCI = 0x3,
+ GSI_CHANNEL_TYPE_WDI2 = 0x4,
+ GSI_CHANNEL_TYPE_GCI = 0x5,
+ GSI_CHANNEL_TYPE_WDI3 = 0x6,
+ GSI_CHANNEL_TYPE_MHIP = 0x7,
+ GSI_CHANNEL_TYPE_AQC = 0x8,
+ GSI_CHANNEL_TYPE_11AD = 0x9,
+};
+
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
@@ -78,19 +93,35 @@
#define CHSTATE_FMASK GENMASK(23, 20)
#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
-/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
-enum gsi_channel_type {
- GSI_CHANNEL_TYPE_MHI = 0x0,
- GSI_CHANNEL_TYPE_XHCI = 0x1,
- GSI_CHANNEL_TYPE_GPI = 0x2,
- GSI_CHANNEL_TYPE_XDCI = 0x3,
-};
+/* Encoded value for CH_C_CNTXT_0 register channel protocol fields */
+static inline u32
+chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
+{
+ u32 val;
+
+ val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK);
+ if (version < IPA_VERSION_4_5)
+ return val;
+
+ /* Encode upper bit(s) as well */
+ type >>= hweight32(CHTYPE_PROTOCOL_FMASK);
+ val |= u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK);
+
+ return val;
+}
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
(0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
-#define R_LENGTH_FMASK GENMASK(15, 0)
+
+/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
+static inline u32 r_length_encoded(enum ipa_version version, u32 length)
+{
+ if (version < IPA_VERSION_4_9)
+ return u32_encode_bits(length, GENMASK(15, 0));
+ return u32_encode_bits(length, GENMASK(19, 0));
+}
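A standalone sketch of the width change these encoders implement: the length
field occupies bits 15:0 before IPA v4.9 and grows to bits 19:0 from v4.9 on
(the enum values below are placeholders, not the driver's ipa_version values):

#include <stdint.h>
#include <stdio.h>

enum { IPA_V4_2 = 2, IPA_V4_9 = 9 };	/* placeholder version tokens */

/* The field starts at bit 0, so encoding reduces to masking. */
static uint32_t r_length_sketch(int version, uint32_t length)
{
	uint32_t mask = (version < IPA_V4_9) ? 0xffff : 0xfffff;

	return length & mask;
}

int main(void)
{
	printf("%#x\n", r_length_sketch(IPA_V4_2, 0x4000));	/* 0x4000 */
	printf("%#x\n", r_length_sketch(IPA_V4_9, 0x12345));	/* 0x12345 */
	return 0;
}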
#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
@@ -114,6 +145,9 @@ enum gsi_channel_type {
/* The next two fields are present for IPA v4.5 and above */
#define PREFETCH_MODE_FMASK GENMASK(13, 10)
#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16)
+/* The next field is present for IPA v4.9 and above */
+#define DB_IN_BYTES GENMASK(24, 24)
+
/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
enum gsi_prefetch_mode {
GSI_USE_PREFETCH_BUFS = 0x0,
@@ -146,19 +180,25 @@ enum gsi_prefetch_mode {
GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
(0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define EV_CHTYPE_FMASK GENMASK(3, 0)
#define EV_EE_FMASK GENMASK(7, 4)
#define EV_EVCHID_FMASK GENMASK(15, 8)
#define EV_INTYPE_FMASK GENMASK(16, 16)
#define EV_CHSTATE_FMASK GENMASK(23, 20)
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
-/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
(0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
-#define EV_R_LENGTH_FMASK GENMASK(15, 0)
+/* Encoded value for EV_CH_E_CNTXT_1 register EV_R_LENGTH field */
+static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
+{
+ if (version < IPA_VERSION_4_9)
+ return u32_encode_bits(length, GENMASK(15, 0));
+ return u32_encode_bits(length, GENMASK(19, 0));
+}
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
@@ -248,6 +288,7 @@ enum gsi_ch_cmd_opcode {
GSI_CH_STOP = 0x2,
GSI_CH_RESET = 0x9,
GSI_CH_DE_ALLOC = 0xa,
+ GSI_CH_DB_STOP = 0xb,
};
#define GSI_EV_CH_CMD_OFFSET \
@@ -278,6 +319,7 @@ enum gsi_generic_cmd_opcode {
GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
};
+/* The next register is present for IPA v3.5.1 and above */
#define GSI_GSI_HW_PARAM_2_OFFSET \
GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
@@ -300,7 +342,7 @@ enum gsi_generic_cmd_opcode {
enum gsi_iram_size {
IRAM_SIZE_ONE_KB = 0x0,
IRAM_SIZE_TWO_KB = 0x1,
-/* The next two values are available for IPA v4.0 and above */
+ /* The next two values are available for IPA v4.0 and above */
IRAM_SIZE_TWO_N_HALF_KB = 0x2,
IRAM_SIZE_THREE_KB = 0x3,
/* The next two values are available for IPA v4.5 and above */
@@ -424,6 +466,8 @@ enum gsi_general_id {
GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
(0x0001f200 + 0x4000 * (ee))
+
+/* Fields below are present for IPA v3.5.1 and above */
#define ERR_ARG3_FMASK GENMASK(3, 0)
#define ERR_ARG2_FMASK GENMASK(7, 4)
#define ERR_ARG1_FMASK GENMASK(11, 8)
@@ -474,7 +518,4 @@ enum gsi_generic_ee_result {
GENERIC_EE_NO_RESOURCES = 0x7,
};
-#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
-#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
-
#endif /* _GSI_REG_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 6c3ed5b17b80..8c795a6a8598 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -91,7 +91,7 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
void *virt;
#ifdef IPA_VALIDATE
- if (!size || size % 8)
+ if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
@@ -141,7 +141,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
void *virt;
#ifdef IPA_VALIDATE
- if (!size || size % 8)
+ if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
@@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
size = __roundup_pow_of_two(size);
total_size = (count + max_alloc - 1) * size;
- /* The allocator will give us a power-of-2 number of pages. But we
- * can't guarantee that, so request it. That way we won't waste any
- * memory that would be available beyond the required space.
- *
- * Note that gsi_trans_pool_exit_dma() assumes the total allocated
+ /* The allocator will give us a power-of-2 number of pages
+ * sufficient to satisfy our request. Round up our requested
+ * size to avoid any unused space in the allocation. This way
+ * gsi_trans_pool_exit_dma() can assume the total allocated
* size is exactly (count * size).
*/
total_size = PAGE_SIZE << get_order(total_size);
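A worked userspace example of that page-order rounding, assuming 4 KiB pages;
get_order_sketch() approximates the kernel's get_order():

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Smallest order such that (PAGE_SIZE << order) covers the request */
static unsigned int get_order_sketch(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* get_order(5000) == 1, so the pool is rounded up to 8192 bytes */
	printf("%lu\n", PAGE_SIZE << get_order_sketch(5000));
	return 0;
}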
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 3a4ab8a94d82..17fd1822d8a9 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -71,7 +71,7 @@ struct gsi_trans {
/**
* gsi_trans_pool_init() - Initialize a pool of structures for transactions
- * @gsi: GSI pointer
+ * @pool: GSI transaction pool pointer
* @size: Size of elements in the pool
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
@@ -123,7 +123,8 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
/**
- * gsi_trans_pool_exit() - Inverse of gsi_trans_pool_init()
+ * gsi_trans_pool_exit_dma() - Inverse of gsi_trans_pool_init_dma()
+ * @dev: Device used for DMA
* @pool: Pool pointer
*/
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 802077631371..e7ff376cb5b7 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -44,6 +44,8 @@ enum ipa_flag {
* @version: IPA hardware version
* @pdev: Platform device
* @completion: Used to signal pipeline clear transfer complete
+ * @nb: Notifier block used for remoteproc SSR
+ * @notifier: Remoteproc SSR notifier
* @smp2p: SMP2P information
* @clock: IPA clocking information
* @table_addr: DMA address of filter/route table content
@@ -58,13 +60,12 @@ enum ipa_flag {
* @mem_size: Total size (bytes) of memory at @mem_virt
* @mem: Array of IPA-local memory region descriptors
* @imem_iova: I/O virtual address of IPA region in IMEM
- * @imem_size; Size of IMEM region
+ * @imem_size: Size of IMEM region
* @smem_iova: I/O virtual address of IPA region in SMEM
- * @smem_size; Size of SMEM region
+ * @smem_size: Size of SMEM region
* @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory
- * @wakeup_source: Wakeup source information
* @available: Bit mask indicating endpoints hardware supports
* @filter_map: Bit mask indicating endpoints that support filtering
* @initialized: Bit mask indicating endpoints initialized
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d73b03a80ef8..525cdf28d9ea 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -71,13 +71,12 @@ struct ipa_cmd_hw_hdr_init_local {
/* IPA_CMD_REGISTER_WRITE */
-/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
-
+/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
struct ipa_cmd_register_write {
- __le16 flags; /* Unused/reserved for IPA v3.5.1 */
+ __le16 flags; /* Unused/reserved prior to IPA v4.0 */
__le16 offset;
__le32 value;
__le32 value_mask;
@@ -85,12 +84,12 @@ struct ipa_cmd_register_write {
};
/* Field masks for ipa_cmd_register_write structure fields */
-/* The next field is present for IPA v4.0 and above */
+/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11)
-/* The next field is present for IPA v3.5.1 only */
+/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15)
-/* The next field and its values are present for IPA v3.5.1 only */
+/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0)
/* IPA_CMD_IP_PACKET_INIT */
@@ -123,7 +122,7 @@ struct ipa_cmd_hw_dma_mem_mem {
/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0)
-/* The next two fields are present for IPA v3.5.1 only. */
+/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2)
@@ -154,7 +153,7 @@ static void ipa_cmd_validate_build(void)
* of entries, as do the IPv4 and IPv6 route tables.
*/
-#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
+#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
@@ -253,11 +252,12 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
u32 bit_count;
/* The maximum offset in a register_write immediate command depends
- * on the version of IPA. IPA v3.5.1 supports a 16 bit offset, but
- * newer versions allow some additional high-order bits.
+ * on the version of IPA. A 16 bit offset is always supported,
+ * but starting with IPA v4.0 some additional high-order bits are
+ * allowed.
*/
bit_count = BITS_PER_BYTE * sizeof(payload->offset);
- if (ipa->version != IPA_VERSION_3_5_1)
+ if (ipa->version >= IPA_VERSION_4_0)
bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
BUILD_BUG_ON(bit_count > 32);
offset_max = ~0U >> (32 - bit_count);
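For concreteness, a small sketch of the resulting maxima, assuming the 16-bit
payload offset field and the 4 extra OFFSET_HIGH bits of GENMASK(14, 11):

#include <stdio.h>

int main(void)
{
	unsigned int bit_count = 16;	/* BITS_PER_BYTE * sizeof(__le16) */
	unsigned int high_bits = 4;	/* hweight32 of GENMASK(14, 11) */

	/* pre-v4.0 limit */
	printf("%#x\n", ~0U >> (32 - bit_count));		/* 0xffff */
	/* v4.0+ limit */
	printf("%#x\n", ~0U >> (32 - bit_count - high_bits));	/* 0xfffff */
	return 0;
}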
@@ -456,7 +456,11 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
/* pipeline_clear_src_grp is not used */
clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
- if (ipa->version != IPA_VERSION_3_5_1) {
+ /* IPA v4.0+ represents the pipeline clear options in the opcode. It
+ * also supports a larger offset by encoding additional high-order
+ * bits in the payload flags field.
+ */
+ if (ipa->version >= IPA_VERSION_4_0) {
u16 offset_high;
u32 val;
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 6dd3d35cf315..b99262281f41 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -20,11 +20,18 @@ struct gsi_channel;
/**
* enum ipa_cmd_opcode: IPA immediate commands
*
- * All immediate commands are issued using the AP command TX endpoint.
- * The numeric values here are the opcodes for IPA v3.5.1 hardware.
+ * @IPA_CMD_IP_V4_FILTER_INIT: Initialize IPv4 filter table
+ * @IPA_CMD_IP_V6_FILTER_INIT: Initialize IPv6 filter table
+ * @IPA_CMD_IP_V4_ROUTING_INIT: Initialize IPv4 routing table
+ * @IPA_CMD_IP_V6_ROUTING_INIT: Initialize IPv6 routing table
+ * @IPA_CMD_HDR_INIT_LOCAL: Initialize IPA-local header memory
+ * @IPA_CMD_REGISTER_WRITE: Register write performed by IPA
+ * @IPA_CMD_IP_PACKET_INIT: Set up next packet's destination endpoint
+ * @IPA_CMD_DMA_SHARED_MEM: DMA command performed by IPA
+ * @IPA_CMD_IP_PACKET_TAG_STATUS: Have next packet generate tag status
+ * @IPA_CMD_NONE: Special (invalid) "not a command" value
*
- * IPA_CMD_NONE is a special (invalid) value that's used to indicate
- * a request is *not* an immediate command.
+ * All immediate commands are issued using the AP command TX endpoint.
*/
enum ipa_cmd_opcode {
IPA_CMD_NONE = 0x0,
@@ -96,7 +103,7 @@ static inline bool ipa_cmd_data_valid(struct ipa *ipa)
*
* Return: 0 if successful, or a negative error code
*/
-int ipa_cmd_pool_init(struct gsi_channel *gsi_channel, u32 tre_count);
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_count);
/**
* ipa_cmd_pool_exit() - Inverse of ipa_cmd_pool_init()
@@ -124,7 +131,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans, enum ipa_cmd_opcode opcode,
/**
* ipa_cmd_hdr_init_local_add() - Add a header init command to a transaction
- * @ipa: IPA structure
+ * @trans: GSI transaction
* @offset: Offset of header memory in IPA local space
* @size: Size of header memory
* @addr: DMA address of buffer to be written from
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index 88c9c3562ab7..ead1a82f32f5 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/log2.h>
@@ -11,7 +11,49 @@
#include "ipa_endpoint.h"
#include "ipa_mem.h"
-/* Endpoint configuration for the SDM845 SoC. */
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v3.5.1 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_LWA_DL = 0,
+ IPA_RSRC_GROUP_SRC_UL_DL,
+ IPA_RSRC_GROUP_SRC_MHI_DMA,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_LWA_DL = 0,
+ IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ IPA_RSRC_GROUP_DST_UNUSED_2,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 8,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 4,
+ .max_reads = 12,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
@@ -24,11 +66,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tlv_count = 20,
},
.endpoint = {
- .seq_type = IPA_SEQ_DMA_ONLY,
.config = {
- .resource_group = 1,
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
},
},
},
@@ -43,9 +87,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tlv_count = 8,
},
.endpoint = {
- .seq_type = IPA_SEQ_INVALID,
.config = {
- .resource_group = 1,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -66,14 +109,14 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.filter_support = true,
- .seq_type =
- IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
.config = {
- .resource_group = 1,
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
@@ -91,9 +134,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tlv_count = 8,
},
.endpoint = {
- .seq_type = IPA_SEQ_INVALID,
.config = {
- .resource_group = 1,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -103,12 +145,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
},
},
- [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
- .ee_id = GSI_EE_MODEM,
- .channel_id = 1,
- .endpoint_id = 4,
- .toward_ipa = true,
- },
[IPA_ENDPOINT_MODEM_LAN_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
@@ -118,12 +154,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
},
},
- [IPA_ENDPOINT_MODEM_LAN_RX] = {
- .ee_id = GSI_EE_MODEM,
- .channel_id = 3,
- .endpoint_id = 13,
- .toward_ipa = false,
- },
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 4,
@@ -141,102 +171,105 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
};
-/* For the SDM845, resource groups are allocated this way:
- * group 0: LWA_DL
- * group 1: UL_DL
- */
-static const struct ipa_resource_src ipa_resource_src[] = {
- {
- .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
- .limits[0] = {
- .min = 1,
- .max = 255,
+/* Source resource configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 1, .max = 255,
},
- .limits[1] = {
- .min = 1,
- .max = 255,
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 1, .max = 63,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
- .limits[0] = {
- .min = 10,
- .max = 10,
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 10, .max = 10,
},
- .limits[1] = {
- .min = 10,
- .max = 10,
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 10, .max = 10,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
- .limits[0] = {
- .min = 12,
- .max = 12,
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 12, .max = 12,
},
- .limits[1] = {
- .min = 14,
- .max = 14,
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
- .limits[0] = {
- .min = 0,
- .max = 63,
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 0, .max = 63,
},
- .limits[1] = {
- .min = 0,
- .max = 63,
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_MHI_DMA] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 63,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
- .limits[0] = {
- .min = 14,
- .max = 14,
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 14, .max = 14,
},
- .limits[1] = {
- .min = 20,
- .max = 20,
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 20, .max = 20,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 14, .max = 14,
},
},
};
-static const struct ipa_resource_dst ipa_resource_dst[] = {
- {
- .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
- .limits[0] = {
- .min = 4,
- .max = 4,
+/* Destination resource configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
+ .min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
- .min = 4,
- .max = 4,
+ .min = 4, .max = 4,
},
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 3, .max = 3,
+ }
},
- {
- .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
- .limits[0] = {
- .min = 2,
- .max = 63,
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
+ .min = 2, .max = 63,
},
- .limits[1] = {
- .min = 1,
- .max = 63,
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 1, .max = 63,
},
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 1, .max = 2,
+ }
},
};
-/* Resource configuration for the SDM845 SoC. */
+/* Resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
-/* IPA-resident memory region configuration for the SDM845 SoC. */
+/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
.offset = 0x0000,
@@ -293,11 +326,6 @@ static const struct ipa_mem ipa_mem_local_data[] = {
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
- .offset = 0x07c8,
- .size = 0x0000,
- .canary_count = 0,
- },
[IPA_MEM_MODEM_PROC_CTX] = {
.offset = 0x07d0,
.size = 0x0200,
@@ -320,7 +348,8 @@ static const struct ipa_mem ipa_mem_local_data[] = {
},
};
-static struct ipa_mem_data ipa_mem_data = {
+/* Memory configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
@@ -330,7 +359,7 @@ static struct ipa_mem_data ipa_mem_data = {
};
/* Interconnect bandwidths are in 1000 byte/second units */
-static struct ipa_interconnect_data ipa_interconnect_data[] = {
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
@@ -349,15 +378,23 @@ static struct ipa_interconnect_data ipa_interconnect_data[] = {
},
};
-static struct ipa_clock_data ipa_clock_data = {
+/* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
-/* Configuration data for the SDM845 SoC. */
-const struct ipa_data ipa_data_sdm845 = {
+/* Configuration data for an SoC having IPA v3.5.1 */
+const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
+ .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_TX_NOT_USING_BRESP_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
new file mode 100644
index 000000000000..05806ceae8b5
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.11 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_UNUSED_2,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_DRB_IP,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.11 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 12,
+ .max_reads = 13,
+ .max_reads_beats = 120,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.11 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 14,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 14,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.11 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 6, .max = 6,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 18, .max = 18,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 2, .max = 2,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 15, .max = 15,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.11 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
+ .min = 25, .max = 25,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.11 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.11 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x0ad0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x0cd0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_NAT_TABLE] = {
+ .offset = 0x0ee0,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ [IPA_MEM_PDN_CONFIG] = {
+ .offset = 0x1be8,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_QUOTA_MODEM] = {
+ .offset = 0x1c40,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ [IPA_MEM_STATS_QUOTA_AP] = {
+ .offset = 0x1c70,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_TETHERING] = {
+ .offset = 0x1cb8,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_DROP] = {
+ .offset = 0x1ef0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x1f18,
+ .size = 0x100c,
+ .canary_count = 2,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x3000,
+ .size = 0x0000,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.11 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a8000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.57 MBps */
+ .average_bandwidth = 80000, /* 80 MBps (unused?) */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.11 */
+static const struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 60 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.11 */
+const struct ipa_data ipa_data_v4_11 = {
+ .version = IPA_VERSION_4_11,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
+};
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-v4.2.c
index 997b51ceb7d7..8744f19c6401 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2019-2020 Linaro Ltd. */
+/* Copyright (C) 2019-2021 Linaro Ltd. */
#include <linux/log2.h>
@@ -9,7 +9,41 @@
#include "ipa_endpoint.h"
#include "ipa_mem.h"
-/* Endpoint configuration for the SC7180 SoC. */
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.2 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.2 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 12,
+ /* no outstanding read byte (beat) limit */
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.2 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
@@ -22,11 +56,13 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tlv_count = 20,
},
.endpoint = {
- .seq_type = IPA_SEQ_DMA_ONLY,
.config = {
- .resource_group = 0,
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
},
},
},
@@ -41,9 +77,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tlv_count = 6,
},
.endpoint = {
- .seq_type = IPA_SEQ_INVALID,
.config = {
- .resource_group = 0,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -64,14 +99,14 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.filter_support = true,
- .seq_type =
- IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
.config = {
- .resource_group = 0,
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
+ .seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC,
+ .seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
@@ -89,9 +124,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tlv_count = 6,
},
.endpoint = {
- .seq_type = IPA_SEQ_INVALID,
.config = {
- .resource_group = 0,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -130,73 +164,60 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
};
-/* For the SC7180, resource groups are allocated this way:
- * group 0: UL_DL
- */
-static const struct ipa_resource_src ipa_resource_src[] = {
- {
- .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
- .limits[0] = {
- .min = 3,
- .max = 63,
+/* Source resource configuration data for an SoC having IPA v4.2 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 3, .max = 63,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
- .limits[0] = {
- .min = 3,
- .max = 3,
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 3, .max = 3,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
- .limits[0] = {
- .min = 10,
- .max = 10,
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 10, .max = 10,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
- .limits[0] = {
- .min = 1,
- .max = 1,
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 1,
},
},
- {
- .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
- .limits[0] = {
- .min = 5,
- .max = 5,
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 5, .max = 5,
},
},
};
-static const struct ipa_resource_dst ipa_resource_dst[] = {
- {
- .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
- .limits[0] = {
- .min = 3,
- .max = 3,
+/* Destination resource configuration data for an SoC having IPA v4.2 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 3, .max = 3,
},
},
- {
- .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
- .limits[0] = {
- .min = 1,
- .max = 63,
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 1, .max = 63,
},
},
};
-/* Resource configuration for the SC7180 SoC. */
+/* Resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
-/* IPA-resident memory region configuration for the SC7180 SoC. */
+/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
.offset = 0x0000,
@@ -206,7 +227,7 @@ static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_INFO] = {
.offset = 0x0080,
.size = 0x0200,
- .canary_count = 2,
+ .canary_count = 0,
},
[IPA_MEM_V4_FILTER_HASHED] = {
.offset = 0x0288,
@@ -253,11 +274,6 @@ static const struct ipa_mem ipa_mem_local_data[] = {
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
- .offset = 0x05e8,
- .size = 0x0000,
- .canary_count = 0,
- },
[IPA_MEM_MODEM_PROC_CTX] = {
.offset = 0x05f0,
.size = 0x0200,
@@ -273,7 +289,7 @@ static const struct ipa_mem ipa_mem_local_data[] = {
.size = 0x0050,
.canary_count = 2,
},
- [IPA_MEM_STATS_QUOTA] = {
+ [IPA_MEM_STATS_QUOTA_MODEM] = {
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
@@ -283,11 +299,6 @@ static const struct ipa_mem ipa_mem_local_data[] = {
.size = 0x0140,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
- .offset = 0x0bf0,
- .size = 0,
- .canary_count = 0,
- },
[IPA_MEM_MODEM] = {
.offset = 0x0bf0,
.size = 0x140c,
@@ -300,7 +311,8 @@ static const struct ipa_mem ipa_mem_local_data[] = {
},
};
-static struct ipa_mem_data ipa_mem_data = {
+/* Memory configuration data for an SoC having IPA v4.2 */
+static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
@@ -309,8 +321,8 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
-/* Interconnect bandwidths are in 1000 byte/second units */
-static struct ipa_interconnect_data ipa_interconnect_data[] = {
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 465000, /* 465 MBps */
@@ -329,15 +341,19 @@ static struct ipa_interconnect_data ipa_interconnect_data[] = {
},
};
-static struct ipa_clock_data ipa_clock_data = {
+/* Clock and interconnect configuration data for an SoC having IPA v4.2 */
+static const struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
-/* Configuration data for the SC7180 SoC. */
-const struct ipa_data ipa_data_sc7180 = {
+/* Configuration data for an SoC having IPA v4.2 */
+const struct ipa_data ipa_data_v4_2 = {
.version = IPA_VERSION_4_2,
+ /* backward_compat value is 0 */
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
new file mode 100644
index 000000000000..5f67a3a909ee
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.5 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UNUSED_0 = 0,
+ IPA_RSRC_GROUP_SRC_UL_DL,
+ IPA_RSRC_GROUP_SRC_UNUSED_2,
+ IPA_RSRC_GROUP_SRC_UNUSED_3,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UNUSED_0 = 0,
+ IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ IPA_RSRC_GROUP_DST_UNUSED_2,
+ IPA_RSRC_GROUP_DST_UNUSED_3,
+ IPA_RSRC_GROUP_DST_UC,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.5 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 120,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 8,
+ .max_reads = 12,
+ /* no outstanding read byte (beat) limit */
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.5 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 9,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 10,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 14,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 21,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.5 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 11,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 1, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 18, .max = 18,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UNUSED_0] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UNUSED_2] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UNUSED_3] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 24, .max = 24,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.5 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 2, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
+ .min = 2, .max = 2,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 1, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
+ .min = 1, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UC] = {
+ .min = 0, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.5 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.5 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x0ad0,
+ .size = 0x0b20,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x15f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_NAT_TABLE] = {
+ .offset = 0x1800,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ [IPA_MEM_STATS_QUOTA_MODEM] = {
+ .offset = 0x2510,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ [IPA_MEM_STATS_QUOTA_AP] = {
+ .offset = 0x2540,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_TETHERING] = {
+ .offset = 0x2588,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_FILTER_ROUTE] = {
+ .offset = 0x27c0,
+ .size = 0x0800,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_DROP] = {
+ .offset = 0x2fc0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x2fe8,
+ .size = 0x0800,
+ .canary_count = 2,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x3800,
+ .size = 0x1000,
+ .canary_count = 1,
+ },
+ [IPA_MEM_PDN_CONFIG] = {
+ .offset = 0x4800,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+};
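/* Aside: a hedged sketch of how a region's canary_count is consumed.
 * The real logic lives in ipa_mem.c (not part of this hunk); the
 * IPA_MEM_CANARY_VAL marker and the write-before-the-region behavior
 * follow the existing driver, but treat the details as illustrative.
 */
static void ipa_mem_canary_sketch(__le32 *virt, const struct ipa_mem *mem)
{
	u16 count = mem->canary_count;
	__le32 *canary;

	if (!count)
		return;

	/* Canaries occupy the 32-bit words just before the region offset */
	canary = virt + mem->offset / sizeof(*canary);
	while (count--)
		*--canary = IPA_MEM_CANARY_VAL;	/* 0xdeadbeef marker */
}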
+
+/* Memory configuration data for an SoC having IPA v4.5 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x14688000,
+ .imem_size = 0x00003000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory-a",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ {
+ .name = "memory-b",
+ .peak_bandwidth = 1804000, /* 1.804 GBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 450000, /* 450 MBps */
+ .average_bandwidth = 75000, /* 75 MBps (unused?) */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 171400, /* 171.4 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
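/* Aside: the interconnect framework's bandwidth units are also kBps
 * (1000 bytes/second), so the table values above can be handed to
 * icc_set_bw() unscaled. A hedged sketch of that hand-off; the real
 * code lives in ipa_clock.c and may be structured differently:
 */
static int ipa_interconnect_enable_sketch(struct icc_path *path,
			const struct ipa_interconnect_data *data)
{
	/* Average and peak rates are already in the framework's kBps */
	return icc_set_bw(path, data->average_bandwidth,
			  data->peak_bandwidth);
}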
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.5 */
+static const struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.5 */
+const struct ipa_data ipa_data_v4_5 = {
+ .version = IPA_VERSION_4_5,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
+};
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
new file mode 100644
index 000000000000..e41be790f45e
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.9 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_DMA,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_DMA,
+ IPA_RSRC_GROUP_DST_UC,
+ IPA_RSRC_GROUP_DST_DRB_IP,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.9 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 120,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.9 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 11,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 12,
+ .endpoint_id = 20,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.9 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 12,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 1, .max = 12,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 20, .max = 20,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 2, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 38, .max = 38,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 4, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 0, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 4,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 30, .max = 30,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 8, .max = 8,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.9 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 9, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UC] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
+ .min = 39, .max = 39,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UC] = {
+ .min = 0, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.9 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.9 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x0ad0,
+ .size = 0x0b20,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x15f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_NAT_TABLE] = {
+ .offset = 0x1800,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ [IPA_MEM_STATS_QUOTA_MODEM] = {
+ .offset = 0x2510,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ [IPA_MEM_STATS_QUOTA_AP] = {
+ .offset = 0x2540,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_TETHERING] = {
+ .offset = 0x2588,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_FILTER_ROUTE] = {
+ .offset = 0x27c0,
+ .size = 0x0800,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_DROP] = {
+ .offset = 0x2fc0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x2fe8,
+ .size = 0x0800,
+ .canary_count = 2,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x3800,
+ .size = 0x1000,
+ .canary_count = 1,
+ },
+ [IPA_MEM_PDN_CONFIG] = {
+ .offset = 0x4800,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.9 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "ipa_to_llcc",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ {
+ .name = "llcc_to_ebi1",
+ .peak_bandwidth = 1804000, /* 1.804 GBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next interconnect */
+ {
+ .name = "appss_to_ipa",
+ .peak_bandwidth = 74000, /* 74 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.9 */
+static const struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 60 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.9. */
+const struct ipa_data ipa_data_v4_9 = {
+ .version = IPA_VERSION_4_9,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
+};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index b476fc373f7f..5c4c8d72d7d8 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
@@ -18,8 +18,9 @@
* Boot-time configuration data is used to define the configuration of the
* IPA and GSI resources to use for a given platform. This data is supplied
* via the Device Tree match table, associated with a particular compatible
- * string. The data defines information about resources, endpoints, and
- * channels.
+ * string. The data defines information about how resources, endpoints and
+ * channels, memory, clocking and so on are allocated and used for the
+ * platform.
*
* Resources are data structures used internally by the IPA hardware. The
* configuration data defines the number (or limits of the number) of various
@@ -45,9 +46,26 @@
* the IPA endpoint.
*/
-/* The maximum value returned by ipa_resource_group_{src,dst}_count() */
-#define IPA_RESOURCE_GROUP_SRC_MAX 5
-#define IPA_RESOURCE_GROUP_DST_MAX 5
+/* The maximum possible number of source or destination resource groups */
+#define IPA_RESOURCE_GROUP_MAX 8
+
+/** enum ipa_qsb_master_id - array index for IPA QSB configuration data */
+enum ipa_qsb_master_id {
+ IPA_QSB_MASTER_DDR,
+ IPA_QSB_MASTER_PCIE,
+};
+
+/**
+ * struct ipa_qsb_data - Qualcomm System Bus configuration data
+ * @max_writes: Maximum outstanding write requests for this master
+ * @max_reads: Maximum outstanding read requests for this master
+ * @max_reads_beats: Max outstanding read bytes in 8-byte "beats" (if non-zero)
+ */
+struct ipa_qsb_data {
+ u8 max_writes;
+ u8 max_reads;
+ u8 max_reads_beats; /* Not present for IPA v3.5.1 */
+};
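/* Aside: max_reads_beats bounds outstanding read *data* rather than
 * read requests. A "beat" is 8 bytes, so the 120-beat DDR value used
 * by IPA v4.5 caps outstanding read data at 120 * 8 = 960 bytes, and
 * 0 leaves the hardware default in place. A small helper expressing
 * that conversion (illustrative only):
 */
static inline u32 ipa_qsb_max_read_bytes(const struct ipa_qsb_data *data)
{
	return data->max_reads_beats * 8;	/* 0 means "no limit set" */
}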
/**
* struct gsi_channel_data - GSI channel configuration data
@@ -57,10 +75,10 @@
*
* A GSI channel is a unidirectional means of transferring data to or
* from (and through) the IPA. A GSI channel has a ring buffer made
- * up of "transfer elements" (TREs) that specify individual data transfers
- * or IPA immediate commands. TREs are filled by the AP, and control
- * is passed to IPA hardware by writing the last written element
- * into a doorbell register.
+ * up of "transfer ring elements" (TREs) that specify individual data
+ * transfers or IPA immediate commands. TREs are filled by the AP,
+ * and control is passed to IPA hardware by writing the last written
+ * element into a doorbell register.
*
* When data transfer commands have completed the GSI generates an
* event (a structure of data) and optionally signals the AP with
@@ -72,19 +90,23 @@
* that can be included in a single transaction.
*/
struct gsi_channel_data {
- u16 tre_count;
- u16 event_count;
+ u16 tre_count; /* must be a power of 2 */
+ u16 event_count; /* must be a power of 2 */
u8 tlv_count;
};
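/* Aside: the power-of-2 requirement noted above can be checked with
 * is_power_of_2() from <linux/log2.h>. A hedged sketch of such a
 * check; the driver's actual validation may differ (and modem-owned
 * endpoint entries leave this structure zeroed, which a real check
 * would need to tolerate):
 */
static bool gsi_channel_data_valid_sketch(const struct gsi_channel_data *data)
{
	return is_power_of_2(data->tre_count) &&
	       is_power_of_2(data->event_count);
}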
/**
* struct ipa_endpoint_tx_data - configuration data for TX endpoints
+ * @seq_type: primary packet processing sequencer type
+ * @seq_rep_type: sequencer type for replication processing
* @status_endpoint: endpoint to which status elements are sent
*
* The @status_endpoint is only valid if the endpoint's @status_enable
* flag is set.
*/
struct ipa_endpoint_tx_data {
+ enum ipa_seq_type seq_type;
+ enum ipa_seq_rep_type seq_rep_type;
enum ipa_endpoint_name status_endpoint;
};
@@ -136,7 +158,6 @@ struct ipa_endpoint_config_data {
/**
* struct ipa_endpoint_data - IPA endpoint configuration data
* @filter_support: whether endpoint supports filtering
- * @seq_type: hardware sequencer type used for endpoint
* @config: hardware configuration (see above)
*
* Not all endpoints support the IPA filtering capability. A filter table
@@ -146,25 +167,21 @@ struct ipa_endpoint_config_data {
* in the system, and indicate whether they support filtering.
*
* The remaining endpoint configuration data applies only to AP endpoints.
- * The IPA hardware is implemented by sequencers, and the AP must program
- * the type(s) of these sequencers at initialization time. The remaining
- * endpoint configuration data is defined above.
*/
struct ipa_endpoint_data {
bool filter_support;
- /* The next two are specified only for AP endpoints */
- enum ipa_seq_type seq_type;
+ /* Everything else is specified only for AP endpoints */
struct ipa_endpoint_config_data config;
};
/**
* struct ipa_gsi_endpoint_data - GSI channel/IPA endpoint data
- * ee: GSI execution environment ID
- * channel_id: GSI channel ID
- * endpoint_id: IPA endpoint ID
- * toward_ipa: direction of data transfer
- * gsi: GSI channel configuration data (see above)
- * ipa: IPA endpoint configuration data (see above)
+ * @ee_id: GSI execution environment ID
+ * @channel_id: GSI channel ID
+ * @endpoint_id: IPA endpoint ID
+ * @toward_ipa: direction of data transfer
+ * @channel: GSI channel configuration data (see above)
+ * @endpoint: IPA endpoint configuration data (see above)
*/
struct ipa_gsi_endpoint_data {
u8 ee_id; /* enum gsi_ee_id */
@@ -176,21 +193,6 @@ struct ipa_gsi_endpoint_data {
struct ipa_endpoint_data endpoint;
};
-/** enum ipa_resource_type_src - source resource types */
-enum ipa_resource_type_src {
- IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
- IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
- IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
- IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
- IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
-};
-
-/** enum ipa_resource_type_dst - destination resource types */
-enum ipa_resource_type_dst {
- IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
- IPA_RESOURCE_TYPE_DST_DPS_DMARS,
-};
-
/**
* struct ipa_resource_limits - minimum and maximum resource counts
* @min: minimum number of resources of a given type
@@ -202,27 +204,17 @@ struct ipa_resource_limits {
};
/**
- * struct ipa_resource_src - source endpoint group resource usage
- * @type: source group resource type
- * @limits: array of limits to use for each resource group
- */
-struct ipa_resource_src {
- enum ipa_resource_type_src type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_SRC_MAX];
-};
-
-/**
- * struct ipa_resource_dst - destination endpoint group resource usage
- * @type: destination group resource type
- * @limits: array of limits to use for each resource group
+ * struct ipa_resource - resource group source or destination resource usage
+ * @limits: array of resource limits, indexed by group
*/
-struct ipa_resource_dst {
- enum ipa_resource_type_dst type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_DST_MAX];
+struct ipa_resource {
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_MAX];
};
/**
* struct ipa_resource_data - IPA resource configuration data
+ * @rsrc_group_src_count: number of source resource groups supported
+ * @rsrc_group_dst_count: number of destination resource groups supported
* @resource_src_count: number of entries in the resource_src array
* @resource_src: source endpoint group resources
* @resource_dst_count: number of entries in the resource_dst array
@@ -234,10 +226,12 @@ struct ipa_resource_dst {
* programming it at initialization time, so we specify it here.
*/
struct ipa_resource_data {
+ u32 rsrc_group_src_count;
+ u32 rsrc_group_dst_count;
u32 resource_src_count;
- const struct ipa_resource_src *resource_src;
+ const struct ipa_resource *resource_src;
u32 resource_dst_count;
- const struct ipa_resource_dst *resource_dst;
+ const struct ipa_resource *resource_dst;
};
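/* Aside: with the group counts now carried in the configuration data,
 * the version-keyed validation deleted from ipa_main.c later in this
 * patch can be expressed generically. A hedged sketch mirroring that
 * deleted logic, indexed by the new rsrc_group_*_count fields (the
 * real replacement presumably lives in the new ipa_resource.c, which
 * is not shown here):
 */
static bool ipa_resource_limits_valid_sketch(const struct ipa_resource_data *data)
{
	u32 i, j;

	/* Non-zero limits must not be set for unsupported groups */
	for (i = 0; i < data->resource_src_count; i++)
		for (j = data->rsrc_group_src_count;
		     j < IPA_RESOURCE_GROUP_MAX; j++)
			if (data->resource_src[i].limits[j].min ||
			    data->resource_src[i].limits[j].max)
				return false;

	for (i = 0; i < data->resource_dst_count; i++)
		for (j = data->rsrc_group_dst_count;
		     j < IPA_RESOURCE_GROUP_MAX; j++)
			if (data->resource_dst[i].limits[j].min ||
			    data->resource_dst[i].limits[j].max)
				return false;

	return true;
}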
/**
@@ -247,7 +241,7 @@ struct ipa_resource_data {
* @imem_addr: physical address of IPA region within IMEM
* @imem_size: size in bytes of IPA IMEM region
* @smem_id: item identifier for IPA region within SMEM memory
- * @imem_size: size in bytes of the IPA SMEM region
+ * @smem_size: size in bytes of the IPA SMEM region
*/
struct ipa_mem_data {
u32 local_count;
@@ -285,22 +279,31 @@ struct ipa_clock_data {
/**
* struct ipa_data - combined IPA/GSI configuration data
* @version: IPA hardware version
- * @endpoint_count: number of entries in endpoint_data array
+ * @backward_compat: BCR register value (prior to IPA v4.5 only)
+ * @qsb_count: number of entries in the qsb_data array
+ * @qsb_data: Qualcomm System Bus configuration data
+ * @endpoint_count: number of entries in the endpoint_data array
* @endpoint_data: IPA endpoint/GSI channel data
* @resource_data: IPA resource configuration data
- * @mem_count: number of entries in mem_data array
- * @mem_data: IPA-local shared memory region data
+ * @mem_data: IPA memory region data
+ * @clock_data: IPA clock and interconnect data
*/
struct ipa_data {
enum ipa_version version;
- u32 endpoint_count; /* # entries in endpoint_data[] */
+ u32 backward_compat;
+ u32 qsb_count; /* number of entries in qsb_data[] */
+ const struct ipa_qsb_data *qsb_data;
+ u32 endpoint_count; /* number of entries in endpoint_data[] */
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
const struct ipa_mem_data *mem_data;
const struct ipa_clock_data *clock_data;
};
-extern const struct ipa_data ipa_data_sdm845;
-extern const struct ipa_data ipa_data_sc7180;
+extern const struct ipa_data ipa_data_v3_5_1;
+extern const struct ipa_data ipa_data_v4_2;
+extern const struct ipa_data ipa_data_v4_5;
+extern const struct ipa_data ipa_data_v4_9;
+extern const struct ipa_data ipa_data_v4_11;
#endif /* _IPA_DATA_H_ */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 7209ee3c3124..ccc99ad983eb 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -88,6 +88,11 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (ipa_gsi_endpoint_data_empty(data))
return true;
+ /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
+ if (ipa->version >= IPA_VERSION_4_5)
+ if (data->endpoint.config.checksum)
+ return false;
+
if (!data->toward_ipa) {
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
@@ -230,6 +235,17 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
+ const struct ipa_gsi_endpoint_data *dp = data;
+ enum ipa_endpoint_name name;
+
+ if (ipa->version < IPA_VERSION_4_5)
+ return true;
+
+ /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
+ for (name = 0; name < count; name++, dp++)
+ if (dp->endpoint.config.checksum)
+ return false;
+
return true;
}
@@ -266,7 +282,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
* if (endpoint->toward_ipa)
* assert(ipa->version != IPA_VERSION_4.2);
* else
- * assert(ipa->version == IPA_VERSION_3_5_1);
+ * assert(ipa->version < IPA_VERSION_4_0);
*/
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
@@ -347,7 +363,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
bool suspended;
- if (endpoint->ipa->version != IPA_VERSION_3_5_1)
+ if (endpoint->ipa->version >= IPA_VERSION_4_0)
return enable; /* For IPA v4.0+, no change made */
/* assert(!endpoint->toward_ipa); */
@@ -397,7 +413,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
/* We need one command per modem TX endpoint. We can get an upper
* bound on that by assuming all initialized endpoints are modem->IPA.
* That won't happen, and we could be more precise, but this is fine
- * for now. We need to end the transaction with a "tag process."
+ * for now. End the transaction with commands to clear the pipeline.
*/
count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
@@ -468,6 +484,20 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
+{
+ u32 offset;
+ u32 val;
+
+ if (!endpoint->toward_ipa)
+ return;
+
+ offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
+ val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -515,7 +545,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* Where IPA will write the length */
offset = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
- if (version == IPA_VERSION_4_5)
+ if (version >= IPA_VERSION_4_5)
offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
@@ -562,7 +592,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
*/
- if (ipa->version == IPA_VERSION_4_5) {
+ if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->data->qmap && !endpoint->toward_ipa) {
u32 offset;
@@ -776,7 +806,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
- if (ipa->version == IPA_VERSION_4_5)
+ if (ipa->version >= IPA_VERSION_4_5)
return hol_block_timer_qtime_val(ipa, microseconds);
/* Use 64 bit arithmetic to avoid overflow... */
@@ -795,7 +825,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
* The best precision is achieved when the base value is as
* large as possible. Find the highest set bit in the tick
* count, and extract the number of bits in the base field
- * such that that high bit is included.
+ * such that the high bit is included.
*/
high = fls(ticks); /* 1..32 */
width = HWEIGHT32(BASE_VALUE_FMASK);
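/* Aside: a worked example of the split sketched above, assuming a
 * 5-bit base field (width = 5; the real width comes from
 * BASE_VALUE_FMASK) and that the remainder of this function (not shown
 * in this hunk) rounds before extracting the base. For ticks = 300:
 * fls(300) = 9, so scale = 9 - 5 = 4; rounding adds 1 << 3, giving
 * 308, and base = 308 >> 4 = 19. The programmed period is then
 * 19 << 4 = 304 ticks, within about 1.3% of the requested 300.
 */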
@@ -884,18 +914,17 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
- u32 seq_type = endpoint->seq_type;
u32 val = 0;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- /* Sequencer type is made up of four nibbles */
- val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
- val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
- /* The second two apply to replicated packets */
- val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
- val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
+ /* Low-order byte configures primary packet processing */
+ val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
+
+ /* Second byte configures replicated packet processing */
+ val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
+ SEQ_REP_TYPE_FMASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -1435,7 +1464,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
if (ret)
goto out_suspend_again;
- /* Finally, reset and reconfigure the channel again (re-enabling the
+ /* Finally, reset and reconfigure the channel again (re-enabling
* the doorbell engine if appropriate). Sleep for 1 millisecond to
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
@@ -1469,8 +1498,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
* is active, we need to handle things specially to recover.
* All other cases just need to reset the underlying GSI channel.
*/
- special = ipa->version == IPA_VERSION_3_5_1 &&
- !endpoint->toward_ipa &&
+ special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
@@ -1490,6 +1518,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
else
(void)ipa_endpoint_program_suspend(endpoint, false);
ipa_endpoint_init_cfg(endpoint);
+ ipa_endpoint_init_nat(endpoint);
ipa_endpoint_init_hdr(endpoint);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_hdr_metadata_mask(endpoint);
@@ -1568,8 +1597,10 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
(void)ipa_endpoint_program_suspend(endpoint, true);
}
- /* IPA v3.5.1 doesn't use channel stop for suspend */
- stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ /* Starting with IPA v4.0, endpoints are suspended by stopping the
+ * underlying GSI channel rather than using endpoint suspend mode.
+ */
+ stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
if (ret)
dev_err(dev, "error %d suspending channel %u\n", ret,
@@ -1589,8 +1620,10 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, false);
- /* IPA v3.5.1 doesn't use channel start for resume */
- start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ /* Starting with IPA v4.0, the underlying GSI channel must be
+ * restarted for resume.
+ */
+ start_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
if (ret)
dev_err(dev, "error %d resuming channel %u\n", ret,
@@ -1738,7 +1771,7 @@ int ipa_endpoint_config(struct ipa *ipa)
/* Make sure it's pointing in the right direction */
endpoint = &ipa->endpoint[endpoint_id];
- if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
+ if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
dev_err(dev, "endpoint id %u wrong direction\n",
endpoint_id);
ret = -EINVAL;
@@ -1766,7 +1799,6 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->ipa = ipa;
endpoint->ee_id = data->ee_id;
- endpoint->seq_type = data->endpoint.seq_type;
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
@@ -1775,7 +1807,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
ipa->initialized |= BIT(endpoint->endpoint_id);
}
-void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
+static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 881ecc27bd6e..0a859d10312d 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -25,15 +25,16 @@ struct ipa_gsi_endpoint_data;
#define IPA_MTU ETH_DATA_LEN
enum ipa_endpoint_name {
- IPA_ENDPOINT_AP_MODEM_TX,
- IPA_ENDPOINT_MODEM_LAN_TX,
- IPA_ENDPOINT_MODEM_COMMAND_TX,
IPA_ENDPOINT_AP_COMMAND_TX,
- IPA_ENDPOINT_MODEM_AP_TX,
IPA_ENDPOINT_AP_LAN_RX,
+ IPA_ENDPOINT_AP_MODEM_TX,
IPA_ENDPOINT_AP_MODEM_RX,
- IPA_ENDPOINT_MODEM_AP_RX,
+ IPA_ENDPOINT_MODEM_COMMAND_TX,
+ IPA_ENDPOINT_MODEM_LAN_TX,
IPA_ENDPOINT_MODEM_LAN_RX,
+ IPA_ENDPOINT_MODEM_AP_TX,
+ IPA_ENDPOINT_MODEM_AP_RX,
+ IPA_ENDPOINT_MODEM_DL_NLO_TX,
IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
};
@@ -41,19 +42,30 @@ enum ipa_endpoint_name {
/**
* struct ipa_endpoint - IPA endpoint information
- * @channel_id: EP's GSI channel
- * @evt_ring_id: EP's GSI channel event ring
+ * @ipa: IPA pointer
+ * @ee_id: Execution environment the endpoint is associated with
+ * @channel_id: GSI channel used by the endpoint
+ * @endpoint_id: IPA endpoint number
+ * @toward_ipa: Endpoint direction (true = TX, false = RX)
+ * @data: Endpoint configuration data
+ * @trans_tre_max: Maximum number of TRE descriptors per transaction
+ * @evt_ring_id: GSI event ring used by the endpoint
+ * @netdev: Network device pointer, if endpoint uses one
+ * @replenish_enabled: Whether receive buffer replenishing is enabled
+ * @replenish_ready: Number of replenish transactions without doorbell
+ * @replenish_saved: Replenish requests held while disabled
+ * @replenish_backlog: Number of buffers needed to fill hardware queue
+ * @replenish_work: Work item used for repeated replenish failures
*/
struct ipa_endpoint {
struct ipa *ipa;
- enum ipa_seq_type seq_type;
enum gsi_ee_id ee_id;
u32 channel_id;
u32 endpoint_id;
bool toward_ipa;
const struct ipa_endpoint_config_data *data;
- u32 trans_tre_max; /* maximum descriptors per transaction */
+ u32 trans_tre_max;
u32 evt_ring_id;
/* Net device this endpoint is associated with, if any */
@@ -75,8 +87,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
-void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
-
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 61dd7605bcb6..c46df0b7c4e5 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -54,12 +54,14 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(irq_id);
+ u32 offset;
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
+ offset = ipa_reg_irq_clr_offset(ipa->version);
if (uc_irq)
- iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+ iowrite32(mask, ipa->reg_virt + offset);
if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
interrupt->handler[irq_id](interrupt->ipa, irq_id);
@@ -69,7 +71,7 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
* so defer clearing until after the handler has been called.
*/
if (!uc_irq)
- iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+ iowrite32(mask, ipa->reg_virt + offset);
}
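/* Aside: ipa_reg_irq_clr_offset() and its siblings replace the old
 * fixed IPA_REG_IRQ_*_OFFSET macros with per-version lookups,
 * presumably as small inline helpers in ipa_reg.h along these lines.
 * The offsets below are placeholders, not the real register map:
 */
static inline u32 ipa_reg_irq_clr_offset_sketch(enum ipa_version version)
{
	if (version < IPA_VERSION_4_9)
		return 0x00003010;	/* hypothetical pre-v4.9 offset */

	return 0x00004010;		/* hypothetical v4.9+ offset */
}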
/* Process all IPA interrupt types that have been signaled */
@@ -77,13 +79,15 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ u32 offset;
u32 mask;
/* The status register indicates which conditions are present,
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ offset = ipa_reg_irq_stts_offset(ipa->version);
+ mask = ioread32(ipa->reg_virt + offset);
while ((mask &= enabled)) {
do {
u32 irq_id = __ffs(mask);
@@ -92,7 +96,7 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
ipa_interrupt_process(interrupt, irq_id);
} while (mask);
- mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ mask = ioread32(ipa->reg_virt + offset);
}
}
@@ -115,14 +119,17 @@ static irqreturn_t ipa_isr(int irq, void *dev_id)
{
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
+ u32 offset;
u32 mask;
- mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ offset = ipa_reg_irq_stts_offset(ipa->version);
+ mask = ioread32(ipa->reg_virt + offset);
if (mask & interrupt->enabled)
return IRQ_WAKE_THREAD;
/* Nothing in the mask was supposed to cause an interrupt */
- iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+ offset = ipa_reg_irq_clr_offset(ipa->version);
+ iowrite32(mask, ipa->reg_virt + offset);
dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n",
__func__, mask);
@@ -136,15 +143,22 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id);
+ u32 offset;
u32 val;
/* assert(mask & ipa->available); */
- val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
+
+ /* IPA version 3.0 does not support TX_SUSPEND interrupt control */
+ if (ipa->version == IPA_VERSION_3_0)
+ return;
+
+ offset = ipa_reg_irq_suspend_en_offset(ipa->version);
+ val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
else
val &= ~mask;
- iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Enable TX_SUSPEND for an endpoint */
@@ -165,10 +179,18 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
+ u32 offset;
u32 val;
- val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
- iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_CLR_OFFSET);
+ offset = ipa_reg_irq_suspend_info_offset(ipa->version);
+ val = ioread32(ipa->reg_virt + offset);
+
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (ipa->version == IPA_VERSION_3_0)
+ return;
+
+ offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
@@ -182,13 +204,15 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
struct ipa *ipa = interrupt->ipa;
+ u32 offset;
/* assert(ipa_irq < IPA_IRQ_COUNT); */
interrupt->handler[ipa_irq] = handler;
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+ offset = ipa_reg_irq_en_offset(ipa->version);
+ iowrite32(interrupt->enabled, ipa->reg_virt + offset);
}
/* Remove the handler for an IPA interrupt type */
@@ -196,11 +220,13 @@ void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
struct ipa *ipa = interrupt->ipa;
+ u32 offset;
/* assert(ipa_irq < IPA_IRQ_COUNT); */
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+ offset = ipa_reg_irq_en_offset(ipa->version);
+ iowrite32(interrupt->enabled, ipa->reg_virt + offset);
interrupt->handler[ipa_irq] = NULL;
}
@@ -211,6 +237,7 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
unsigned int irq;
+ u32 offset;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
@@ -228,7 +255,8 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- iowrite32(0, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+ offset = ipa_reg_irq_en_offset(ipa->version);
+ iowrite32(0, ipa->reg_virt + offset);
ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index b5d63a0cd19e..d5c486a6800d 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -24,6 +24,7 @@ typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id);
/**
* ipa_interrupt_add() - Register a handler for an IPA interrupt type
+ * @interrupt: IPA interrupt structure
* @irq_id: IPA interrupt type
* @handler: Handler function for the interrupt
*
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 97c1b55405cb..9915603ed10b 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -22,6 +22,7 @@
#include "ipa_clock.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
+#include "ipa_resource.h"
#include "ipa_cmd.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
@@ -66,7 +67,7 @@
*/
/* The name of the GSI firmware file relative to /lib/firmware */
-#define IPA_FWS_PATH "ipa_fws.mdt"
+#define IPA_FW_PATH_DEFAULT "ipa_fws.mdt"
#define IPA_PAS_ID 15
/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
@@ -146,13 +147,13 @@ int ipa_setup(struct ipa *ipa)
if (ret)
goto err_endpoint_teardown;
- ret = ipa_mem_setup(ipa);
+ ret = ipa_mem_setup(ipa); /* No matching teardown required */
if (ret)
goto err_command_disable;
- ret = ipa_table_setup(ipa);
+ ret = ipa_table_setup(ipa); /* No matching teardown required */
if (ret)
- goto err_mem_teardown;
+ goto err_command_disable;
/* Enable the exception handling endpoint, and tell the hardware
* to use it by default.
@@ -160,7 +161,7 @@ int ipa_setup(struct ipa *ipa)
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ret = ipa_endpoint_enable_one(exception_endpoint);
if (ret)
- goto err_table_teardown;
+ goto err_command_disable;
ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
@@ -178,10 +179,6 @@ int ipa_setup(struct ipa *ipa)
err_default_route_clear:
ipa_endpoint_default_route_clear(ipa);
ipa_endpoint_disable_one(exception_endpoint);
-err_table_teardown:
- ipa_table_teardown(ipa);
-err_mem_teardown:
- ipa_mem_teardown(ipa);
err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
@@ -210,8 +207,6 @@ static void ipa_teardown(struct ipa *ipa)
ipa_endpoint_default_route_clear(ipa);
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_endpoint_disable_one(exception_endpoint);
- ipa_table_teardown(ipa);
- ipa_mem_teardown(ipa);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
@@ -222,13 +217,13 @@ static void ipa_teardown(struct ipa *ipa)
gsi_teardown(&ipa->gsi);
}
-/* Configure QMB Core Master Port selection */
+/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
u32 val;
- /* Nothing to configure for IPA v3.5.1 */
- if (ipa->version == IPA_VERSION_3_5_1)
+ /* Nothing to configure prior to IPA v4.0 */
+ if (ipa->version < IPA_VERSION_4_0)
return;
val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
@@ -249,56 +244,59 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
}
-/* Configure DDR and PCIe max read/write QSB values */
-static void ipa_hardware_config_qsb(struct ipa *ipa)
+/* Configure DDR and (possibly) PCIe max read/write QSB values */
+static void
+ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
- enum ipa_version version = ipa->version;
- u32 max0;
- u32 max1;
+ const struct ipa_qsb_data *data0;
+ const struct ipa_qsb_data *data1;
u32 val;
- /* QMB_0 represents DDR; QMB_1 represents PCIe */
- val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
- switch (version) {
- case IPA_VERSION_4_2:
- max1 = 0; /* PCIe not present */
- break;
- case IPA_VERSION_4_5:
- max1 = 8;
- break;
- default:
- max1 = 4;
- break;
- }
- val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK);
+ /* assert(data->qsb_count > 0); */
+ /* assert(data->qsb_count < 3); */
+
+ /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
+ data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
+ if (data->qsb_count > 1)
+ data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
+
+ /* Max outstanding write accesses for QSB masters */
+ val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
+ if (data->qsb_count > 1)
+ val |= u32_encode_bits(data1->max_writes,
+ GEN_QMB_1_MAX_WRITES_FMASK);
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
- max1 = 12;
- switch (version) {
- case IPA_VERSION_3_5_1:
- max0 = 8;
- break;
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- max0 = 12;
- break;
- case IPA_VERSION_4_2:
- max0 = 12;
- max1 = 0; /* PCIe not present */
- break;
- case IPA_VERSION_4_5:
- max0 = 0; /* No limit (hardware maximum) */
- break;
- }
- val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK);
- val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK);
- if (version != IPA_VERSION_3_5_1) {
- /* GEN_QMB_0_MAX_READS_BEATS is 0 */
- /* GEN_QMB_1_MAX_READS_BEATS is 0 */
+ /* Max outstanding read accesses for QSB masters */
+ val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
+ if (ipa->version >= IPA_VERSION_4_0)
+ val |= u32_encode_bits(data0->max_reads_beats,
+ GEN_QMB_0_MAX_READS_BEATS_FMASK);
+ if (data->qsb_count > 1) {
+ val |= u32_encode_bits(data1->max_reads,
+ GEN_QMB_1_MAX_READS_FMASK);
+ if (ipa->version >= IPA_VERSION_4_0)
+ val |= u32_encode_bits(data1->max_reads_beats,
+ GEN_QMB_1_MAX_READS_BEATS_FMASK);
}
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
}
+/* The internal inactivity timer clock is used for the aggregation timer */
+#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
+
+/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
+ * field to represent the given number of microseconds. The value is one
+ * less than the number of timer ticks in the requested period. 0 is not
+ * a valid granularity value.
+ */
+static u32 ipa_aggr_granularity_val(u32 usec)
+{
+ /* assert(usec != 0); */
+
+ return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
+}
+
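/* Aside: a quick worked example of the conversion above, using the
 * 500 microsecond aggregation granularity this driver typically
 * requests:
 *
 *	DIV_ROUND_CLOSEST(500 * 32000, 1000000) - 1 = 16 - 1 = 15
 *
 * so a 500 us period programs the AGGR_GRANULARITY field with 15.
 */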
/* IPA uses unified Qtime starting at IPA v4.5, implementing various
* timestamps and timers independent of the IPA core clock rate. The
* Qtimer is based on a 56-bit timestamp incremented at each tick of
@@ -385,21 +383,22 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
/**
* ipa_hardware_config() - Primitive hardware initialization
* @ipa: IPA pointer
+ * @data: IPA configuration data
*/
-static void ipa_hardware_config(struct ipa *ipa)
+static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
enum ipa_version version = ipa->version;
u32 granularity;
u32 val;
- /* IPA v4.5 has no backward compatibility register */
+ /* IPA v4.5+ has no backward compatibility register */
if (version < IPA_VERSION_4_5) {
- val = ipa_reg_bcr_val(version);
+ val = data->backward_compat;
iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
}
/* Implement some hardware workarounds */
- if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) {
+ if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
/* Enable open global clocks (not needed for IPA v4.5) */
val = GLOBAL_FMASK;
val |= GLOBAL_2X_CLK_FMASK;
@@ -414,7 +413,7 @@ static void ipa_hardware_config(struct ipa *ipa)
ipa_hardware_config_comp(ipa);
/* Configure system bus limits */
- ipa_hardware_config_qsb(ipa);
+ ipa_hardware_config_qsb(ipa, data);
if (version < IPA_VERSION_4_5) {
/* Configure aggregation timer granularity */
@@ -448,151 +447,6 @@ static void ipa_hardware_deconfig(struct ipa *ipa)
ipa_hardware_dcd_deconfig(ipa);
}
-#ifdef IPA_VALIDATION
-
-static bool ipa_resource_limits_valid(struct ipa *ipa,
- const struct ipa_resource_data *data)
-{
- u32 group_count;
- u32 i;
- u32 j;
-
- /* We program at most 6 source or destination resource group limits */
- BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6);
-
- group_count = ipa_resource_group_src_count(ipa->version);
- if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX)
- return false;
-
- /* Return an error if a non-zero resource limit is specified
- * for a resource group not supported by hardware.
- */
- for (i = 0; i < data->resource_src_count; i++) {
- const struct ipa_resource_src *resource;
-
- resource = &data->resource_src[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++)
- if (resource->limits[j].min || resource->limits[j].max)
- return false;
- }
-
- group_count = ipa_resource_group_dst_count(ipa->version);
- if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX)
- return false;
-
- for (i = 0; i < data->resource_dst_count; i++) {
- const struct ipa_resource_dst *resource;
-
- resource = &data->resource_dst[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++)
- if (resource->limits[j].min || resource->limits[j].max)
- return false;
- }
-
- return true;
-}
-
-#else /* !IPA_VALIDATION */
-
-static bool ipa_resource_limits_valid(struct ipa *ipa,
- const struct ipa_resource_data *data)
-{
- return true;
-}
-
-#endif /* !IPA_VALIDATION */
-
-static void
-ipa_resource_config_common(struct ipa *ipa, u32 offset,
- const struct ipa_resource_limits *xlimits,
- const struct ipa_resource_limits *ylimits)
-{
- u32 val;
-
- val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
- val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
- if (ylimits) {
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
- }
-
- iowrite32(val, ipa->reg_virt + offset);
-}
-
-static void ipa_resource_config_src(struct ipa *ipa,
- const struct ipa_resource_src *resource)
-{
- u32 group_count = ipa_resource_group_src_count(ipa->version);
- const struct ipa_resource_limits *ylimits;
- u32 offset;
-
- offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
- ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
- if (group_count < 2)
- return;
-
- offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
- ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
- if (group_count < 4)
- return;
-
- offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
- ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-}
-
-static void ipa_resource_config_dst(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
-{
- u32 group_count = ipa_resource_group_dst_count(ipa->version);
- const struct ipa_resource_limits *ylimits;
- u32 offset;
-
- offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
- ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
- if (group_count < 2)
- return;
-
- offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
- ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
- if (group_count < 4)
- return;
-
- offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
- ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-}
-
-static int
-ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
-{
- u32 i;
-
- if (!ipa_resource_limits_valid(ipa, data))
- return -EINVAL;
-
- for (i = 0; i < data->resource_src_count; i++)
- ipa_resource_config_src(ipa, &data->resource_src[i]);
-
- for (i = 0; i < data->resource_dst_count; i++)
- ipa_resource_config_dst(ipa, &data->resource_dst[i]);
-
- return 0;
-}
-
-static void ipa_resource_deconfig(struct ipa *ipa)
-{
- /* Nothing to do */
-}
-
/**
* ipa_config() - Configure IPA hardware
* @ipa: IPA pointer
@@ -610,7 +464,7 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
*/
ipa_clock_get(ipa);
- ipa_hardware_config(ipa);
+ ipa_hardware_config(ipa, data);
ret = ipa_endpoint_config(ipa);
if (ret)
@@ -620,23 +474,20 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
if (ret)
goto err_endpoint_deconfig;
- ipa_table_config(ipa);
+ ipa_table_config(ipa); /* No deconfig required */
- /* Assign resource limitation to each group */
+ /* Assign resource limitation to each group; no deconfig required */
ret = ipa_resource_config(ipa, data->resource_data);
if (ret)
- goto err_table_deconfig;
+ goto err_mem_deconfig;
ret = ipa_modem_config(ipa);
if (ret)
- goto err_resource_deconfig;
+ goto err_mem_deconfig;
return 0;
-err_resource_deconfig:
- ipa_resource_deconfig(ipa);
-err_table_deconfig:
- ipa_table_deconfig(ipa);
+err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
@@ -654,8 +505,6 @@ err_hardware_deconfig:
static void ipa_deconfig(struct ipa *ipa)
{
ipa_modem_deconfig(ipa);
- ipa_resource_deconfig(ipa);
- ipa_table_deconfig(ipa);
ipa_mem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_hardware_deconfig(ipa);
@@ -668,6 +517,7 @@ static int ipa_firmware_load(struct device *dev)
struct device_node *node;
struct resource res;
phys_addr_t phys;
+ const char *path;
ssize_t size;
void *virt;
int ret;
@@ -685,9 +535,17 @@ static int ipa_firmware_load(struct device *dev)
return ret;
}
- ret = request_firmware(&fw, IPA_FWS_PATH, dev);
+ /* Use name from DTB if specified; use default for *any* error */
+ ret = of_property_read_string(dev->of_node, "firmware-name", &path);
if (ret) {
- dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH);
+ dev_dbg(dev, "error %d getting \"firmware-name\" resource\n",
+ ret);
+ path = IPA_FW_PATH_DEFAULT;
+ }
+
+ ret = request_firmware(&fw, path, dev);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\"\n", ret, path);
return ret;
}
@@ -700,13 +558,11 @@ static int ipa_firmware_load(struct device *dev)
goto out_release_firmware;
}
- ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID,
- virt, phys, size, NULL);
+ ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL);
if (ret)
- dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH);
+ dev_err(dev, "error %d loading \"%s\"\n", ret, path);
else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
- dev_err(dev, "error %d authenticating \"%s\"\n", ret,
- IPA_FWS_PATH);
+ dev_err(dev, "error %d authenticating \"%s\"\n", ret, path);
memunmap(virt);
out_release_firmware:
@@ -718,11 +574,23 @@ out_release_firmware:
static const struct of_device_id ipa_match[] = {
{
.compatible = "qcom,sdm845-ipa",
- .data = &ipa_data_sdm845,
+ .data = &ipa_data_v3_5_1,
},
{
.compatible = "qcom,sc7180-ipa",
- .data = &ipa_data_sc7180,
+ .data = &ipa_data_v4_2,
+ },
+ {
+ .compatible = "qcom,sdx55-ipa",
+ .data = &ipa_data_v4_5,
+ },
+ {
+ .compatible = "qcom,sm8350-ipa",
+ .data = &ipa_data_v4_9,
+ },
+ {
+ .compatible = "qcom,sc7280-ipa",
+ .data = &ipa_data_v4_11,
},
{ },
};
@@ -735,8 +603,14 @@ MODULE_DEVICE_TABLE(of, ipa_match);
static void ipa_validate_build(void)
{
#ifdef IPA_VALIDATE
- /* We assume we're working on 64-bit hardware */
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT));
+ /* At one time we assumed a 64-bit build, allowing some do_div()
+ * calls to be replaced by simple division or modulo operations.
+ * We currently only perform divide and modulo operations on u32,
+ * u16, or size_t objects, and of those only size_t has any chance
+ * of being a 64-bit value. (It should be guaranteed 32 bits wide
+ * on a 32-bit build, but there is no harm in verifying that.)
+ */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);
/* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
BUILD_BUG_ON(GSI_EE_AP != 0);
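Background note (illustrative, not part of the patch): on a 32-bit build the compiler cannot emit a native 64-bit division, so a u64 dividend must go through do_div(), while u32, u16, and (per the check above) size_t divides compile to plain instructions. A hedged sketch of the distinction:

#include <linux/math64.h>	/* do_div() */
#include <linux/types.h>

static u32 div_example(u64 big, size_t len)
{
	u32 rem;

	/* 64-bit dividend: must use do_div() to build on 32-bit */
	rem = do_div(big, 1000);	/* big /= 1000, returns remainder */

	/* size_t is 32 bits on 32-bit builds, so plain '/' is fine */
	return rem + (u32)(len / 8);
}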
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index f25029b9ec85..c5c3b1b7e67d 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -53,6 +53,8 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
* The AP informs the modem where its portions of memory are located
* in a QMI exchange that occurs at modem startup.
*
+ * There is no need for a matching ipa_mem_teardown() function.
+ *
* Return: 0 if successful, or a negative error code
*/
int ipa_mem_setup(struct ipa *ipa)
@@ -61,6 +63,7 @@ int ipa_mem_setup(struct ipa *ipa)
struct gsi_trans *trans;
u32 offset;
u16 size;
+ u32 val;
/* Get a transaction to define the header memory region and to zero
* the processing context and modem memory regions.
@@ -89,17 +92,13 @@ int ipa_mem_setup(struct ipa *ipa)
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset,
- ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
+ offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
+ val = proc_cntxt_base_addr_encoded(ipa->version, offset);
+ iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
return 0;
}
-void ipa_mem_teardown(struct ipa *ipa)
-{
- /* Nothing to do */
-}
-
#ifdef IPA_VALIDATE
static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index f99180f84f0d..a422aec69e5d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
@@ -28,6 +28,7 @@ struct ipa_mem_data;
* The set of memory regions is defined in configuration data. They are
* subject to these constraints:
 * - a zero offset and zero size represents an undefined region
+ * - a region's size does not include space for its "canary" values
* - a region's offset is defined to be *past* all "canary" values
* - offset must be large enough to account for all canaries
* - a region's size may be zero, but may still have canaries
@@ -56,11 +57,18 @@ enum ipa_mem_id {
IPA_MEM_AP_HEADER, /* 0 canaries */
IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
IPA_MEM_AP_PROC_CTX, /* 0 canaries */
- IPA_MEM_PDN_CONFIG, /* 2 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_NAT_TABLE, /* 4 canaries (IPA v4.5 and above) */
+ IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_QUOTA_AP, /* 0 canaries (IPA v4.0 and above) */
IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5 and above) */
IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_MODEM, /* 0 canaries */
+ IPA_MEM_MODEM, /* 0/2 canaries */
IPA_MEM_UC_EVENT_RING, /* 1 canary */
IPA_MEM_COUNT, /* Number of regions (not an index) */
};
@@ -69,7 +77,7 @@ enum ipa_mem_id {
* struct ipa_mem - IPA local memory region description
* @offset: offset in IPA memory space to base of the region
 * @size: size in bytes of the region
- * @canary_count # 32-bit "canary" values that precede region
+ * @canary_count: Number of 32-bit "canary" values that precede region
*/
struct ipa_mem {
u32 offset;
@@ -80,8 +88,7 @@ struct ipa_mem {
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);
-int ipa_mem_setup(struct ipa *ipa);
-void ipa_mem_teardown(struct ipa *ipa);
+int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
int ipa_mem_zero_modem(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 9b08eb823984..af9aedbde717 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/errno.h>
@@ -213,18 +213,18 @@ int ipa_modem_start(struct ipa *ipa)
goto out_set_state;
}
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
-
SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
ret = register_netdev(netdev);
- if (ret)
- free_netdev(netdev);
- else
+ if (!ret) {
ipa->modem_netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ } else {
+ free_netdev(netdev);
+ }
out_set_state:
if (ret)
@@ -240,7 +240,6 @@ int ipa_modem_stop(struct ipa *ipa)
{
struct net_device *netdev = ipa->modem_netdev;
enum ipa_modem_state state;
- int ret;
/* Only attempt to stop the modem if it's running */
state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
@@ -257,27 +256,20 @@ int ipa_modem_stop(struct ipa *ipa)
/* Prevent the modem from triggering a call to ipa_setup() */
ipa_smp2p_disable(ipa);
+ /* Stop the queue and disable the endpoints if it's open */
if (netdev) {
- /* Stop the queue and disable the endpoints if it's open */
- ret = ipa_stop(netdev);
- if (ret)
- goto out_set_state;
-
+ (void)ipa_stop(netdev);
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
ipa->modem_netdev = NULL;
unregister_netdev(netdev);
free_netdev(netdev);
- } else {
- ret = 0;
}
-out_set_state:
- if (ret)
- atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
- else
- atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
smp_mb__after_atomic();
- return ret;
+ return 0;
}
/* Treat a "clean" modem stop the same as a crash */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index e594bf3b600f..593665efbcf9 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = &ipa->mem[IPA_MEM_V4_ROUTE];
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
mem = &ipa->mem[IPA_MEM_V6_ROUTE];
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
mem = &ipa->mem[IPA_MEM_V4_FILTER];
req.v4_filter_tbl_start_valid = 1;
@@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count =
- mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
@@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count =
- mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
@@ -379,8 +377,8 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* None of the stats fields are valid (IPA v4.0 and above) */
- if (ipa->version != IPA_VERSION_3_5_1) {
- mem = &ipa->mem[IPA_MEM_STATS_QUOTA];
+ if (ipa->version >= IPA_VERSION_4_0) {
+ mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM];
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 3993687593d0..b6f2055d35a6 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -13,11 +13,15 @@ struct ipa;
/**
* struct ipa_qmi - QMI state associated with an IPA
- * @client_handle - used to send an QMI requests to the modem
- * @server_handle - used to handle QMI requests from the modem
- * @initialized - whether QMI initialization has completed
- * @indication_register_received - tracks modem request receipt
- * @init_driver_response_received - tracks modem response receipt
+ * @client_handle: Used to send QMI requests to the modem
+ * @server_handle: Used to handle QMI requests from the modem
+ * @modem_sq: QMAP socket address for the modem QMI server
+ * @init_driver_work: Work structure used for INIT_DRIVER message handling
+ * @initial_boot: True if first boot has not yet completed
+ * @uc_ready: True once DRIVER_INIT_COMPLETE request received
+ * @modem_ready: True when INIT_DRIVER response received
+ * @indication_requested: True when INDICATION_REGISTER request received
+ * @indication_sent: True when INIT_COMPLETE indication sent
*/
struct ipa_qmi {
struct qmi_handle client_handle;
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 73413371e3d3..6838e8065072 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -56,7 +56,7 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = {
.elem_size =
sizeof_field(struct ipa_indication_register_req,
ipa_mhi_ready_ind_valid),
- .tlv_type = 0x11,
+ .tlv_type = 0x12,
.offset = offsetof(struct ipa_indication_register_req,
ipa_mhi_ready_ind_valid),
},
@@ -66,11 +66,51 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = {
.elem_size =
sizeof_field(struct ipa_indication_register_req,
ipa_mhi_ready_ind),
- .tlv_type = 0x11,
+ .tlv_type = 0x12,
.offset = offsetof(struct ipa_indication_register_req,
ipa_mhi_ready_ind),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ endpoint_desc_ind_valid),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_indication_register_req,
+ endpoint_desc_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ endpoint_desc_ind),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_indication_register_req,
+ endpoint_desc_ind),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ bw_change_ind_valid),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_indication_register_req,
+ bw_change_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ bw_change_ind),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_indication_register_req,
+ bw_change_ind),
+ },
+ {
.data_type = QMI_EOTI,
},
};
@@ -530,7 +570,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
hw_stats_quota_base_addr_valid),
},
{
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
@@ -545,17 +585,17 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_quota_size_valid),
- .tlv_type = 0x1f,
+ .tlv_type = 0x20,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_quota_size_valid),
},
{
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_quota_size),
- .tlv_type = 0x1f,
+ .tlv_type = 0x20,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_quota_size),
},
@@ -564,18 +604,38 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_drop_size_valid),
- .tlv_type = 0x1f,
+ .tlv_type = 0x22,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_drop_size_valid),
},
{
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_drop_size),
- .tlv_type = 0x1f,
+ .tlv_type = 0x22,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_drop_size),
},
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 12b6621f4b0e..3233d145fd87 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -24,7 +24,7 @@
* information for each field. The qmi_send_*() interfaces require
* the message size to be provided.
*/
-#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 12 /* -> server handle */
+#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 20 /* -> server handle */
#define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */
#define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */
#define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */
@@ -44,6 +44,10 @@ struct ipa_indication_register_req {
u8 data_usage_quota_reached;
u8 ipa_mhi_ready_ind_valid;
u8 ipa_mhi_ready_ind;
+ u8 endpoint_desc_ind_valid;
+ u8 endpoint_desc_ind;
+ u8 bw_change_ind_valid;
+ u8 bw_change_ind;
};
/* The response to a IPA_QMI_INDICATION_REGISTER request consists only of
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 732e691e9aa6..286ea9634c49 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
@@ -66,14 +66,16 @@ struct ipa;
*/
#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
-/* The next field is not supported for IPA v4.1 */
+/* The next field is not supported for IPA v4.0+, not present for IPA v4.5+ */
#define ENABLE_FMASK GENMASK(0, 0)
+/* The next field is present for IPA v4.7+ */
+#define RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS_FMASK GENMASK(0, 0)
#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
-/* The next field is not present for IPA v4.5 */
+/* The next field is not present for IPA v4.5+ */
#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
-/* The remaining fields are not present for IPA v3.5.1 */
+/* The next twelve fields are present for IPA v4.0+ */
#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
@@ -86,9 +88,41 @@ struct ipa;
#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
-#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17)
-/* The next field is present for IPA v4.5 */
-#define IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN_FMASK GENMASK(21, 21)
+/* The next five fields are present for IPA v4.9+ */
+#define QMB_RAM_RD_CACHE_DISABLE_FMASK GENMASK(19, 19)
+#define GENQMB_AOOOWR_FMASK GENMASK(20, 20)
+#define IF_OUT_OF_BUF_STOP_RESET_MASK_EN_FMASK GENMASK(21, 21)
+#define GEN_QMB_1_DYNAMIC_ASIZE_FMASK GENMASK(30, 30)
+#define GEN_QMB_0_DYNAMIC_ASIZE_FMASK GENMASK(31, 31)
+
+/* Encoded value for COMP_CFG register ATOMIC_FETCHER_ARB_LOCK_DIS field */
+static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
+ u32 mask)
+{
+ /* assert(version >= IPA_VERSION_4_0); */
+
+ if (version < IPA_VERSION_4_9)
+ return u32_encode_bits(mask, GENMASK(20, 17));
+
+ if (version == IPA_VERSION_4_9)
+ return u32_encode_bits(mask, GENMASK(24, 22));
+
+ return u32_encode_bits(mask, GENMASK(23, 22));
+}
+
+/* Encoded value for COMP_CFG register FULL_FLUSH_WAIT_RS_CLOSURE_EN field */
+static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
+ bool enable)
+{
+ u32 val = enable ? 1 : 0;
+
+ /* assert(version >= IPA_VERSION_4_5); */
+
+ if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
+ return u32_encode_bits(val, GENMASK(21, 21));
+
+ return u32_encode_bits(val, GENMASK(17, 17));
+}
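Usage sketch (illustrative; the mask value and the read-modify-write are hypothetical, real settings come from the driver's hardware configuration code):

/* Sketch: assumes IPA v4.5+, with ipa->reg_virt mapped and version set */
static void comp_cfg_sketch(struct ipa *ipa)
{
	u32 val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);

	val |= arbitration_lock_disable_encoded(ipa->version, 0x3);
	val |= full_flush_rsc_closure_en_encoded(ipa->version, true);

	iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
}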
#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
#define RX_FMASK GENMASK(0, 0)
@@ -108,13 +142,15 @@ struct ipa;
#define ACK_MNGR_FMASK GENMASK(14, 14)
#define D_DCPH_FMASK GENMASK(15, 15)
#define H_DCPH_FMASK GENMASK(16, 16)
-/* The next field is not present for IPA v4.5 */
+/* The next field is not present for IPA v4.5+ */
#define DCMP_FMASK GENMASK(17, 17)
+/* The next three fields are present for IPA v3.5+ */
#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
#define TX_0_FMASK GENMASK(19, 19)
#define TX_1_FMASK GENMASK(20, 20)
+/* The next field is present for IPA v3.5.1+ */
#define FNR_FMASK GENMASK(21, 21)
-/* The remaining fields are not present for IPA v3.5.1 */
+/* The next eight fields are present for IPA v4.0+ */
#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
@@ -123,8 +159,10 @@ struct ipa;
#define GSI_IF_FMASK GENMASK(27, 27)
#define GLOBAL_FMASK GENMASK(28, 28)
#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
-/* The next field is present for IPA v4.5 */
+/* The next field is present for IPA v4.5+ */
#define DPL_FIFO_FMASK GENMASK(30, 30)
+/* The next field is present for IPA v4.7+ */
+#define DRBIP_FMASK GENMASK(31, 31)
#define IPA_REG_ROUTE_OFFSET 0x00000048
#define ROUTE_DIS_FMASK GENMASK(0, 0)
@@ -145,13 +183,13 @@ struct ipa;
#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are not present for IPA v3.5.1 */
+/* The next two fields are present for IPA v4.0+ */
#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
{
- if (version == IPA_VERSION_3_5_1)
+ if (version < IPA_VERSION_4_0)
return 0x000008c;
return 0x0000148;
@@ -159,7 +197,7 @@ static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
{
- if (version == IPA_VERSION_3_5_1)
+ if (version < IPA_VERSION_4_0)
return 0x0000090;
return 0x000014c;
@@ -174,96 +212,79 @@ static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
{
- if (version == IPA_VERSION_3_5_1)
+ if (version < IPA_VERSION_4_0)
return 0x0000010c;
return 0x000000b4;
}
-/* The next register is not present for IPA v4.5 */
+/* The next register is not present for IPA v4.5+ */
#define IPA_REG_BCR_OFFSET 0x000001d0
-/* The next two fields are not present for IPA v4.2 */
+/* The next two fields are not present for IPA v4.2+ */
#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
-/* The next field is invalid for IPA v4.1 */
+/* The next field is invalid for IPA v4.0+ */
#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
-/* The next two fields are not present for IPA v4.2 */
+/* The next two fields are not present for IPA v4.2+ */
#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
+/* The next five fields are present for IPA v3.5+ */
#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
-/* Backward compatibility register value to use for each version */
-static inline u32 ipa_reg_bcr_val(enum ipa_version version)
+/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */
+#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8
+
+/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */
+static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version,
+ u32 addr)
{
- if (version == IPA_VERSION_3_5_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
- BCR_TX_NOT_USING_BRESP_FMASK |
- BCR_SUSPEND_L2_IRQ_FMASK |
- BCR_HOLB_DROP_L2_IRQ_FMASK |
- BCR_DUAL_TX_FMASK;
-
- if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
- BCR_SUSPEND_L2_IRQ_FMASK |
- BCR_HOLB_DROP_L2_IRQ_FMASK |
- BCR_DUAL_TX_FMASK;
-
- /* assert(version != IPA_VERSION_4_5); */
-
- return 0x00000000;
-}
+ if (version < IPA_VERSION_4_5)
+ return u32_encode_bits(addr, GENMASK(16, 0));
-/* The value of the next register must be a multiple of 8 */
-#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
+ return u32_encode_bits(addr, GENMASK(17, 0));
+}
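For reference, this encoder is used by the ipa_mem_setup() change earlier in this patch:

	offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
	val = proc_cntxt_base_addr_encoded(ipa->version, offset);
	iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);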
/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
-/* The next register is not present for IPA v4.5 */
+/* The next register is not present for IPA v4.5+ */
#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+/* The next field is not present for IPA v3.5+ */
+#define EOT_COAL_GRANULARITY GENMASK(3, 0)
#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
-/* The internal inactivity timer clock is used for the aggregation timer */
-#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
-
-/* Compute the value to use in the AGGR_GRANULARITY field representing the
- * given number of microseconds. The value is one less than the number of
- * timer ticks in the requested period. 0 not a valid granularity value.
- */
-static inline u32 ipa_aggr_granularity_val(u32 usec)
-{
- return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
-}
-
-/* The next register is not present for IPA v4.5 */
+/* The next register is present for IPA v3.5+ */
#define IPA_REG_TX_CFG_OFFSET 0x000001fc
-/* The first three fields are present for IPA v3.5.1 only */
+/* The next three fields are not present for IPA v4.0+ */
#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
-/* The next six fields are present for IPA v4.0 and above */
+/* The next six fields are present for IPA v4.0+ */
#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
#define PA_MASK_EN_FMASK GENMASK(12, 12)
#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
-/* The next field is present for IPA v4.5 */
+/* The next field is present for IPA v4.5+ */
#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
-/* The next two fields are present for IPA v4.2 only */
+/* The next field is present for IPA v4.2+, but not IPA v4.5 */
#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
+/* The next field is present for IPA v4.2 only */
#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
+/* The next register is present for IPA v3.5+ */
#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
+/* The next register is present for IPA v3.5+ */
static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
{
if (version >= IPA_VERSION_4_2)
@@ -275,19 +296,19 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
-/* The next register is present for IPA v4.5 */
+/* The next register is present for IPA v4.5+ */
#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
-/* The next register is present for IPA v4.5 */
+/* The next register is present for IPA v4.5+ */
#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
#define DIV_VALUE_FMASK GENMASK(8, 0)
#define DIV_ENABLE_FMASK GENMASK(31, 31)
-/* The next register is present for IPA v4.5 */
+/* The next register is present for IPA v4.5+ */
#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
#define GRAN_0_FMASK GENMASK(2, 0)
#define GRAN_1_FMASK GENMASK(5, 3)
@@ -304,63 +325,23 @@ enum ipa_pulse_gran {
IPA_GRAN_655350_US = 0x7,
};
-/* # IPA source resource groups available based on version */
-static inline u32 ipa_resource_group_src_count(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- return 4;
-
- case IPA_VERSION_4_2:
- return 1;
-
- case IPA_VERSION_4_5:
- return 5;
-
- default:
- return 0;
- }
-}
-
-/* # IPA destination resource groups available based on version */
-static inline u32 ipa_resource_group_dst_count(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_5_1:
- return 3;
-
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- return 4;
-
- case IPA_VERSION_4_2:
- return 1;
-
- case IPA_VERSION_4_5:
- return 5;
-
- default:
- return 0;
- }
-}
-
-/* Not all of the following are valid (depends on the count, above) */
+/* Not all of the following are present (depends on IPA version) */
#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000400 + 0x0020 * (rt))
#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
(0x00000404 + 0x0020 * (rt))
-/* The next register is only present for IPA v4.5 */
#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000408 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
+ (0x0000040c + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000500 + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
(0x00000504 + 0x0020 * (rt))
-/* The next register is only present for IPA v4.5 */
#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000508 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
+ (0x0000050c + 0x0020 * (rt))
/* The next four fields are used for all resource group registers */
#define X_MIN_LIM_FMASK GENMASK(5, 0)
#define X_MAX_LIM_FMASK GENMASK(13, 8)
@@ -370,8 +351,9 @@ static inline u32 ipa_resource_group_dst_count(enum ipa_version version)
#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
(0x00000800 + 0x0070 * (ep))
-/* The next field should only used for IPA v3.5.1 */
+/* Valid only for RX (IPA producer) endpoints (do not use for IPA v4.0+) */
#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
+/* Valid only for TX (IPA consumer) endpoints */
#define ENDP_DELAY_FMASK GENMASK(1, 1)
#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
@@ -381,11 +363,23 @@ static inline u32 ipa_resource_group_dst_count(enum ipa_version version)
#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
-/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
enum ipa_cs_offload_en {
IPA_CS_OFFLOAD_NONE = 0x0,
- IPA_CS_OFFLOAD_UL = 0x1,
- IPA_CS_OFFLOAD_DL = 0x2,
+ IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
+ IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
+};
+
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \
+ (0x0000080c + 0x0070 * (ep))
+#define NAT_EN_FMASK GENMASK(1, 0)
+
+/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
+enum ipa_nat_en {
+ IPA_NAT_BYPASS = 0x0,
+ IPA_NAT_SRC = 0x1,
+ IPA_NAT_DST = 0x2,
};
#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
@@ -396,11 +390,12 @@ enum ipa_cs_offload_en {
#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
+/* The next field is not present for IPA v4.9+ */
#define HDR_A5_MUX_FMASK GENMASK(26, 26)
#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
-/* The next field is not present for IPA v4.5 */
+/* The next field is not present for IPA v4.5+ */
#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
-/* The next two fields are present for IPA v4.5 */
+/* The next two fields are present for IPA v4.5+ */
#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
@@ -452,7 +447,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-/* The next three fields are present for IPA v4.5 */
+/* The next three fields are present for IPA v4.5+ */
#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
@@ -465,16 +460,18 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
(0x00000820 + 0x0070 * (txep))
#define MODE_FMASK GENMASK(2, 0)
-/* The next field is present for IPA v4.5 */
+/* The next field is present for IPA v4.5+ */
#define DCPH_ENABLE_FMASK GENMASK(3, 3)
#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
#define PAD_EN_FMASK GENMASK(29, 29)
-/* The next register is not present for IPA v4.5 */
+/* The next field is not present for IPA v4.5+ */
#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+/* The next field is present for IPA v4.9+ */
+#define DRBIP_ACL_ENABLE GENMASK(30, 30)
-/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
enum ipa_mode {
IPA_BASIC = 0x0,
IPA_ENABLE_FRAMING_HDLC = 0x1,
@@ -486,47 +483,54 @@ enum ipa_mode {
(0x00000824 + 0x0070 * (ep))
#define AGGR_EN_FMASK GENMASK(1, 0)
#define AGGR_TYPE_FMASK GENMASK(4, 2)
+
+/* The legacy value is used for IPA hardware before IPA v4.5 */
static inline u32 aggr_byte_limit_fmask(bool legacy)
{
return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
}
+/* The legacy value is used for IPA hardware before IPA v4.5 */
static inline u32 aggr_time_limit_fmask(bool legacy)
{
return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
}
+/* The legacy value is used for IPA hardware before IPA v4.5 */
static inline u32 aggr_pkt_limit_fmask(bool legacy)
{
return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
}
+/* The legacy value is used for IPA hardware before IPA v4.5 */
static inline u32 aggr_sw_eof_active_fmask(bool legacy)
{
return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
}
+/* The legacy value is used for IPA hardware before IPA v4.5 */
static inline u32 aggr_force_close_fmask(bool legacy)
{
return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
}
+/* The legacy value is used for IPA hardware before IPA v4.5 */
static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
{
return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
}
-/* The next field is present for IPA v4.5 */
+/* The next field is present for IPA v4.5+ */
#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
-/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
+/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0x0,
- IPA_ENABLE_AGGR = 0x1,
- IPA_ENABLE_DEAGGR = 0x2,
+ IPA_BYPASS_AGGR = 0x0, /* (TX, RX) */
+ IPA_ENABLE_AGGR = 0x1, /* (RX) */
+ IPA_ENABLE_DEAGGR = 0x2, /* (TX) */
};
-/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */
enum ipa_aggr_type {
IPA_MBIM_16 = 0x0,
IPA_HDLC = 0x1,
@@ -567,53 +571,73 @@ enum ipa_aggr_type {
/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
{
- switch (version) {
- case IPA_VERSION_4_2:
- return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
- case IPA_VERSION_4_5:
+ if (version < IPA_VERSION_3_5 || version == IPA_VERSION_4_5)
return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
- default:
- return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
- }
+
+ if (version == IPA_VERSION_4_2 || version == IPA_VERSION_4_7)
+ return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+
+ return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
}
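Examples (illustrative): the field width, and thus the largest group number that survives encoding, varies by version:

/* Sketch: each value fits its version's field width */
u32 a = rsrc_grp_encoded(IPA_VERSION_4_5, 5);	/* 3-bit field: 0x5 */
u32 b = rsrc_grp_encoded(IPA_VERSION_4_2, 1);	/* 1-bit field: 0x1 */
u32 c = rsrc_grp_encoded(IPA_VERSION_4_0, 2);	/* 2-bit field: 0x2 */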
/* Valid only for TX (IPA consumer) endpoints */
#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
(0x0000083c + 0x0070 * (txep))
-#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0)
-#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4)
-#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
-#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+#define SEQ_TYPE_FMASK GENMASK(7, 0)
+#define SEQ_REP_TYPE_FMASK GENMASK(15, 8)
/**
- * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
- * @IPA_SEQ_DMA_ONLY: only DMA is performed
- * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
- * second packet processing pass + no decipher + microcontroller
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
- * packet processing + no decipher + no uCP + HPS REP DMA parser
- * @IPA_SEQ_INVALID: invalid sequencer type
+ * enum ipa_seq_type - HPS and DPS sequencer type
+ * @IPA_SEQ_DMA: Perform DMA only
+ * @IPA_SEQ_1_PASS: One pass through the pipeline
+ * @IPA_SEQ_2_PASS_SKIP_LAST_UC: Two passes, skip the microprocessor
+ * @IPA_SEQ_1_PASS_SKIP_LAST_UC: One pass, skip the microprocessor
+ * @IPA_SEQ_2_PASS: Two passes through the pipeline
+ * @IPA_SEQ_3_PASS_SKIP_LAST_UC: Three passes, skip the microprocessor
+ * @IPA_SEQ_DECIPHER: Optional deciphering step (combined)
*
- * The values defined here are broken into 4-bit nibbles that are written
- * into fields of the ENDP_INIT_SEQ registers.
+ * The low-order byte of the sequencer type register defines the number of
+ * passes a packet takes through the IPA pipeline. The last pass through can
+ * optionally skip the microprocessor. Deciphering is optional for all types;
+ * if enabled, an additional mask (two bits) is added to the type value.
+ *
+ * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are
+ * supported (or meaningful).
*/
enum ipa_seq_type {
- IPA_SEQ_DMA_ONLY = 0x0000,
- IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
- IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
- IPA_SEQ_INVALID = 0xffff,
+ IPA_SEQ_DMA = 0x00,
+ IPA_SEQ_1_PASS = 0x02,
+ IPA_SEQ_2_PASS_SKIP_LAST_UC = 0x04,
+ IPA_SEQ_1_PASS_SKIP_LAST_UC = 0x06,
+ IPA_SEQ_2_PASS = 0x0a,
+ IPA_SEQ_3_PASS_SKIP_LAST_UC = 0x0c,
+ /* The next value can be ORed with the above */
+ IPA_SEQ_DECIPHER = 0x11,
+};
+
+/**
+ * enum ipa_seq_rep_type - replicated packet sequencer type
+ * @IPA_SEQ_REP_DMA_PARSER: DMA parser for replicated packets
+ *
+ * This goes in the second byte of the endpoint sequencer type register.
+ *
+ * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are
+ * supported (or meaningful).
+ */
+enum ipa_seq_rep_type {
+ IPA_SEQ_REP_DMA_PARSER = 0x08,
};
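Composition sketch (illustrative; endpoint_id is hypothetical): the two byte-wide fields are encoded independently into the ENDP_INIT_SEQ register:

u32 val;

/* Two passes skipping the last microprocessor step, plus the
 * replication DMA parser for replicated packets.
 */
val = u32_encode_bits(IPA_SEQ_2_PASS_SKIP_LAST_UC, SEQ_TYPE_FMASK);
val |= u32_encode_bits(IPA_SEQ_REP_DMA_PARSER, SEQ_REP_TYPE_FMASK);
iowrite32(val, ipa->reg_virt + IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint_id));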
#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
(0x00000840 + 0x0070 * (ep))
#define STATUS_EN_FMASK GENMASK(0, 0)
#define STATUS_ENDP_FMASK GENMASK(5, 1)
-/* The next field is not present for IPA v4.5 */
+/* The next field is not present for IPA v4.5+ */
#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is not present for IPA v3.5.1 */
+/* The next field is present for IPA v4.0+ */
#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-/* The next register is only present for IPA versions that support hashing */
+/* The next register is not present for IPA v4.2 (which has no hashing support) */
#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
(0x0000085c + 0x0070 * (er))
#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
@@ -634,31 +658,87 @@ enum ipa_seq_type {
#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
-#define IPA_REG_IRQ_STTS_OFFSET \
- IPA_REG_IRQ_STTS_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_STTS_EE_N_OFFSET(ee) \
- (0x00003008 + 0x1000 * (ee))
+static inline u32 ipa_reg_irq_stts_ee_n_offset(enum ipa_version version,
+ u32 ee)
+{
+ if (version < IPA_VERSION_4_9)
+ return 0x00003008 + 0x1000 * ee;
+
+ return 0x00004008 + 0x1000 * ee;
+}
+
+static inline u32 ipa_reg_irq_stts_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_stts_ee_n_offset(version, GSI_EE_AP);
+}
+
+static inline u32 ipa_reg_irq_en_ee_n_offset(enum ipa_version version, u32 ee)
+{
+ if (version < IPA_VERSION_4_9)
+ return 0x0000300c + 0x1000 * ee;
+
+ return 0x0000400c + 0x1000 * ee;
+}
+
+static inline u32 ipa_reg_irq_en_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_en_ee_n_offset(version, GSI_EE_AP);
+}
+
+static inline u32 ipa_reg_irq_clr_ee_n_offset(enum ipa_version version, u32 ee)
+{
+ if (version < IPA_VERSION_4_9)
+ return 0x00003010 + 0x1000 * ee;
-#define IPA_REG_IRQ_EN_OFFSET \
- IPA_REG_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_EN_EE_N_OFFSET(ee) \
- (0x0000300c + 0x1000 * (ee))
+ return 0x00004010 + 0x1000 * ee;
+}
+
+static inline u32 ipa_reg_irq_clr_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_clr_ee_n_offset(version, GSI_EE_AP);
+}
-#define IPA_REG_IRQ_CLR_OFFSET \
- IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
- (0x00003010 + 0x1000 * (ee))
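Usage sketch (illustrative): acknowledging pending IPA interrupts with the new version-dependent offsets (the register block moved from 0x3000 to 0x4000 at IPA v4.9):

/* Sketch: read pending IPA interrupts, then clear them */
static void ipa_irq_ack_all_sketch(struct ipa *ipa)
{
	u32 pending;

	pending = ioread32(ipa->reg_virt +
			   ipa_reg_irq_stts_offset(ipa->version));
	iowrite32(pending, ipa->reg_virt +
		  ipa_reg_irq_clr_offset(ipa->version));
}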
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
* @IPA_IRQ_UC_0: Microcontroller event interrupt
* @IPA_IRQ_UC_1: Microcontroller response interrupt
* @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ * @IPA_IRQ_COUNT: Number of IRQ ids (must be last)
*
* IRQ types not described above are not currently used.
+ *
+ * @IPA_IRQ_BAD_SNOC_ACCESS: (Not currently used)
+ * @IPA_IRQ_EOT_COAL: (Not currently used)
+ * @IPA_IRQ_UC_2: (Not currently used)
+ * @IPA_IRQ_UC_3: (Not currently used)
+ * @IPA_IRQ_UC_IN_Q_NOT_EMPTY: (Not currently used)
+ * @IPA_IRQ_UC_RX_CMD_Q_NOT_FULL: (Not currently used)
+ * @IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY: (Not currently used)
+ * @IPA_IRQ_RX_ERR: (Not currently used)
+ * @IPA_IRQ_DEAGGR_ERR: (Not currently used)
+ * @IPA_IRQ_TX_ERR: (Not currently used)
+ * @IPA_IRQ_STEP_MODE: (Not currently used)
+ * @IPA_IRQ_PROC_ERR: (Not currently used)
+ * @IPA_IRQ_TX_HOLB_DROP: (Not currently used)
+ * @IPA_IRQ_BAM_GSI_IDLE: (Not currently used)
+ * @IPA_IRQ_PIPE_YELLOW_BELOW: (Not currently used)
+ * @IPA_IRQ_PIPE_RED_BELOW: (Not currently used)
+ * @IPA_IRQ_PIPE_YELLOW_ABOVE: (Not currently used)
+ * @IPA_IRQ_PIPE_RED_ABOVE: (Not currently used)
+ * @IPA_IRQ_UCP: (Not currently used)
+ * @IPA_IRQ_DCMP: (Not currently used)
+ * @IPA_IRQ_GSI_EE: (Not currently used)
+ * @IPA_IRQ_GSI_IPA_IF_TLV_RCVD: (Not currently used)
+ * @IPA_IRQ_GSI_UC: (Not currently used)
+ * @IPA_IRQ_TLV_LEN_MIN_DSM: (Not currently used)
+ * @IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN: (Not currently used)
+ * @IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN: (Not currently used)
+ * @IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN: (Not currently used)
*/
enum ipa_irq_id {
IPA_IRQ_BAD_SNOC_ACCESS = 0x0,
- /* Type (bit) 0x1 is not defined */
+ /* The next bit is not present for IPA v3.5+ */
+ IPA_IRQ_EOT_COAL = 0x1,
IPA_IRQ_UC_0 = 0x2,
IPA_IRQ_UC_1 = 0x3,
IPA_IRQ_UC_2 = 0x4,
@@ -679,38 +759,89 @@ enum ipa_irq_id {
IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13,
IPA_IRQ_PIPE_RED_ABOVE = 0x14,
IPA_IRQ_UCP = 0x15,
+ /* The next bit is not present for IPA v4.5+ */
IPA_IRQ_DCMP = 0x16,
IPA_IRQ_GSI_EE = 0x17,
IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18,
IPA_IRQ_GSI_UC = 0x19,
- /* The next bit is present for IPA v4.5 */
+ /* The next bit is present for IPA v4.5+ */
IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a,
+ /* The next three bits are present for IPA v4.9+ */
+ IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN = 0x1b,
+ IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN = 0x1c,
+ IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN = 0x1d,
IPA_IRQ_COUNT, /* Last; not an id */
};
-#define IPA_REG_IRQ_UC_OFFSET \
- IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
- (0x0000301c + 0x1000 * (ee))
+static inline u32 ipa_reg_irq_uc_ee_n_offset(enum ipa_version version, u32 ee)
+{
+ if (version < IPA_VERSION_4_9)
+ return 0x0000301c + 0x1000 * ee;
+
+ return 0x0000401c + 0x1000 * ee;
+}
+
+static inline u32 ipa_reg_irq_uc_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_uc_ee_n_offset(version, GSI_EE_AP);
+}
+
#define UC_INTR_FMASK GENMASK(0, 0)
/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
- IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
- (0x00003030 + 0x1000 * (ee))
-
-/* ipa->available defines the valid bits in the IRQ_SUSPEND_EN register */
-#define IPA_REG_IRQ_SUSPEND_EN_OFFSET \
- IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(ee) \
- (0x00003034 + 0x1000 * (ee))
-
-/* ipa->available defines the valid bits in the IRQ_SUSPEND_CLR register */
-#define IPA_REG_IRQ_SUSPEND_CLR_OFFSET \
- IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(ee) \
- (0x00003038 + 0x1000 * (ee))
+static inline u32
+ipa_reg_irq_suspend_info_ee_n_offset(enum ipa_version version, u32 ee)
+{
+ if (version == IPA_VERSION_3_0)
+ return 0x00003098 + 0x1000 * ee;
+
+ if (version < IPA_VERSION_4_9)
+ return 0x00003030 + 0x1000 * ee;
+
+ return 0x00004030 + 0x1000 * ee;
+}
+
+static inline u32
+ipa_reg_irq_suspend_info_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_suspend_info_ee_n_offset(version, GSI_EE_AP);
+}
+
+/* ipa->available defines the valid bits in the SUSPEND_EN register */
+static inline u32
+ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
+{
+ /* assert(version != IPA_VERSION_3_0); */
+
+ if (version < IPA_VERSION_4_9)
+ return 0x00003034 + 0x1000 * ee;
+
+ return 0x00004034 + 0x1000 * ee;
+}
+
+static inline u32
+ipa_reg_irq_suspend_en_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_suspend_en_ee_n_offset(version, GSI_EE_AP);
+}
+
+/* ipa->available defines the valid bits in the SUSPEND_CLR register */
+static inline u32
+ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
+{
+ /* assert(version != IPA_VERSION_3_0); */
+
+ if (version < IPA_VERSION_4_9)
+ return 0x00003038 + 0x1000 * ee;
+
+ return 0x00004038 + 0x1000 * ee;
+}
+
+static inline u32
+ipa_reg_irq_suspend_clr_offset(enum ipa_version version)
+{
+ return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP);
+}
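Usage sketch (illustrative; endpoint_id is hypothetical): enabling the SUSPEND interrupt for one endpoint, masked by ipa->available as noted above:

u32 offset = ipa_reg_irq_suspend_en_offset(ipa->version);
u32 val = ioread32(ipa->reg_virt + offset);

val |= BIT(endpoint_id) & ipa->available;
iowrite32(val, ipa->reg_virt + offset);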
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
new file mode 100644
index 000000000000..3b2dc216d3a6
--- /dev/null
+++ b/drivers/net/ipa/ipa_resource.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2021 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_reg.h"
+#include "ipa_resource.h"
+
+/**
+ * DOC: IPA Resources
+ *
+ * The IPA manages a set of resources internally for various purposes.
+ * A given IPA version has a fixed number of resource types, and a fixed
+ * total number of resources of each type. "Source" resource types
+ * are separate from "destination" resource types.
+ *
+ * Each version of IPA also has some number of resource groups. Each
+ * endpoint is assigned to a resource group, and all endpoints in the
+ * same group share pools of each type of resource. A subset of the
+ * total resources of each type is assigned for use by each group.
+ */
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+#ifdef IPA_VALIDATION
+ u32 group_count;
+ u32 i;
+ u32 j;
+
+ /* We program at most 8 source or destination resource group limits */
+ BUILD_BUG_ON(IPA_RESOURCE_GROUP_MAX > 8);
+
+ group_count = data->rsrc_group_src_count;
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
+ return false;
+
+ /* Return an error if a non-zero resource limit is specified
+ * for a resource group not supported by hardware.
+ */
+ for (i = 0; i < data->resource_src_count; i++) {
+ const struct ipa_resource *resource;
+
+ resource = &data->resource_src[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ group_count = data->rsrc_group_dst_count;
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
+ return false;
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ const struct ipa_resource *resource;
+
+ resource = &data->resource_dst[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+#endif /* !IPA_VALIDATION */
+ return true;
+}
+
+static void
+ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ const struct ipa_resource_limits *xlimits,
+ const struct ipa_resource_limits *ylimits)
+{
+ u32 val;
+
+ val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
+ val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ if (ylimits) {
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ }
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
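Worked example (illustrative; the limit values are hypothetical): one register carries the limits for a pair of groups, X in the low half and Y in the high half:

struct ipa_resource_limits grp0 = { .min = 1, .max = 12 };
struct ipa_resource_limits grp1 = { .min = 0, .max = 8 };
u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(0);

/* Packs grp0 into X_MIN/X_MAX and grp1 into Y_MIN/Y_MAX */
ipa_resource_config_common(ipa, offset, &grp0, &grp1);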
+static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = data->rsrc_group_src_count;
+ const struct ipa_resource_limits *ylimits;
+ const struct ipa_resource *resource;
+ u32 offset;
+
+ resource = &data->resource_src[resource_type];
+
+ offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
+
+ if (group_count < 3)
+ return;
+
+ offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
+
+ if (group_count < 5)
+ return;
+
+ offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
+
+ if (group_count < 7)
+ return;
+
+ offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 7 ? NULL : &resource->limits[7];
+ ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+}
+
+static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = data->rsrc_group_dst_count;
+ const struct ipa_resource_limits *ylimits;
+ const struct ipa_resource *resource;
+ u32 offset;
+
+ resource = &data->resource_dst[resource_type];
+
+ offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
+
+ if (group_count < 3)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
+
+ if (group_count < 5)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
+
+ if (group_count < 7)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ ylimits = group_count == 7 ? NULL : &resource->limits[7];
+ ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+}
+
+/* Configure resources; there is no ipa_resource_deconfig() */
+int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+{
+ u32 i;
+
+ if (!ipa_resource_limits_valid(ipa, data))
+ return -EINVAL;
+
+ for (i = 0; i < data->resource_src_count; i++)
+ ipa_resource_config_src(ipa, i, data);
+
+ for (i = 0; i < data->resource_dst_count; i++)
+ ipa_resource_config_dst(ipa, i, data);
+
+ return 0;
+}
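Data sketch (illustrative; all counts and limits are hypothetical, field names follow the usage above): the per-SoC configuration that feeds ipa_resource_config() might look like:

static const struct ipa_resource ipa_resource_src[] = {
	{ .limits[0] = { .min = 1, .max = 63 }, },
};

static const struct ipa_resource ipa_resource_dst[] = {
	{ .limits[0] = { .min = 1, .max = 63 }, },
};

static const struct ipa_resource_data ipa_resource_data = {
	.rsrc_group_src_count	= 1,
	.rsrc_group_dst_count	= 1,
	.resource_src_count	= ARRAY_SIZE(ipa_resource_src),
	.resource_src		= ipa_resource_src,
	.resource_dst_count	= ARRAY_SIZE(ipa_resource_dst),
	.resource_dst		= ipa_resource_dst,
};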
diff --git a/drivers/net/ipa/ipa_resource.h b/drivers/net/ipa/ipa_resource.h
new file mode 100644
index 000000000000..ef5818bff180
--- /dev/null
+++ b/drivers/net/ipa/ipa_resource.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+#ifndef _IPA_RESOURCE_H_
+#define _IPA_RESOURCE_H_
+
+struct ipa;
+struct ipa_resource_data;
+
+/**
+ * ipa_resource_config() - Configure resources
+ * @ipa: IPA pointer
+ * @data: IPA resource configuration data
+ *
+ * There is no need for a matching ipa_resource_deconfig() function.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data);
+
+#endif /* _IPA_RESOURCE_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index bf0e4063cfd9..20319438a841 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -28,7 +28,7 @@ void ipa_smp2p_exit(struct ipa *ipa);
/**
* ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
- * @IPA: IPA pointer
+ * @ipa: IPA pointer
*
* Prevent handling of the "setup ready" interrupt from the modem.
* This is used before initiating shutdown of the driver.
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index baaab3dd0e63..3168d72f4245 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -27,28 +27,38 @@
/**
* DOC: IPA Filter and Route Tables
*
- * The IPA has tables defined in its local shared memory that define filter
- * and routing rules. Each entry in these tables contains a 64-bit DMA
- * address that refers to DRAM (system memory) containing a rule definition.
+ * The IPA has tables defined in its local (IPA-resident) memory that define
+ * filter and routing rules. An entry in either of these tables is a little
+ * endian 64-bit "slot" that holds the address of a rule definition. (The
+ * size of these slots is 64 bits regardless of the host DMA address size.)
+ *
+ * Separate tables (both filter and route) are used for IPv4 and IPv6.
+ * There are normally also "hashed" filter and route tables, which are
+ * used with a hash of message metadata. Hashed operation is not supported
+ * by all IPA hardware (IPA v4.2 doesn't support hashed tables).
+ *
+ * Rules can be in local memory or in DRAM (system memory). The offset of
+ * an object (such as a route or filter table) in IPA-resident memory must
+ * be 128-byte aligned. An object in system memory (such as a route or
+ * filter rule) must be at an 8-byte aligned address. We currently only
+ * place route or filter rules in system memory.
+ *
* A rule consists of a contiguous block of 32-bit values terminated with
* 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
* represents "no filtering" or "no routing," and is the reset value for
- * filter or route table rules. Separate tables (both filter and route)
- * used for IPv4 and IPv6. Additionally, there can be hashed filter or
- * route tables, which are used when a hash of message metadata matches.
- * Hashed operation is not supported by all IPA hardware.
+ * filter or route table rules.
*
* Each filter rule is associated with an AP or modem TX endpoint, though
- * not all TX endpoints support filtering. The first 64-bit entry in a
+ * not all TX endpoints support filtering. The first 64-bit slot in a
* filter table is a bitmap indicating which endpoints have entries in
* the table. The low-order bit (bit 0) in this bitmap represents a
* special global filter, which applies to all traffic. This is not
* used in the current code. Bit 1, if set, indicates that there is an
- * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
- * table. Bit 2, if set, indicates there is an entry for endpoint 1,
- * and so on. Space is set aside in IPA local memory to hold as many
- * filter table entries as might be required, but typically they are not
- * all used.
+ * entry (i.e. slot containing a system address referring to a rule) for
+ * endpoint 0 in the table. Bit 3, if set, indicates there is an entry
+ * for endpoint 2, and so on. Space is set aside in IPA local memory to
+ * hold as many filter table entries as might be required, but typically
+ * they are not all used.
*
* The AP initializes all entries in a filter table to refer to a "zero"
* entry. Once initialized the modem and AP update the entries for
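
Per the bitmap layout described above (bit 0 reserved for the unused global filter, bit N covering endpoint N - 1), the first slot of a filter table can be derived from an endpoint bitmap by a one-bit shift. A minimal sketch, assuming a 32-bit endpoint map like the driver's filter_map:

    /* Endpoint i maps to bit i + 1; bit 0 is the global filter */
    static __le64 filter_table_first_slot(u32 endpoint_map)
    {
            return cpu_to_le64((u64)endpoint_map << 1);
    }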
@@ -96,9 +106,6 @@
* ----------------------
*/
-/* IPA hardware constrains filter and route tables alignment */
-#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
-
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
#define IPA_ROUTE_MODEM_COUNT 8
@@ -118,21 +125,14 @@
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
- /* IPA hardware accesses memory 128 bytes at a time. Addresses
- * referred to by entries in filter and route tables must be
- * aligned on 128-byte byte boundaries. The only rule address
- * ever use is the "zero rule", and it's aligned at the base
- * of a coherent DMA allocation.
- */
- BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
-
- /* Filter and route tables contain DMA addresses that refer to
- * filter or route rules. We use a fixed constant to represent
- * the size of either type of table entry. Code in ipa_table_init()
- * uses a pointer to __le64 to initialize table entriews.
+ /* Filter and route tables contain DMA addresses that refer
+ * to filter or route rules. But the size of a table entry
+ * is 64 bits regardless of what the size of an AP DMA address
+ * is. A fixed constant defines the size of an entry, and
+ * code in ipa_table_init() uses a pointer to __le64 to
+ * initialize tables.
*/
- BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
- BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
+ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
/* A "zero rule" is used to represent no filtering or no routing.
* It is a 64-bit block of zeroed memory. Code in ipa_table_init()
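
The relaxed BUILD_BUG_ON above only requires that a DMA address fit in a table slot rather than match its size exactly, which lets 32-bit dma_addr_t configurations build. Writing a slot then always widens to the fixed 64-bit little-endian format; a sketch:

    /* A rule address occupies a full __le64 slot even when
     * dma_addr_t is only 32 bits wide.
     */
    static void table_slot_set(__le64 *slot, dma_addr_t rule_addr)
    {
            *slot = cpu_to_le64(rule_addr);
    }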
@@ -163,7 +163,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
else
mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
: &ipa->mem[IPA_MEM_V4_ROUTE];
- size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
+ size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
} else {
if (ipv6)
mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
@@ -171,7 +171,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
else
mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
: &ipa->mem[IPA_MEM_V4_FILTER];
- size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
+ size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
}
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
@@ -239,11 +239,6 @@ static void ipa_table_validate_build(void)
#endif /* !IPA_VALIDATE */
-bool ipa_table_hash_support(struct ipa *ipa)
-{
- return ipa->version != IPA_VERSION_4_2;
-}
-
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
@@ -275,8 +270,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
if (filter)
first++; /* skip over bitmap */
- offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
- size = count * IPA_TABLE_ENTRY_SIZE;
+ offset = mem->offset + first * sizeof(__le64);
+ size = count * sizeof(__le64);
addr = ipa_table_addr(ipa, false, count);
ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
@@ -458,11 +453,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
count = hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
- count = mem->size / IPA_TABLE_ENTRY_SIZE;
- hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
+ count = mem->size / sizeof(__le64);
+ hash_count = hash_mem->size / sizeof(__le64);
}
- size = count * IPA_TABLE_ENTRY_SIZE;
- hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
+ size = count * sizeof(__le64);
+ hash_size = hash_count * sizeof(__le64);
addr = ipa_table_addr(ipa, filter, count);
hash_addr = ipa_table_addr(ipa, filter, hash_count);
@@ -502,11 +497,6 @@ int ipa_table_setup(struct ipa *ipa)
return 0;
}
-void ipa_table_teardown(struct ipa *ipa)
-{
- /* Nothing to do */ /* XXX Maybe reset the tables? */
-}
-
/**
* ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
* @endpoint: Endpoint whose filter hash tuple should be zeroed
@@ -530,6 +520,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+/* Configure a hashed filter table; there is no ipa_filter_deconfig() */
static void ipa_filter_config(struct ipa *ipa, bool modem)
{
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
@@ -550,11 +541,6 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
}
}
-static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
-{
- /* Nothing to do */
-}
-
static bool ipa_route_id_modem(u32 route_id)
{
return route_id >= IPA_ROUTE_MODEM_MIN &&
@@ -581,6 +567,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
iowrite32(val, ipa->reg_virt + offset);
}
+/* Configure a hashed route table; there is no ipa_route_deconfig() */
static void ipa_route_config(struct ipa *ipa, bool modem)
{
u32 route_id;
@@ -593,11 +580,7 @@ static void ipa_route_config(struct ipa *ipa, bool modem)
ipa_route_tuple_zero(ipa, route_id);
}
-static void ipa_route_deconfig(struct ipa *ipa, bool modem)
-{
- /* Nothing to do */
-}
-
+/* Configure filter and route tables; there is no ipa_table_deconfig() */
void ipa_table_config(struct ipa *ipa)
{
ipa_filter_config(ipa, false);
@@ -606,14 +589,6 @@ void ipa_table_config(struct ipa *ipa)
ipa_route_config(ipa, true);
}
-void ipa_table_deconfig(struct ipa *ipa)
-{
- ipa_route_deconfig(ipa, true);
- ipa_route_deconfig(ipa, false);
- ipa_filter_deconfig(ipa, true);
- ipa_filter_deconfig(ipa, false);
-}
-
/*
* Initialize a coherent DMA allocation containing initialized filter and
* route table data. This is used when initializing or resetting the IPA
@@ -663,7 +638,13 @@ int ipa_table_init(struct ipa *ipa)
ipa_table_validate_build();
- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ /* The IPA hardware requires route and filter table rules to be
+ * aligned on a 128-byte boundary. We put the "zero rule" at the
+ * base of the table area allocated here. The DMA address returned by
+ * dma_alloc_coherent() is guaranteed to be aligned to a power-of-2
+ * number of pages, which satisfies the rule alignment requirement.
+ */
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (!virt)
return -ENOMEM;
@@ -695,7 +676,7 @@ void ipa_table_exit(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
size_t size;
- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
ipa->table_addr = 0;
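
The alignment comment added to ipa_table_init() relies on dma_alloc_coherent() returning a buffer aligned to at least PAGE_SIZE, so the zero rule placed at the base automatically meets the hardware's 128-byte requirement. That assumption can be restated as a compile-time check; a sketch:

    /* Page alignment (4 KiB or more) implies 128-byte rule alignment */
    static void zero_rule_alignment_check(void)
    {
            BUILD_BUG_ON(PAGE_SIZE % 128);
    }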
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 1a68d20f19d6..1e2be9fce2f8 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -10,9 +10,6 @@
struct ipa;
-/* The size of a filter or route table entry */
-#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
-
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
@@ -24,7 +21,7 @@ struct ipa;
/**
* ipa_table_valid() - Validate route and filter table memory regions
* @ipa: IPA pointer
-
+ *
* Return: true if all regions are valid, false otherwise
*/
bool ipa_table_valid(struct ipa *ipa);
@@ -32,6 +29,7 @@ bool ipa_table_valid(struct ipa *ipa);
/**
* ipa_filter_map_valid() - Validate a filter table endpoint bitmap
* @ipa: IPA pointer
+ * @filter_mask: Filter table endpoint bitmap to check
*
* Return: true if all regions are valid, false otherwise
*/
@@ -55,7 +53,10 @@ static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
* ipa_table_hash_support() - Return true if hashed tables are supported
* @ipa: IPA pointer
*/
-bool ipa_table_hash_support(struct ipa *ipa);
+static inline bool ipa_table_hash_support(struct ipa *ipa)
+{
+ return ipa->version != IPA_VERSION_4_2;
+}
/**
* ipa_table_reset() - Reset filter and route tables entries to "none"
@@ -73,28 +74,20 @@ int ipa_table_hash_flush(struct ipa *ipa);
/**
* ipa_table_setup() - Set up filter and route tables
* @ipa: IPA pointer
+ *
+ * There is no need for a matching ipa_table_teardown() function.
*/
int ipa_table_setup(struct ipa *ipa);
/**
- * ipa_table_teardown() - Inverse of ipa_table_setup()
- * @ipa: IPA pointer
- */
-void ipa_table_teardown(struct ipa *ipa);
-
-/**
* ipa_table_config() - Configure filter and route tables
* @ipa: IPA pointer
+ *
+ * There is no need for a matching ipa_table_deconfig() function.
*/
void ipa_table_config(struct ipa *ipa);
/**
- * ipa_table_deconfig() - Inverse of ipa_table_config()
- * @ipa: IPA pointer
- */
-void ipa_table_deconfig(struct ipa *ipa);
-
-/**
* ipa_table_init() - Do early initialization of filter and route tables
* @ipa: IPA pointer
*/
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index dee58a6596d4..2756363e6938 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -192,6 +192,7 @@ void ipa_uc_teardown(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ u32 offset;
u32 val;
/* Fill in the command data */
@@ -203,8 +204,8 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
/* Use an interrupt to tell the microcontroller the command is ready */
val = u32_encode_bits(1, UC_INTR_FMASK);
-
- iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+ offset = ipa_reg_irq_uc_offset(ipa->version);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 2944e2a89023..ee2b3d02f3cd 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -8,17 +8,32 @@
/**
* enum ipa_version
+ * @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
+ * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.1
+ * @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2
+ * @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3
+ * @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0
+ * @IPA_VERSION_4_1: IPA version 4.1/GSI version 2.0
+ * @IPA_VERSION_4_2: IPA version 4.2/GSI version 2.2
+ * @IPA_VERSION_4_5: IPA version 4.5/GSI version 2.5
+ * @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
+ * @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
+ * @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
*
* Defines the version of IPA (and GSI) hardware present on the platform.
- * It seems this might be better defined elsewhere, but having it here gets
- * it where it's needed.
*/
enum ipa_version {
- IPA_VERSION_3_5_1, /* GSI version 1.3.0 */
- IPA_VERSION_4_0, /* GSI version 2.0 */
- IPA_VERSION_4_1, /* GSI version 2.1 */
- IPA_VERSION_4_2, /* GSI version 2.2 */
- IPA_VERSION_4_5, /* GSI version 2.5 */
+ IPA_VERSION_3_0,
+ IPA_VERSION_3_1,
+ IPA_VERSION_3_5,
+ IPA_VERSION_3_5_1,
+ IPA_VERSION_4_0,
+ IPA_VERSION_4_1,
+ IPA_VERSION_4_2,
+ IPA_VERSION_4_5,
+ IPA_VERSION_4_7,
+ IPA_VERSION_4_9,
+ IPA_VERSION_4_11,
};
#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9a9a5cf36a4b..1b998aa481f8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -272,25 +272,22 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_PAUSE))
return;
- for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
- if (vlan->dev == src || !(vlan->mode & mode))
- continue;
+ hash_for_each_rcu(port->vlan_hash, i, vlan, hlist) {
+ if (vlan->dev == src || !(vlan->mode & mode))
+ continue;
- hash = mc_hash(vlan, eth->h_dest);
- if (!test_bit(hash, vlan->mc_filter))
- continue;
+ hash = mc_hash(vlan, eth->h_dest);
+ if (!test_bit(hash, vlan->mc_filter))
+ continue;
- err = NET_RX_DROP;
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb))
- err = macvlan_broadcast_one(
- nskb, vlan, eth,
+ err = NET_RX_DROP;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (likely(nskb))
+ err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE) ?:
- netif_rx_ni(nskb);
- macvlan_count_rx(vlan, skb->len + ETH_HLEN,
- err == NET_RX_SUCCESS, true);
- }
+ netif_rx_ni(nskb);
+ macvlan_count_rx(vlan, skb->len + ETH_HLEN,
+ err == NET_RX_SUCCESS, true);
}
}
@@ -380,20 +377,14 @@ err:
static void macvlan_flush_sources(struct macvlan_port *port,
struct macvlan_dev *vlan)
{
+ struct macvlan_source_entry *entry;
+ struct hlist_node *next;
int i;
- for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
- struct hlist_node *h, *n;
-
- hlist_for_each_safe(h, n, &port->vlan_source_hash[i]) {
- struct macvlan_source_entry *entry;
+ hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist)
+ if (entry->vlan == vlan)
+ macvlan_hash_del_source(entry);
- entry = hlist_entry(h, struct macvlan_source_entry,
- hlist);
- if (entry->vlan == vlan)
- macvlan_hash_del_source(entry);
- }
- }
vlan->macaddr_count = 0;
}
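
Both macvlan conversions above replace open-coded bucket loops with the <linux/hashtable.h> iterators. A self-contained sketch of the two macros used, with a made-up item type:

    #include <linux/hashtable.h>

    struct item {
            int key;
            struct hlist_node node;
    };

    static DEFINE_HASHTABLE(items, 8);      /* 256 buckets */

    static void iterate(void)
    {
            struct hlist_node *tmp;
            struct item *it;
            int bkt;

            /* RCU-protected walk; caller holds rcu_read_lock() */
            hash_for_each_rcu(items, bkt, it, node)
                    pr_info("key=%d\n", it->key);

            /* Deletion-safe walk */
            hash_for_each_safe(items, bkt, tmp, it, node)
                    if (it->key < 0)
                            hash_del(&it->node);
    }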
@@ -423,18 +414,24 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
}
-static void macvlan_forward_source(struct sk_buff *skb,
+static bool macvlan_forward_source(struct sk_buff *skb,
struct macvlan_port *port,
const unsigned char *addr)
{
struct macvlan_source_entry *entry;
u32 idx = macvlan_eth_hash(addr);
struct hlist_head *h = &port->vlan_source_hash[idx];
+ bool consume = false;
hlist_for_each_entry_rcu(entry, h, hlist) {
- if (ether_addr_equal_64bits(entry->addr, addr))
+ if (ether_addr_equal_64bits(entry->addr, addr)) {
+ if (entry->vlan->flags & MACVLAN_FLAG_NODST)
+ consume = true;
macvlan_forward_source_one(skb, entry->vlan);
+ }
}
+
+ return consume;
}
/* called under rcu_read_lock() from netif_receive_skb */
@@ -463,7 +460,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
*pskb = skb;
eth = eth_hdr(skb);
- macvlan_forward_source(skb, port, eth->h_source);
+ if (macvlan_forward_source(skb, port, eth->h_source))
+ return RX_HANDLER_CONSUMED;
src = macvlan_hash_lookup(port, eth->h_source);
if (src && src->mode != MACVLAN_MODE_VEPA &&
src->mode != MACVLAN_MODE_BRIDGE) {
@@ -482,7 +480,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- macvlan_forward_source(skb, port, eth->h_source);
+ if (macvlan_forward_source(skb, port, eth->h_source))
+ return RX_HANDLER_CONSUMED;
if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
@@ -1286,7 +1285,8 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
if (data[IFLA_MACVLAN_FLAGS] &&
- nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+ nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~(MACVLAN_FLAG_NOPROMISC |
+ MACVLAN_FLAG_NODST))
return -EINVAL;
if (data[IFLA_MACVLAN_MODE]) {
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 5e72cc55afbd..e08c90ac0c6e 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -83,7 +83,7 @@ int mdio_set_flag(const struct mdio_if_info *mdio,
EXPORT_SYMBOL(mdio_set_flag);
/**
- * mdio_link_ok - is link status up/OK
+ * mdio45_links_ok - is link status up/OK
* @mdio: MDIO interface
* @mmd_mask: Mask for MMDs to check
*
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index a10cc460d7cf..d06e06f5e31a 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -200,6 +200,17 @@ config MDIO_BUS_MUX_MESON_G12A
the amlogic g12a SoC. The multiplexers connects either the external
or the internal MDIO bus to the parent bus.
+config MDIO_BUS_MUX_BCM6368
+ tristate "Broadcom BCM6368 MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (BMIPS_GENERIC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default BMIPS_GENERIC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+	  BCM6368 based Broadcom SoCs. The multiplexer connects one of several
+	  child MDIO buses to a parent bus. Buses can be internal as well as
+	  external, and the selection logic lies inside the same multiplexer.
+
config MDIO_BUS_MUX_BCM_IPROC
tristate "Broadcom iProc based MDIO bus multiplexers"
depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 5c498dde463f..c3ec0ef989df 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index fbd36891ee64..5d171e7f118d 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -5,20 +5,18 @@
* Copyright (C) 2014-2017 Broadcom
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/of_mdio.h>
-
+#include <linux/of_platform.h>
+#include <linux/phy.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
#define MDIO_CMD 0x00
#define MDIO_START_BUSY (1 << 29)
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index d3915f831854..07609114a26b 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -14,10 +14,10 @@
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
-#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/mdio-bitbang.h>
+#include <linux/module.h>
#include <linux/types.h>
-#include <linux/delay.h>
#define MDIO_READ 2
#define MDIO_WRITE 1
@@ -158,7 +158,7 @@ int mdiobb_read(struct mii_bus *bus, int phy, int reg)
reg = mdiobb_cmd_addr(ctrl, phy, reg);
mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
} else
- mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+ mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg);
ctrl->ops->set_mdio_dir(ctrl, 0);
@@ -190,7 +190,7 @@ int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
reg = mdiobb_cmd_addr(ctrl, phy, reg);
mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
} else
- mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
+ mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg);
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
@@ -217,6 +217,10 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
bus->read = mdiobb_read;
bus->write = mdiobb_write;
bus->priv = ctrl;
+ if (!ctrl->override_op_c22) {
+ ctrl->op_c22_read = MDIO_READ;
+ ctrl->op_c22_write = MDIO_WRITE;
+ }
return bus;
}
diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 1afd6fc1a351..95ce274c1be1 100644
--- a/drivers/net/mdio/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
@@ -4,9 +4,9 @@
*/
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/io.h>
#include "mdio-cavium.h"
diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 1b00235d7dc5..0fb3c2de0845 100644
--- a/drivers/net/mdio/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
@@ -17,15 +17,15 @@
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/mdio-gpio.h>
#include <linux/mdio-bitbang.h>
#include <linux/mdio-gpio.h>
-#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/mdio-gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
@@ -132,6 +132,13 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
}
+ if (dev->of_node &&
+ of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) {
+ bitbang->ctrl.op_c22_read = 0;
+ bitbang->ctrl.op_c22_write = 0;
+ bitbang->ctrl.override_op_c22 = 1;
+ }
+
dev_set_drvdata(dev, new_bus);
return new_bus;
@@ -196,6 +203,7 @@ static int mdio_gpio_remove(struct platform_device *pdev)
static const struct of_device_id mdio_gpio_of_match[] = {
{ .compatible = "virtual,mdio-gpio", },
+ { .compatible = "microchip,mdio-smi0" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
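
The override added to mdio-bitbang lets a bus driver swap the Clause 22 opcode pair before the bus is registered; mdio-gpio uses it here because Microchip's SMI0 framing expects opcode 0 for both reads and writes. A sketch of another hypothetical caller (my_mdiobb_ops and the bus name are assumptions):

    static struct mdiobb_ctrl ctrl = {
            .ops = &my_mdiobb_ops,          /* driver-provided bit-banging ops */
            .override_op_c22 = 1,
            .op_c22_read = 0,
            .op_c22_write = 0,
    };

    static int my_bus_init(struct device *dev)
    {
            struct mii_bus *bus = alloc_mdio_bitbang(&ctrl);

            if (!bus)
                    return -ENOMEM;
            bus->name = "my-smi0";
            snprintf(bus->id, MII_BUS_ID_SIZE, "my-smi0");
            bus->parent = dev;
            return mdiobus_register(bus);
    }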
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 25c25ea6da66..9cd71d896963 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -3,10 +3,10 @@
/* Copyright (c) 2020 Sartura Ltd. */
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 1bd18857e1c5..8fe8f0119fc1 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -7,12 +7,12 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/regmap.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
/* MII address register definitions */
#define MII_ADDR_REG_ADDR 0x10
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 11f583fd4611..b36e5ea04ddf 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -6,14 +6,14 @@
* Copyright (c) 2017 Microsemi Corporation
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
#define MSCC_MIIM_REG_STATUS 0x0
#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 42fb5f166136..03261e6b9ceb 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -3,14 +3,14 @@
* Copyright 2016 Broadcom
*/
#include <linux/clk.h>
-#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/of_mdio.h>
+#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
#include <linux/module.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
-#include <linux/mdio-mux.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
+#include <linux/platform_device.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004
@@ -197,10 +197,8 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
}
md->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(md->base)) {
- dev_err(&pdev->dev, "failed to ioremap register\n");
+ if (IS_ERR(md->base))
return PTR_ERR(md->base);
- }
md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c
new file mode 100644
index 000000000000..6dcbf987d61b
--- /dev/null
+++ b/drivers/net/mdio/mdio-mux-bcm6368.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Broadcom BCM6368 mdiomux bus controller driver
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+#define MDIOC_REG 0x0
+#define MDIOC_EXT_MASK BIT(16)
+#define MDIOC_REG_SHIFT 20
+#define MDIOC_PHYID_SHIFT 25
+#define MDIOC_RD_MASK BIT(30)
+#define MDIOC_WR_MASK BIT(31)
+
+#define MDIOD_REG 0x4
+
+struct bcm6368_mdiomux_desc {
+ void *mux_handle;
+ void __iomem *base;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ int ext_phy;
+};
+
+static int bcm6368_mdiomux_read(struct mii_bus *bus, int phy_id, int loc)
+{
+ struct bcm6368_mdiomux_desc *md = bus->priv;
+ uint32_t reg;
+ int ret;
+
+ __raw_writel(0, md->base + MDIOC_REG);
+
+ reg = MDIOC_RD_MASK |
+ (phy_id << MDIOC_PHYID_SHIFT) |
+ (loc << MDIOC_REG_SHIFT);
+ if (md->ext_phy)
+ reg |= MDIOC_EXT_MASK;
+
+ __raw_writel(reg, md->base + MDIOC_REG);
+ udelay(50);
+ ret = __raw_readw(md->base + MDIOD_REG);
+
+ return ret;
+}
+
+static int bcm6368_mdiomux_write(struct mii_bus *bus, int phy_id, int loc,
+ uint16_t val)
+{
+ struct bcm6368_mdiomux_desc *md = bus->priv;
+ uint32_t reg;
+
+ __raw_writel(0, md->base + MDIOC_REG);
+
+ reg = MDIOC_WR_MASK |
+ (phy_id << MDIOC_PHYID_SHIFT) |
+ (loc << MDIOC_REG_SHIFT);
+ if (md->ext_phy)
+ reg |= MDIOC_EXT_MASK;
+ reg |= val;
+
+ __raw_writel(reg, md->base + MDIOC_REG);
+ udelay(50);
+
+ return 0;
+}
+
+static int bcm6368_mdiomux_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct bcm6368_mdiomux_desc *md = data;
+
+ md->ext_phy = desired_child;
+
+ return 0;
+}
+
+static int bcm6368_mdiomux_probe(struct platform_device *pdev)
+{
+ struct bcm6368_mdiomux_desc *md;
+ struct mii_bus *bus;
+ struct resource *res;
+ int rc;
+
+ md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+ md->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ /*
+ * Just ioremap, as this MDIO block is usually integrated into an
+ * Ethernet MAC controller register range
+ */
+ md->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!md->base) {
+ dev_err(&pdev->dev, "failed to ioremap register\n");
+ return -ENOMEM;
+ }
+
+ md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!md->mii_bus) {
+ dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
+		return -ENOMEM;
+ }
+
+ bus = md->mii_bus;
+ bus->priv = md;
+ bus->name = "BCM6368 MDIO mux bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+ bus->read = bcm6368_mdiomux_read;
+ bus->write = bcm6368_mdiomux_write;
+ bus->phy_mask = 0x3f;
+ bus->dev.of_node = pdev->dev.of_node;
+
+ rc = mdiobus_register(bus);
+ if (rc) {
+ dev_err(&pdev->dev, "mdiomux registration failed\n");
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, md);
+
+ rc = mdio_mux_init(md->dev, md->dev->of_node,
+ bcm6368_mdiomux_switch_fn, &md->mux_handle, md,
+ md->mii_bus);
+ if (rc) {
+		dev_err(md->dev, "mdiomux initialization failed\n");
+ goto out_register;
+ }
+
+ dev_info(&pdev->dev, "Broadcom BCM6368 MDIO mux bus\n");
+
+ return 0;
+
+out_register:
+ mdiobus_unregister(bus);
+ return rc;
+}
+
+static int bcm6368_mdiomux_remove(struct platform_device *pdev)
+{
+ struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(md->mux_handle);
+ mdiobus_unregister(md->mii_bus);
+
+ return 0;
+}
+
+static const struct of_device_id bcm6368_mdiomux_ids[] = {
+ { .compatible = "brcm,bcm6368-mdio-mux", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm6368_mdiomux_ids);
+
+static struct platform_driver bcm6368_mdiomux_driver = {
+ .driver = {
+ .name = "bcm6368-mdio-mux",
+ .of_match_table = bcm6368_mdiomux_ids,
+ },
+ .probe = bcm6368_mdiomux_probe,
+ .remove = bcm6368_mdiomux_remove,
+};
+module_platform_driver(bcm6368_mdiomux_driver);
+
+MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
+MODULE_DESCRIPTION("BCM6368 mdiomux bus controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index 10a758fdc9e6..3c7f16f06b45 100644
--- a/drivers/net/mdio/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
@@ -3,13 +3,13 @@
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
-#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/of_mdio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mdio-mux.h>
#include <linux/module.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
-#include <linux/mdio-mux.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index d1a8780e24d8..c02fb2a067ee 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -7,13 +7,13 @@
* Copyright 2012 Freescale Semiconductor, Inc.
*/
-#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/mdio-mux.h>
+#include <linux/platform_device.h>
struct mdio_mux_mmioreg_state {
void *mux_handle;
diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index d6564381aa3e..527acfc3c045 100644
--- a/drivers/net/mdio/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
@@ -4,10 +4,10 @@
* Copyright 2019 NXP
*/
-#include <linux/platform_device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
+#include <linux/platform_device.h>
struct mdio_mux_multiplexer_state {
struct mux_control *muxc;
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index 6a1d3540210b..110e4ee85785 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -3,12 +3,12 @@
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
-#include <linux/platform_device.h>
-#include <linux/mdio-mux.h>
-#include <linux/of_mdio.h>
#include <linux/device.h>
+#include <linux/mdio-mux.h>
#include <linux/module.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#define DRV_DESCRIPTION "MDIO bus multiplexer driver"
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index d1e1009d51af..8ce99c4888e1 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -3,13 +3,13 @@
* Copyright (C) 2009-2015 Cavium, Inc.
*/
-#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
#include <linux/phy.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
#include "mdio-cavium.h"
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 3d7eda99d34e..cb1761693b69 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -3,14 +3,14 @@
* Copyright (C) 2009-2016 Cavium, Inc.
*/
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
-#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/gfp.h>
-#include <linux/phy.h>
#include <linux/io.h>
-#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "mdio-cavium.h"
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 461207cdf5d6..7ab4e26db08c 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -13,11 +13,11 @@
#include <linux/io.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/prefetch.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/prefetch.h>
#include <net/ip.h>
static bool xgene_mdio_status;
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index ea9d5855fb52..094494a68ddf 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -8,17 +8,17 @@
* out of the OpenFirmware device tree and using it to populate an mii_bus.
*/
-#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/netdevice.h>
#include <linux/err.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */
diff --git a/drivers/net/mhi/mhi.h b/drivers/net/mhi/mhi.h
index 12e7407d712a..1d0c499d27a3 100644
--- a/drivers/net/mhi/mhi.h
+++ b/drivers/net/mhi/mhi.h
@@ -29,6 +29,7 @@ struct mhi_net_dev {
struct mhi_net_stats stats;
u32 rx_queue_sz;
int msg_enable;
+ unsigned int mru;
};
struct mhi_net_proto {
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index f59960876083..0d8293a47a56 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -265,10 +265,12 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
rx_refill.work);
struct net_device *ndev = mhi_netdev->ndev;
struct mhi_device *mdev = mhi_netdev->mdev;
- int size = READ_ONCE(ndev->mtu);
struct sk_buff *skb;
+ unsigned int size;
int err;
+ size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);
+
while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
skb = netdev_alloc_skb(ndev, size);
if (unlikely(!skb))
@@ -359,8 +361,7 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
mhi_unprepare_from_transfer(mhi_netdev->mdev);
- if (mhi_netdev->skbagg_head)
- kfree_skb(mhi_netdev->skbagg_head);
+ kfree_skb(mhi_netdev->skbagg_head);
free_netdev(mhi_netdev->ndev);
}
diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c
index 75b5484c40d5..fc72b3f6ec9e 100644
--- a/drivers/net/mhi/proto_mbim.c
+++ b/drivers/net/mhi/proto_mbim.c
@@ -26,6 +26,15 @@
#define MBIM_NDP16_SIGN_MASK 0x00ffffff
+/* Usual WWAN MTU */
+#define MHI_MBIM_DEFAULT_MTU 1500
+
+/* An MRU of 3500 optimizes skb allocation: the skbs will basically fit in
+ * one 4K page. Large MBIM packets will simply be split over several MHI
+ * transfers and chained by the MHI net layer (zerocopy).
+ */
+#define MHI_MBIM_DEFAULT_MRU 3500
+
struct mbim_context {
u16 rx_seq;
u16 tx_seq;
@@ -91,20 +100,11 @@ static int mbim_rx_verify_nth16(struct sk_buff *skb)
return le16_to_cpu(nth16->wNdpIndex);
}
-static int mbim_rx_verify_ndp16(struct sk_buff *skb, int ndpoffset)
+static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
{
struct mhi_net_dev *dev = netdev_priv(skb->dev);
- struct usb_cdc_ncm_ndp16 *ndp16;
int ret;
- if (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16) > skb->len) {
- netif_dbg(dev, rx_err, dev->ndev, "invalid NDP offset <%u>\n",
- ndpoffset);
- return -EINVAL;
- }
-
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
-
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
netif_dbg(dev, rx_err, dev->ndev, "invalid DPT16 length <%u>\n",
le16_to_cpu(ndp16->wLength));
@@ -130,9 +130,6 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
struct net_device *ndev = mhi_netdev->ndev;
int ndpoffset;
- if (skb_linearize(skb))
- goto error;
-
/* Check NTB header and retrieve first NDP offset */
ndpoffset = mbim_rx_verify_nth16(skb);
if (ndpoffset < 0) {
@@ -142,12 +139,19 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
/* Process each NDP */
while (1) {
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
- int nframes, n;
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16;
+ int nframes, n, dpeoffset;
+
+ if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
+ net_err_ratelimited("%s: Incorrect NDP offset (%u)\n",
+ ndev->name, ndpoffset);
+ __mbim_length_errors_inc(mhi_netdev);
+ goto error;
+ }
/* Check NDP header and retrieve number of datagrams */
- nframes = mbim_rx_verify_ndp16(skb, ndpoffset);
+ nframes = mbim_rx_verify_ndp16(skb, &ndp16);
if (nframes < 0) {
net_err_ratelimited("%s: Incorrect NDP16\n", ndev->name);
__mbim_length_errors_inc(mhi_netdev);
@@ -155,8 +159,7 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
}
/* Only IP data type supported, no DSS in MHI context */
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
- if ((ndp16->dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
+ if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
!= cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
net_err_ratelimited("%s: Unsupported NDP type\n", ndev->name);
__mbim_errors_inc(mhi_netdev);
@@ -164,19 +167,24 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
}
/* Only primary IP session 0 (0x00) supported for now */
- if (ndp16->dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) {
+ if (ndp16.dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) {
net_err_ratelimited("%s: bad packet session\n", ndev->name);
__mbim_errors_inc(mhi_netdev);
goto next_ndp;
}
/* de-aggregate and deliver IP packets */
- dpe16 = ndp16->dpe16;
- for (n = 0; n < nframes; n++, dpe16++) {
- u16 dgram_offset = le16_to_cpu(dpe16->wDatagramIndex);
- u16 dgram_len = le16_to_cpu(dpe16->wDatagramLength);
+ dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
+ for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
+ u16 dgram_offset, dgram_len;
struct sk_buff *skbn;
+ if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
+ break;
+
+ dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
+ dgram_len = le16_to_cpu(dpe16.wDatagramLength);
+
if (!dgram_offset || !dgram_len)
break; /* null terminator */
@@ -185,7 +193,7 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
continue;
skb_put(skbn, dgram_len);
- memcpy(skbn->data, skb->data + dgram_offset, dgram_len);
+ skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);
switch (skbn->data[0] & 0xf0) {
case 0x40:
@@ -206,7 +214,7 @@ static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
}
next_ndp:
/* Other NDP to process? */
- ndpoffset = (int)le16_to_cpu(ndp16->wNextNdpIndex);
+ ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
if (!ndpoffset)
break;
}
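
The parsing rewrite above is what allows dropping skb_linearize(): instead of casting into skb->data, each NDP and DPE header is copied out with skb_copy_bits(), which walks paged fragments and the frag list transparently. The general pattern, sketched with a made-up header type:

    struct my_hdr hdr;

    /* Returns a negative errno if [offset, offset + sizeof(hdr))
     * falls outside the (possibly non-linear) skb.
     */
    if (skb_copy_bits(skb, offset, &hdr, sizeof(hdr)))
            return -EINVAL;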
@@ -282,6 +290,8 @@ static int mbim_init(struct mhi_net_dev *mhi_netdev)
return -ENOMEM;
ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
+ ndev->mtu = MHI_MBIM_DEFAULT_MTU;
+ mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU;
return 0;
}
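
The 3500-byte default MRU is sized so a refill buffer plus skb overhead stays within a single 4 KiB page. A rough check under common x86-64 assumptions (NET_SKB_PAD of 64, SKB_DATA_ALIGN rounding to 64-byte cache lines, skb_shared_info around 320 bytes):

    3500 (MRU) + 64 (NET_SKB_PAD) = 3564 -> 3584 after SKB_DATA_ALIGN
    3584 + 320 (struct skb_shared_info) = 3904 <= 4096

so each netdev_alloc_skb() in the refill path can be satisfied from the 4 KiB kmalloc slab, while larger MBIM aggregates are chained across several such buffers.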
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index ade086eed955..a1cbfa44a1e1 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -13,3 +13,7 @@ endif
ifneq ($(CONFIG_XFRM_OFFLOAD),)
netdevsim-objs += ipsec.o
endif
+
+ifneq ($(CONFIG_PSAMPLE),)
+netdevsim-objs += psample.o
+endif
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index dbeb29fa16e8..6189a4c0d39e 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1032,10 +1032,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
if (err)
goto err_fib_destroy;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_psample_init(nsim_dev);
if (err)
goto err_health_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_psample_exit;
+
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
@@ -1043,6 +1047,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
&nsim_dev_take_snapshot_fops);
return 0;
+err_psample_exit:
+ nsim_dev_psample_exit(nsim_dev);
err_health_exit:
nsim_dev_health_exit(nsim_dev);
err_fib_destroy:
@@ -1118,14 +1124,20 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_health_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_psample_init(nsim_dev);
if (err)
goto err_bpf_dev_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_psample_exit;
+
devlink_params_publish(devlink);
devlink_reload_enable(devlink);
return 0;
+err_psample_exit:
+ nsim_dev_psample_exit(nsim_dev);
err_bpf_dev_exit:
nsim_bpf_dev_exit(nsim_dev);
err_health_exit:
@@ -1158,6 +1170,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
return;
debugfs_remove(nsim_dev->take_snapshot);
nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_fib_destroy(devlink, nsim_dev->fib_data);
nsim_dev_traps_exit(devlink);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 166f0d6cbcf7..c9ae52595a8f 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -77,6 +77,34 @@ static int nsim_set_ringparam(struct net_device *dev,
return 0;
}
+static int
+nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->ethtool.get_err)
+ return -ns->ethtool.get_err;
+ memcpy(fecparam, &ns->ethtool.fec, sizeof(ns->ethtool.fec));
+ return 0;
+}
+
+static int
+nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ u32 fec;
+
+ if (ns->ethtool.set_err)
+ return -ns->ethtool.set_err;
+ memcpy(&ns->ethtool.fec, fecparam, sizeof(ns->ethtool.fec));
+ fec = fecparam->fec;
+ if (fec == ETHTOOL_FEC_AUTO)
+ fec |= ETHTOOL_FEC_OFF;
+ fec |= ETHTOOL_FEC_NONE;
+ ns->ethtool.fec.active_fec = 1 << (fls(fec) - 1);
+ return 0;
+}
+
static const struct ethtool_ops nsim_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
.get_pause_stats = nsim_get_pause_stats,
@@ -86,6 +114,8 @@ static const struct ethtool_ops nsim_ethtool_ops = {
.get_coalesce = nsim_get_coalesce,
.get_ringparam = nsim_get_ringparam,
.set_ringparam = nsim_set_ringparam,
+ .get_fecparam = nsim_get_fecparam,
+ .set_fecparam = nsim_set_fecparam,
};
static void nsim_ethtool_ring_init(struct netdevsim *ns)
@@ -104,8 +134,14 @@ void nsim_ethtool_init(struct netdevsim *ns)
nsim_ethtool_ring_init(ns);
+ ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
+ ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
+
ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
+ debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err);
+ debugfs_create_u32("set_err", 0600, ethtool, &ns->ethtool.set_err);
+
dir = debugfs_create_dir("pause", ethtool);
debugfs_create_bool("report_stats_rx", 0600, dir,
&ns->ethtool.pauseparam.report_stats_rx);
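
nsim_set_fecparam() always folds ETHTOOL_FEC_NONE into the requested mask (and OFF into AUTO), then reports the highest set bit as the single active mode via fls(). A worked trace, assuming the UAPI bit order NONE=0, AUTO=1, OFF=2, RS=3, BASER=4:

    u32 fec = ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;   /* bits 3 and 4 */
    u32 active;

    fec |= ETHTOOL_FEC_NONE;                        /* bit 0 always set */
    active = 1 << (fls(fec) - 1);                   /* == ETHTOOL_FEC_BASER */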
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 46fb414f7ca6..213d3e5056c8 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -14,6 +14,7 @@
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
+#include <linux/bitmap.h>
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -47,15 +48,18 @@ struct nsim_fib_data {
struct nsim_fib_entry nexthops;
struct rhashtable fib_rt_ht;
struct list_head fib_rt_list;
- struct mutex fib_lock; /* Protects hashtable and list */
+ struct mutex fib_lock; /* Protects FIB HT and list */
struct notifier_block nexthop_nb;
struct rhashtable nexthop_ht;
struct devlink *devlink;
struct work_struct fib_event_work;
struct list_head fib_event_queue;
spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
+ struct mutex nh_lock; /* Protects NH HT */
struct dentry *ddir;
bool fail_route_offload;
+ bool fail_res_nexthop_group_replace;
+ bool fail_nexthop_bucket_replace;
};
struct nsim_fib_rt_key {
@@ -116,6 +120,7 @@ struct nsim_nexthop {
struct rhash_head ht_node;
u64 occ;
u32 id;
+ bool is_resilient;
};
static const struct rhashtable_params nsim_nexthop_ht_params = {
@@ -561,7 +566,7 @@ nsim_fib6_rt_create(struct nsim_fib_data *data,
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
nsim_fib6_rt_nh_del(fib6_rt, rt_arr[i]);
- };
+ }
nsim_fib_rt_fini(&fib6_rt->common);
kfree(fib6_rt);
return ERR_PTR(err);
@@ -869,10 +874,8 @@ err_rt_offload_failed_flag_set:
return err;
}
-static int nsim_fib_event(struct nsim_fib_event *fib_event)
+static void nsim_fib_event(struct nsim_fib_event *fib_event)
{
- int err = 0;
-
switch (fib_event->family) {
case AF_INET:
nsim_fib4_event(fib_event->data, &fib_event->fen_info,
@@ -885,8 +888,6 @@ static int nsim_fib_event(struct nsim_fib_event *fib_event)
nsim_fib6_event_fini(&fib_event->fib6_event);
break;
}
-
- return err;
}
static int nsim_fib4_prepare_event(struct fib_notifier_info *info,
@@ -1118,6 +1119,10 @@ static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
for (i = 0; i < info->nh_grp->num_nh; i++)
occ += info->nh_grp->nh_entries[i].weight;
break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ occ = info->nh_res_table->num_nh_buckets;
+ nexthop->is_resilient = true;
+ break;
default:
NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
kfree(nexthop);
@@ -1160,6 +1165,21 @@ err_num_decrease:
}
+static void nsim_nexthop_hw_flags_set(struct net *net,
+ const struct nsim_nexthop *nexthop,
+ bool trap)
+{
+ int i;
+
+ nexthop_set_hw_flags(net, nexthop->id, false, trap);
+
+ if (!nexthop->is_resilient)
+ return;
+
+ for (i = 0; i < nexthop->occ; i++)
+ nexthop_bucket_set_hw_flags(net, nexthop->id, i, false, trap);
+}
+
static int nsim_nexthop_add(struct nsim_fib_data *data,
struct nsim_nexthop *nexthop,
struct netlink_ext_ack *extack)
@@ -1178,7 +1198,7 @@ static int nsim_nexthop_add(struct nsim_fib_data *data,
goto err_nexthop_dismiss;
}
- nexthop_set_hw_flags(net, nexthop->id, false, true);
+ nsim_nexthop_hw_flags_set(net, nexthop, true);
return 0;
@@ -1207,7 +1227,7 @@ static int nsim_nexthop_replace(struct nsim_fib_data *data,
goto err_nexthop_dismiss;
}
- nexthop_set_hw_flags(net, nexthop->id, false, true);
+ nsim_nexthop_hw_flags_set(net, nexthop, true);
nsim_nexthop_account(data, nexthop_old->occ, false, extack);
nsim_nexthop_destroy(nexthop_old);
@@ -1258,6 +1278,32 @@ static void nsim_nexthop_remove(struct nsim_fib_data *data,
nsim_nexthop_destroy(nexthop);
}
+static int nsim_nexthop_res_table_pre_replace(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ if (data->fail_res_nexthop_group_replace) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace a resilient nexthop group");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nsim_nexthop_bucket_replace(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ if (data->fail_nexthop_bucket_replace) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace nexthop bucket");
+ return -EINVAL;
+ }
+
+ nexthop_bucket_set_hw_flags(info->net, info->id,
+ info->nh_res_bucket->bucket_index,
+ false, true);
+
+ return 0;
+}
+
static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
@@ -1266,8 +1312,7 @@ static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event,
struct nh_notifier_info *info = ptr;
int err = 0;
- ASSERT_RTNL();
-
+ mutex_lock(&data->nh_lock);
switch (event) {
case NEXTHOP_EVENT_REPLACE:
err = nsim_nexthop_insert(data, info);
@@ -1275,10 +1320,17 @@ static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event,
case NEXTHOP_EVENT_DEL:
nsim_nexthop_remove(data, info);
break;
+ case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
+ err = nsim_nexthop_res_table_pre_replace(data, info);
+ break;
+ case NEXTHOP_EVENT_BUCKET_REPLACE:
+ err = nsim_nexthop_bucket_replace(data, info);
+ break;
default:
break;
}
+ mutex_unlock(&data->nh_lock);
return notifier_from_errno(err);
}
@@ -1289,11 +1341,68 @@ static void nsim_nexthop_free(void *ptr, void *arg)
struct net *net;
net = devlink_net(data->devlink);
- nexthop_set_hw_flags(net, nexthop->id, false, false);
+ nsim_nexthop_hw_flags_set(net, nexthop, false);
nsim_nexthop_account(data, nexthop->occ, false, NULL);
nsim_nexthop_destroy(nexthop);
}
+static ssize_t nsim_nexthop_bucket_activity_write(struct file *file,
+ const char __user *user_buf,
+ size_t size, loff_t *ppos)
+{
+ struct nsim_fib_data *data = file->private_data;
+ struct net *net = devlink_net(data->devlink);
+ struct nsim_nexthop *nexthop;
+ unsigned long *activity;
+ loff_t pos = *ppos;
+ u16 bucket_index;
+ char buf[128];
+ int err = 0;
+ u32 nhid;
+
+ if (pos != 0)
+ return -EINVAL;
+	if (size >= sizeof(buf))
+		return -EINVAL;
+	if (copy_from_user(buf, user_buf, size))
+		return -EFAULT;
+	buf[size] = '\0';
+ if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &nhid,
+ nsim_nexthop_ht_params);
+ if (!nexthop || !nexthop->is_resilient ||
+ bucket_index >= nexthop->occ) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ activity = bitmap_zalloc(nexthop->occ, GFP_KERNEL);
+ if (!activity) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ bitmap_set(activity, bucket_index, 1);
+ nexthop_res_grp_activity_update(net, nhid, nexthop->occ, activity);
+ bitmap_free(activity);
+
+out:
+ rtnl_unlock();
+
+ *ppos = size;
+ return err ?: size;
+}
+
+static const struct file_operations nsim_nexthop_bucket_activity_fops = {
+ .open = simple_open,
+ .write = nsim_nexthop_bucket_activity_write,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
@@ -1383,6 +1492,17 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
data->fail_route_offload = false;
debugfs_create_bool("fail_route_offload", 0600, data->ddir,
&data->fail_route_offload);
+
+ data->fail_res_nexthop_group_replace = false;
+ debugfs_create_bool("fail_res_nexthop_group_replace", 0600, data->ddir,
+ &data->fail_res_nexthop_group_replace);
+
+ data->fail_nexthop_bucket_replace = false;
+ debugfs_create_bool("fail_nexthop_bucket_replace", 0600, data->ddir,
+ &data->fail_nexthop_bucket_replace);
+
+ debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir,
+ data, &nsim_nexthop_bucket_activity_fops);
return 0;
}
@@ -1408,6 +1528,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
if (err)
goto err_data_free;
+ mutex_init(&data->nh_lock);
err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params);
if (err)
goto err_debugfs_exit;
@@ -1473,6 +1594,7 @@ err_rhashtable_nexthop_destroy:
data);
mutex_destroy(&data->fib_lock);
err_debugfs_exit:
+ mutex_destroy(&data->nh_lock);
nsim_fib_debugfs_exit(data);
err_data_free:
kfree(data);
@@ -1501,6 +1623,7 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
WARN_ON_ONCE(!list_empty(&data->fib_event_queue));
WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
mutex_destroy(&data->fib_lock);
+ mutex_destroy(&data->nh_lock);
nsim_fib_debugfs_exit(data);
kfree(data);
}
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 21e2974660e7..04aebdf85747 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -235,15 +235,10 @@ static ssize_t nsim_dev_health_break_write(struct file *file,
char *break_msg;
int err;
- break_msg = kmalloc(count + 1, GFP_KERNEL);
- if (!break_msg)
- return -ENOMEM;
+ break_msg = memdup_user_nul(data, count);
+ if (IS_ERR(break_msg))
+ return PTR_ERR(break_msg);
- if (copy_from_user(break_msg, data, count)) {
- err = -EFAULT;
- goto out;
- }
- break_msg[count] = '\0';
if (break_msg[count - 1] == '\n')
break_msg[count - 1] = '\0';
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 48163c5f2ec9..7ff24e03577b 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -60,9 +60,12 @@ struct nsim_ethtool_pauseparam {
};
struct nsim_ethtool {
+ u32 get_err;
+ u32 set_err;
struct nsim_ethtool_pauseparam pauseparam;
struct ethtool_coalesce coalesce;
struct ethtool_ringparam ring;
+ struct ethtool_fecparam fec;
};
struct netdevsim {
@@ -180,6 +183,20 @@ struct nsim_dev_health {
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+#if IS_ENABLED(CONFIG_PSAMPLE)
+int nsim_dev_psample_init(struct nsim_dev *nsim_dev);
+void nsim_dev_psample_exit(struct nsim_dev *nsim_dev);
+#else
+static inline int nsim_dev_psample_init(struct nsim_dev *nsim_dev)
+{
+ return 0;
+}
+
+static inline void nsim_dev_psample_exit(struct nsim_dev *nsim_dev)
+{
+}
+#endif
+
struct nsim_dev_port {
struct list_head list;
struct devlink_port devlink_port;
@@ -229,6 +246,7 @@ struct nsim_dev {
bool static_iana_vxlan;
u32 sleep;
} udp_ports;
+ struct nsim_dev_psample *psample;
};
static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
diff --git a/drivers/net/netdevsim/psample.c b/drivers/net/netdevsim/psample.c
new file mode 100644
index 000000000000..f0c6477dd0ae
--- /dev/null
+++ b/drivers/net/netdevsim/psample.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/inet.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include <net/ip.h>
+#include <net/psample.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/udp.h>
+
+#include "netdevsim.h"
+
+#define NSIM_PSAMPLE_REPORT_INTERVAL_MS 100
+#define NSIM_PSAMPLE_INVALID_TC 0xFFFF
+#define NSIM_PSAMPLE_L4_DATA_LEN 100
+
+struct nsim_dev_psample {
+ struct delayed_work psample_dw;
+ struct dentry *ddir;
+ struct psample_group *group;
+ u32 rate;
+ u32 group_num;
+ u32 trunc_size;
+ int in_ifindex;
+ int out_ifindex;
+ u16 out_tc;
+ u64 out_tc_occ_max;
+ u64 latency_max;
+ bool is_active;
+};
+
+static struct sk_buff *nsim_dev_psample_skb_build(void)
+{
+ int tot_len, data_len = NSIM_PSAMPLE_L4_DATA_LEN;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+ tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len;
+
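+ /* Build a minimal Ethernet/IPv4/UDP frame: random MAC addresses and
+ * UDP ports, IPv4 addresses from the RFC 5737 documentation ranges.
+ */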
+ skb_reset_mac_header(skb);
+ eth = skb_put(skb, sizeof(struct ethhdr));
+ eth_random_addr(eth->h_dest);
+ eth_random_addr(eth->h_source);
+ eth->h_proto = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IP);
+
+ skb_set_network_header(skb, skb->len);
+ iph = skb_put(skb, sizeof(struct iphdr));
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = in_aton("192.0.2.1");
+ iph->daddr = in_aton("198.51.100.1");
+ iph->version = 0x4;
+ iph->frag_off = 0;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(tot_len);
+ iph->id = 0;
+ iph->ttl = 100;
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ skb_set_transport_header(skb, skb->len);
+ udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
+ get_random_bytes(&udph->source, sizeof(u16));
+ get_random_bytes(&udph->dest, sizeof(u16));
+ udph->len = htons(sizeof(struct udphdr) + data_len);
+
+ return skb;
+}
+
+static void nsim_dev_psample_md_prepare(const struct nsim_dev_psample *psample,
+ struct psample_metadata *md,
+ unsigned int len)
+{
+ md->trunc_size = psample->trunc_size ? psample->trunc_size : len;
+ md->in_ifindex = psample->in_ifindex;
+ md->out_ifindex = psample->out_ifindex;
+
+ if (psample->out_tc != NSIM_PSAMPLE_INVALID_TC) {
+ md->out_tc = psample->out_tc;
+ md->out_tc_valid = 1;
+ }
+
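+ /* Masking with (max - 1) keeps the random value below the configured
+ * maximum; the distribution is only uniform for power-of-two maxima.
+ */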
+ if (psample->out_tc_occ_max) {
+ u64 out_tc_occ;
+
+ get_random_bytes(&out_tc_occ, sizeof(u64));
+ md->out_tc_occ = out_tc_occ & (psample->out_tc_occ_max - 1);
+ md->out_tc_occ_valid = 1;
+ }
+
+ if (psample->latency_max) {
+ u64 latency;
+
+ get_random_bytes(&latency, sizeof(u64));
+ md->latency = latency & (psample->latency_max - 1);
+ md->latency_valid = 1;
+ }
+}
+
+static void nsim_dev_psample_report_work(struct work_struct *work)
+{
+ struct nsim_dev_psample *psample;
+ struct psample_metadata md = {};
+ struct sk_buff *skb;
+ unsigned long delay;
+
+ psample = container_of(work, struct nsim_dev_psample, psample_dw.work);
+
+ skb = nsim_dev_psample_skb_build();
+ if (!skb)
+ goto out;
+
+ nsim_dev_psample_md_prepare(psample, &md, skb->len);
+ psample_sample_packet(psample->group, skb, psample->rate, &md);
+ consume_skb(skb);
+
+out:
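+ /* Re-arm unconditionally so sampling continues even if the skb
+ * allocation failed.
+ */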
+ delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS);
+ schedule_delayed_work(&psample->psample_dw, delay);
+}
+
+static int nsim_dev_psample_enable(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_psample *psample = nsim_dev->psample;
+ struct devlink *devlink;
+ unsigned long delay;
+
+ if (psample->is_active)
+ return -EBUSY;
+
+ devlink = priv_to_devlink(nsim_dev);
+ psample->group = psample_group_get(devlink_net(devlink),
+ psample->group_num);
+ if (!psample->group)
+ return -EINVAL;
+
+ delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS);
+ schedule_delayed_work(&psample->psample_dw, delay);
+
+ psample->is_active = true;
+
+ return 0;
+}
+
+static int nsim_dev_psample_disable(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_psample *psample = nsim_dev->psample;
+
+ if (!psample->is_active)
+ return -EINVAL;
+
+ psample->is_active = false;
+
+ cancel_delayed_work_sync(&psample->psample_dw);
+ psample_group_put(psample->group);
+
+ return 0;
+}
+
+static ssize_t nsim_dev_psample_enable_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(data, count, &enable);
+ if (err)
+ return err;
+
+ if (enable)
+ err = nsim_dev_psample_enable(nsim_dev);
+ else
+ err = nsim_dev_psample_disable(nsim_dev);
+
+ return err ? err : count;
+}
+
+static const struct file_operations nsim_psample_enable_fops = {
+ .open = simple_open,
+ .write = nsim_dev_psample_enable_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+int nsim_dev_psample_init(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_psample *psample;
+ int err;
+
+ psample = kzalloc(sizeof(*psample), GFP_KERNEL);
+ if (!psample)
+ return -ENOMEM;
+ nsim_dev->psample = psample;
+
+ INIT_DELAYED_WORK(&psample->psample_dw, nsim_dev_psample_report_work);
+
+ psample->ddir = debugfs_create_dir("psample", nsim_dev->ddir);
+ if (IS_ERR(psample->ddir)) {
+ err = PTR_ERR(psample->ddir);
+ goto err_psample_free;
+ }
+
+ /* Populate sampling parameters with sane defaults. */
+ psample->rate = 100;
+ debugfs_create_u32("rate", 0600, psample->ddir, &psample->rate);
+
+ psample->group_num = 10;
+ debugfs_create_u32("group_num", 0600, psample->ddir,
+ &psample->group_num);
+
+ psample->trunc_size = 0;
+ debugfs_create_u32("trunc_size", 0600, psample->ddir,
+ &psample->trunc_size);
+
+ psample->in_ifindex = 1;
+ debugfs_create_u32("in_ifindex", 0600, psample->ddir,
+ &psample->in_ifindex);
+
+ psample->out_ifindex = 2;
+ debugfs_create_u32("out_ifindex", 0600, psample->ddir,
+ &psample->out_ifindex);
+
+ psample->out_tc = 0;
+ debugfs_create_u16("out_tc", 0600, psample->ddir, &psample->out_tc);
+
+ psample->out_tc_occ_max = 10000;
+ debugfs_create_u64("out_tc_occ_max", 0600, psample->ddir,
+ &psample->out_tc_occ_max);
+
+ psample->latency_max = 50;
+ debugfs_create_u64("latency_max", 0600, psample->ddir,
+ &psample->latency_max);
+
+ debugfs_create_file("enable", 0200, psample->ddir, nsim_dev,
+ &nsim_psample_enable_fops);
+
+ return 0;
+
+err_psample_free:
+ kfree(nsim_dev->psample);
+ return err;
+}
+
+void nsim_dev_psample_exit(struct nsim_dev *nsim_dev)
+{
+ debugfs_remove_recursive(nsim_dev->psample->ddir);
+ if (nsim_dev->psample->is_active) {
+ cancel_delayed_work_sync(&nsim_dev->psample->psample_dw);
+ psample_group_put(nsim_dev->psample->group);
+ }
+ kfree(nsim_dev->psample);
+}
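Since the parameters above are plain debugfs files, a sampling session can be
driven entirely from user space. A minimal sketch in C, assuming a netdevsim
instance whose debugfs directory is /sys/kernel/debug/netdevsim/netdevsim1337
(the ID depends on what was written to /sys/bus/netdevsim/new_device):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed location of the psample knobs created by nsim_dev_psample_init(). */
#define PSAMPLE_DIR "/sys/kernel/debug/netdevsim/netdevsim1337/psample/"

static int write_knob(const char *name, const char *val)
{
	char path[sizeof(PSAMPLE_DIR) + 32];
	ssize_t ret;
	int fd;

	snprintf(path, sizeof(path), PSAMPLE_DIR "%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	write_knob("rate", "32");	/* report 1 of every 32 packets */
	write_knob("enable", "1");	/* kicks off the delayed work */
	sleep(1);			/* ~10 reports at the 100 ms interval */
	write_knob("enable", "0");	/* stops the work, drops the group */
	return 0;
}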
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 1aa9903d602e..944ba105cac1 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -15,6 +15,7 @@
#define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0
#define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0
#define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0
+#define SYNOPSYS_XPCS_SGMII_ID 0x7996ced0
#define SYNOPSYS_XPCS_MASK 0xffffffff
/* Vendor regs access */
@@ -57,6 +58,34 @@
#define DW_C73_2500KX BIT(0)
#define DW_C73_5000KR BIT(1)
+/* Clause 37 Defines */
+/* VR MII MMD registers offsets */
+#define DW_VR_MII_DIG_CTRL1 0x8000
+#define DW_VR_MII_AN_CTRL 0x8001
+#define DW_VR_MII_AN_INTR_STS 0x8002
+
+/* VR_MII_DIG_CTRL1 */
+#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
+
+/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3
+#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
+#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
+#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0
+#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1
+#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1)
+#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0
+#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
+
+/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
+#define DW_VR_MII_C37_ANSGM_SP_10 0x0
+#define DW_VR_MII_C37_ANSGM_SP_100 0x1
+#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
+#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
+
static const int xpcs_usxgmii_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -105,6 +134,16 @@ static const int xpcs_xlgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_sgmii_features[] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const phy_interface_t xpcs_usxgmii_interfaces[] = {
PHY_INTERFACE_MODE_USXGMII,
PHY_INTERFACE_MODE_MAX,
@@ -120,27 +159,42 @@ static const phy_interface_t xpcs_xlgmii_interfaces[] = {
PHY_INTERFACE_MODE_MAX,
};
+static const phy_interface_t xpcs_sgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_MAX,
+};
+
static struct xpcs_id {
u32 id;
u32 mask;
const int *supported;
const phy_interface_t *interface;
+ int an_mode;
} xpcs_id_list[] = {
{
.id = SYNOPSYS_XPCS_USXGMII_ID,
.mask = SYNOPSYS_XPCS_MASK,
.supported = xpcs_usxgmii_features,
.interface = xpcs_usxgmii_interfaces,
+ .an_mode = DW_AN_C73,
}, {
.id = SYNOPSYS_XPCS_10GKR_ID,
.mask = SYNOPSYS_XPCS_MASK,
.supported = xpcs_10gkr_features,
.interface = xpcs_10gkr_interfaces,
+ .an_mode = DW_AN_C73,
}, {
.id = SYNOPSYS_XPCS_XLGMII_ID,
.mask = SYNOPSYS_XPCS_MASK,
.supported = xpcs_xlgmii_features,
.interface = xpcs_xlgmii_interfaces,
+ .an_mode = DW_AN_C73,
+ }, {
+ .id = SYNOPSYS_XPCS_SGMII_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .an_mode = DW_AN_C37_SGMII,
},
};
@@ -195,9 +249,20 @@ static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
}
-static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev)
+static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs)
{
- int ret;
+ int ret, dev;
+
+ switch (xpcs->an_mode) {
+ case DW_AN_C73:
+ dev = MDIO_MMD_PCS;
+ break;
+ case DW_AN_C37_SGMII:
+ dev = MDIO_MMD_VEND2;
+ break;
+ default:
+ return -EINVAL;
+ }
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
if (ret < 0)
@@ -212,8 +277,8 @@ static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev)
dev_warn(&(__xpcs)->bus->dev, ##__args); \
})
-static int xpcs_read_fault(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
{
int ret;
@@ -263,7 +328,7 @@ static int xpcs_read_fault(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_read_link(struct mdio_xpcs_args *xpcs, bool an)
+static int xpcs_read_link_c73(struct mdio_xpcs_args *xpcs, bool an)
{
bool link = true;
int ret;
@@ -357,7 +422,7 @@ static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
}
-static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
{
int ret, adv;
@@ -401,11 +466,11 @@ static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv);
}
-static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs)
+static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
{
int ret;
- ret = xpcs_config_aneg_c73(xpcs);
+ ret = _xpcs_config_aneg_c73(xpcs);
if (ret < 0)
return ret;
@@ -418,8 +483,8 @@ static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs)
return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret);
}
-static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
{
int ret;
@@ -434,7 +499,7 @@ static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs,
/* Check if Aneg outcome is valid */
if (!(ret & DW_C73_AN_ADV_SF)) {
- xpcs_config_aneg(xpcs);
+ xpcs_config_aneg_c73(xpcs);
return 0;
}
@@ -444,8 +509,8 @@ static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
{
int ret;
@@ -493,8 +558,8 @@ static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs,
return 0;
}
-static void xpcs_resolve_lpa(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
{
int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
@@ -585,32 +650,84 @@ static int xpcs_validate(struct mdio_xpcs_args *xpcs,
return 0;
}
+static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+
+ /* The AN settings for C37 SGMII mode are:
+ * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
+ * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
+ * DW xPCS used with DW EQoS MAC is always MAC side SGMII.
+ * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
+ * speed/duplex mode change by HW after SGMII AN complete)
+ *
+ * Note: since this is MAC-side SGMII, there is no need to set
+ * SR_MII_AN_ADV. The MAC side receives the AN Tx Config from the
+ * PHY with the link state change once C28 AN completes between
+ * the PHY and the link partner. There is also no need to trigger
+ * an AN restart for MAC-side SGMII.
+ */
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~(DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK);
+ ret |= (DW_VR_MII_PCS_MODE_C37_SGMII <<
+ DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT &
+ DW_VR_MII_PCS_MODE_MASK);
+ ret |= (DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII <<
+ DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT &
+ DW_VR_MII_TX_CONFIG_MASK);
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
+ return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+}
+
static int xpcs_config(struct mdio_xpcs_args *xpcs,
const struct phylink_link_state *state)
{
int ret;
- if (state->an_enabled) {
- ret = xpcs_config_aneg(xpcs);
+ switch (xpcs->an_mode) {
+ case DW_AN_C73:
+ if (state->an_enabled) {
+ ret = xpcs_config_aneg_c73(xpcs);
+ if (ret)
+ return ret;
+ }
+ break;
+ case DW_AN_C37_SGMII:
+ ret = xpcs_config_aneg_c37_sgmii(xpcs);
if (ret)
return ret;
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
-static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
{
int ret;
/* Link needs to be read first ... */
- state->link = xpcs_read_link(xpcs, state->an_enabled) > 0 ? 1 : 0;
+ state->link = xpcs_read_link_c73(xpcs, state->an_enabled) > 0 ? 1 : 0;
/* ... and then we check the faults. */
- ret = xpcs_read_fault(xpcs, state);
+ ret = xpcs_read_fault_c73(xpcs, state);
if (ret) {
- ret = xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ ret = xpcs_soft_reset(xpcs);
if (ret)
return ret;
@@ -619,10 +736,10 @@ static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
return xpcs_config(xpcs, state);
}
- if (state->an_enabled && xpcs_aneg_done(xpcs, state)) {
+ if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state)) {
state->an_complete = true;
- xpcs_read_lpa(xpcs, state);
- xpcs_resolve_lpa(xpcs, state);
+ xpcs_read_lpa_c73(xpcs, state);
+ xpcs_resolve_lpa_c73(xpcs, state);
} else if (state->an_enabled) {
state->link = 0;
} else if (state->link) {
@@ -632,6 +749,70 @@ static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
return 0;
}
+static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ /* Reset link_state */
+ state->link = false;
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = 0;
+
+ /* For C37 SGMII mode, we check DW_VR_MII_AN_INTR_STS for link
+ * status, speed and duplex.
+ */
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
+ int speed_value;
+
+ state->link = true;
+
+ speed_value = (ret & DW_VR_MII_AN_STS_C37_ANSGM_SP) >>
+ DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT;
+ if (speed_value == DW_VR_MII_C37_ANSGM_SP_1000)
+ state->speed = SPEED_1000;
+ else if (speed_value == DW_VR_MII_C37_ANSGM_SP_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+
+ if (ret & DW_VR_MII_AN_STS_C37_ANSGM_FD)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ switch (xpcs->an_mode) {
+ case DW_AN_C73:
+ ret = xpcs_get_state_c73(xpcs, state);
+ if (ret)
+ return ret;
+ break;
+ case DW_AN_C37_SGMII:
+ ret = xpcs_get_state_c37_sgmii(xpcs, state);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed,
phy_interface_t interface)
{
@@ -646,6 +827,7 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
int ret;
u32 id;
+ /* First, search C73 PCS using PCS MMD */
ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1);
if (ret < 0)
return 0xffffffff;
@@ -656,7 +838,26 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
if (ret < 0)
return 0xffffffff;
- return id | ret;
+ /* If the Device IDs are not all zeros, we found a C73 AN-type device */
+ if (id | ret)
+ return id | ret;
+
+ /* Next, search C37 PCS using Vendor-Specific MII MMD */
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1);
+ if (ret < 0)
+ return 0xffffffff;
+
+ id = ret << 16;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2);
+ if (ret < 0)
+ return 0xffffffff;
+
+ /* If the Device IDs are not all zeros, we found a C37 AN-type device */
+ if (id | ret)
+ return id | ret;
+
+ return 0xffffffff;
}
static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
@@ -676,6 +877,8 @@ static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
set_bit(match->supported[i], xpcs->supported);
+ xpcs->an_mode = match->an_mode;
+
return true;
}
@@ -692,7 +895,7 @@ static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface)
match = entry;
if (xpcs_check_features(xpcs, match, interface))
- return xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ return xpcs_soft_reset(xpcs);
}
}
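In xpcs_config_aneg_c37_sgmii() above, the PCS_MODE and TX_CONFIG fields are
programmed with open-coded shifts and masks. The same register update could be
written with FIELD_PREP() from <linux/bitfield.h>; a sketch of that style, with
a hypothetical helper name that is not part of this patch:

static int xpcs_config_an_ctrl_c37_sgmii(struct mdio_xpcs_args *xpcs)
{
	int ret;

	ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
	if (ret < 0)
		return ret;

	/* Select C37 SGMII AN and MAC-side SGMII in one read-modify-write. */
	ret &= ~(DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK);
	ret |= FIELD_PREP(DW_VR_MII_PCS_MODE_MASK,
			  DW_VR_MII_PCS_MODE_C37_SGMII);
	ret |= FIELD_PREP(DW_VR_MII_TX_CONFIG_MASK,
			  DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII);

	return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret);
}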
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 698bea312adc..288bf405ebdb 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -201,6 +201,12 @@ config MARVELL_10G_PHY
help
Support for the Marvell Alaska MV88X3310 and compatible PHYs.
+config MARVELL_88X2222_PHY
+ tristate "Marvell 88X2222 PHY"
+ help
+ Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet
+ Transceiver.
+
config MICREL_PHY
tristate "Micrel PHYs"
help
@@ -228,6 +234,12 @@ config NATIONAL_PHY
help
Currently supports the DP83865 PHY.
+config NXP_C45_TJA11XX_PHY
+ tristate "NXP C45 TJA11XX PHYs"
+ help
+ Enable support for NXP C45 TJA11XX PHYs.
+ Currently supports only the TJA1103 PHY.
+
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
depends on HWMON
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a13e402074cf..bcda7ed2455d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
+obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
@@ -70,6 +71,7 @@ obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index c2aa4c92edde..32af52dd5aed 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -144,6 +144,9 @@
#define ATH8035_PHY_ID 0x004dd072
#define AT8030_PHY_ID_MASK 0xffffffef
+#define AT803X_PAGE_FIBER 0
+#define AT803X_PAGE_COPPER 1
+
MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
@@ -198,6 +201,35 @@ static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
return phy_write(phydev, AT803X_DEBUG_DATA, val);
}
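+/* The BT_BX_REG_SEL bit in the chip configuration register selects the
+ * copper page when set and the fiber page when clear.
+ */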
+static int at803x_write_page(struct phy_device *phydev, int page)
+{
+ int mask;
+ int set;
+
+ if (page == AT803X_PAGE_COPPER) {
+ set = AT803X_BT_BX_REG_SEL;
+ mask = 0;
+ } else {
+ set = 0;
+ mask = AT803X_BT_BX_REG_SEL;
+ }
+
+ return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
+}
+
+static int at803x_read_page(struct phy_device *phydev)
+{
+ int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+
+ if (ccr < 0)
+ return ccr;
+
+ if (ccr & AT803X_BT_BX_REG_SEL)
+ return AT803X_PAGE_COPPER;
+
+ return AT803X_PAGE_FIBER;
+}
+
static int at803x_enable_rx_delay(struct phy_device *phydev)
{
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
@@ -522,10 +554,6 @@ static int at803x_parse_dt(struct phy_device *phydev)
phydev_err(phydev, "failed to get VDDIO regulator\n");
return PTR_ERR(priv->vddio);
}
-
- ret = regulator_enable(priv->vddio);
- if (ret < 0)
- return ret;
}
return 0;
@@ -535,6 +563,7 @@ static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct at803x_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -542,7 +571,35 @@ static int at803x_probe(struct phy_device *phydev)
phydev->priv = priv;
- return at803x_parse_dt(phydev);
+ ret = at803x_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ if (priv->vddio) {
+ ret = regulator_enable(priv->vddio);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the copper page, as otherwise we read
+ * the PHY capabilities from the fiber side.
+ */
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (priv->vddio)
+ regulator_disable(priv->vddio);
+
+ return ret;
}
static void at803x_remove(struct phy_device *phydev)
@@ -751,36 +808,6 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
-static int at803x_aneg_done(struct phy_device *phydev)
-{
- int ccr;
-
- int aneg_done = genphy_aneg_done(phydev);
- if (aneg_done != BMSR_ANEGCOMPLETE)
- return aneg_done;
-
- /*
- * in SGMII mode, if copper side autoneg is successful,
- * also check SGMII side autoneg result
- */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
- return aneg_done;
-
- /* switch to SGMII/fiber page */
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
-
- /* check if the SGMII link is OK. */
- if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
- phydev_warn(phydev, "803x_aneg_done: SGMII link is not ok\n");
- aneg_done = 0;
- }
- /* switch back to copper page */
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
- return aneg_done;
-}
-
static int at803x_read_status(struct phy_device *phydev)
{
int ss, err, old_link = phydev->link;
@@ -1196,9 +1223,10 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
+ .read_page = at803x_read_page,
+ .write_page = at803x_write_page,
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
- .aneg_done = at803x_aneg_done,
.config_intr = &at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 82fe5f43f0e9..7bf3011b8e77 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -671,13 +671,13 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-struct bcm53xx_phy_priv {
+struct bcm54xx_phy_priv {
u64 *stats;
};
-static int bcm53xx_phy_probe(struct phy_device *phydev)
+static int bcm54xx_phy_probe(struct phy_device *phydev)
{
- struct bcm53xx_phy_priv *priv;
+ struct bcm54xx_phy_priv *priv;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -694,10 +694,10 @@ static int bcm53xx_phy_probe(struct phy_device *phydev)
return 0;
}
-static void bcm53xx_phy_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
+static void bcm54xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
{
- struct bcm53xx_phy_priv *priv = phydev->priv;
+ struct bcm54xx_phy_priv *priv = phydev->priv;
bcm_phy_get_stats(phydev, priv->stats, stats, data);
}
@@ -708,6 +708,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -716,6 +720,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -724,6 +732,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -732,6 +744,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -740,6 +756,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -759,6 +779,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -769,6 +793,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
@@ -778,6 +806,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
@@ -789,6 +821,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54811",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54811_config_init,
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
@@ -800,6 +836,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -808,6 +848,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -816,6 +860,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -824,6 +872,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -851,8 +903,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
.get_strings = bcm_phy_get_strings,
- .get_stats = bcm53xx_phy_get_stats,
- .probe = bcm53xx_phy_probe,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
}, {
.phy_id = PHY_ID_BCM53125,
.phy_id_mask = 0xfffffff0,
@@ -861,8 +913,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
.get_strings = bcm_phy_get_strings,
- .get_stats = bcm53xx_phy_get_stats,
- .probe = bcm53xx_phy_probe,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
@@ -871,6 +923,10 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
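The same four statistics callbacks are added to each BCM54xx entry above. If
the entries never need to diverge, the repetition could be folded into a
macro; a sketch of that consolidation, not the form this patch uses:

/* Hypothetical helper; every entry above wires up the same four ops. */
#define BCM54XX_STATS_OPS				\
	.get_sset_count	= bcm_phy_get_sset_count,	\
	.get_strings	= bcm_phy_get_strings,		\
	.get_stats	= bcm54xx_get_stats,		\
	.probe		= bcm54xx_phy_probe

/* A broadcom_drivers[] entry would then read, for example:
 *	{ .phy_id = PHY_ID_BCM5411, .name = "Broadcom BCM5411",
 *	  BCM54XX_STATS_OPS, .config_init = bcm54xx_config_init, ... },
 */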
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 6eac50d4b42f..d453ec016168 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -11,6 +11,18 @@
#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
+#define XWAY_MDIO_LED 0x1B /* led control */
+
+/* bit 15:12 are reserved */
+#define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */
+#define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */
+#define XWAY_MDIO_LED_LED1_EN BIT(9) /* Enable the integrated function of LED1 */
+#define XWAY_MDIO_LED_LED0_EN BIT(8) /* Enable the integrated function of LED0 */
+/* bit 7:4 are reserved */
+#define XWAY_MDIO_LED_LED3_DA BIT(3) /* Direct Access to LED3 */
+#define XWAY_MDIO_LED_LED2_DA BIT(2) /* Direct Access to LED2 */
+#define XWAY_MDIO_LED_LED1_DA BIT(1) /* Direct Access to LED1 */
+#define XWAY_MDIO_LED_LED0_DA BIT(0) /* Direct Access to LED0 */
#define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */
#define XWAY_MDIO_INIT_MSRE BIT(14)
@@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
/* Clear all pending interrupts */
phy_read(phydev, XWAY_MDIO_ISTAT);
+ /* Ensure that the integrated LED function is enabled for all LEDs */
+ err = phy_write(phydev, XWAY_MDIO_LED,
+ XWAY_MDIO_LED_LED0_EN |
+ XWAY_MDIO_LED_LED1_EN |
+ XWAY_MDIO_LED_LED2_EN |
+ XWAY_MDIO_LED_LED3_EN);
+ if (err)
+ return err;
+
phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
XWAY_MMD_LEDCH_NACS_NONE |
XWAY_MMD_LEDCH_SBF_F02HZ |
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
new file mode 100644
index 000000000000..d8b31d4d2a73
--- /dev/null
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Marvell 88x2222 dual-port multi-speed Ethernet transceiver.
+ *
+ * Supports:
+ * XAUI on the host side.
+ * 1000Base-X or 10GBase-R on the line side.
+ * SGMII over 1000Base-X.
+ */
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mdio.h>
+#include <linux/marvell_phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/sfp.h>
+#include <linux/netdevice.h>
+
+/* Port PCS Configuration */
+#define MV_PCS_CONFIG 0xF002
+#define MV_PCS_HOST_XAUI 0x73
+#define MV_PCS_LINE_10GBR (0x71 << 8)
+#define MV_PCS_LINE_1GBX_AN (0x7B << 8)
+#define MV_PCS_LINE_SGMII_AN (0x7F << 8)
+
+/* Port Reset and Power Down */
+#define MV_PORT_RST 0xF003
+#define MV_LINE_RST_SW BIT(15)
+#define MV_HOST_RST_SW BIT(7)
+#define MV_PORT_RST_SW (MV_LINE_RST_SW | MV_HOST_RST_SW)
+
+/* PMD Receive Signal Detect */
+#define MV_RX_SIGNAL_DETECT 0x000A
+#define MV_RX_SIGNAL_DETECT_GLOBAL BIT(0)
+
+/* 1000Base-X/SGMII Control Register */
+#define MV_1GBX_CTRL (0x2000 + MII_BMCR)
+
+/* 1000BASE-X/SGMII Status Register */
+#define MV_1GBX_STAT (0x2000 + MII_BMSR)
+
+/* 1000Base-X Auto-Negotiation Advertisement Register */
+#define MV_1GBX_ADVERTISE (0x2000 + MII_ADVERTISE)
+
+/* 1000Base-X PHY Specific Status Register */
+#define MV_1GBX_PHY_STAT 0xA003
+#define MV_1GBX_PHY_STAT_AN_RESOLVED BIT(11)
+#define MV_1GBX_PHY_STAT_DUPLEX BIT(13)
+#define MV_1GBX_PHY_STAT_SPEED100 BIT(14)
+#define MV_1GBX_PHY_STAT_SPEED1000 BIT(15)
+
+#define AUTONEG_TIMEOUT 3
+
+struct mv2222_data {
+ phy_interface_t line_interface;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ bool sfp_link;
+};
+
+/* SFI PMA transmit enable */
+static int mv2222_tx_enable(struct phy_device *phydev)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS,
+ MDIO_PMD_TXDIS_GLOBAL);
+}
+
+/* SFI PMA transmit disable */
+static int mv2222_tx_disable(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS,
+ MDIO_PMD_TXDIS_GLOBAL);
+}
+
+static int mv2222_soft_reset(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PORT_RST,
+ MV_PORT_RST_SW);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND2, MV_PORT_RST,
+ val, !(val & MV_PORT_RST_SW),
+ 5000, 1000000, true);
+}
+
+static int mv2222_disable_aneg(struct phy_device *phydev)
+{
+ int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ if (ret < 0)
+ return ret;
+
+ return mv2222_soft_reset(phydev);
+}
+
+static int mv2222_enable_aneg(struct phy_device *phydev)
+{
+ int ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL,
+ BMCR_ANENABLE | BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ return mv2222_soft_reset(phydev);
+}
+
+static int mv2222_set_sgmii_speed(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+
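+ /* Fall through from the requested speed to the next lower one the
+ * module supports.
+ */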
+ switch (phydev->speed) {
+ default:
+ case SPEED_1000:
+ if ((linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ priv->supported)))
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MV_1GBX_CTRL,
+ BMCR_SPEED1000 | BMCR_SPEED100,
+ BMCR_SPEED1000);
+
+ fallthrough;
+ case SPEED_100:
+ if ((linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ priv->supported)))
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MV_1GBX_CTRL,
+ BMCR_SPEED1000 | BMCR_SPEED100,
+ BMCR_SPEED100);
+ fallthrough;
+ case SPEED_10:
+ if ((linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ priv->supported)))
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MV_1GBX_CTRL,
+ BMCR_SPEED1000 | BMCR_SPEED100,
+ BMCR_SPEED10);
+
+ return -EINVAL;
+ }
+}
+
+static bool mv2222_is_10g_capable(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+
+ return (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ priv->supported));
+}
+
+static bool mv2222_is_1gbx_capable(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+
+ return linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ priv->supported);
+}
+
+static bool mv2222_is_sgmii_capable(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+
+ return (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ priv->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ priv->supported));
+}
+
+static int mv2222_config_line(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+
+ switch (priv->line_interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
+ MV_PCS_HOST_XAUI | MV_PCS_LINE_10GBR);
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
+ MV_PCS_HOST_XAUI | MV_PCS_LINE_1GBX_AN);
+ case PHY_INTERFACE_MODE_SGMII:
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_PCS_CONFIG,
+ MV_PCS_HOST_XAUI | MV_PCS_LINE_SGMII_AN);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Switch between 1G (1000Base-X/SGMII) and 10G (10GBase-R) modes */
+static int mv2222_swap_line_type(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+ bool changed = false;
+ int ret;
+
+ switch (priv->line_interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ if (mv2222_is_1gbx_capable(phydev)) {
+ priv->line_interface = PHY_INTERFACE_MODE_1000BASEX;
+ changed = true;
+ }
+
+ if (mv2222_is_sgmii_capable(phydev)) {
+ priv->line_interface = PHY_INTERFACE_MODE_SGMII;
+ changed = true;
+ }
+
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (mv2222_is_10g_capable(phydev)) {
+ priv->line_interface = PHY_INTERFACE_MODE_10GBASER;
+ changed = true;
+ }
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (changed) {
+ ret = mv2222_config_line(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv2222_setup_forced(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+ int ret;
+
+ if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) {
+ if (phydev->speed < SPEED_10000 &&
+ phydev->speed != SPEED_UNKNOWN) {
+ ret = mv2222_swap_line_type(phydev);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (priv->line_interface == PHY_INTERFACE_MODE_SGMII) {
+ ret = mv2222_set_sgmii_speed(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return mv2222_disable_aneg(phydev);
+}
+
+static int mv2222_config_aneg(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+ int ret, adv;
+
+ /* SFP is not present, do nothing */
+ if (priv->line_interface == PHY_INTERFACE_MODE_NA)
+ return 0;
+
+ if (phydev->autoneg == AUTONEG_DISABLE ||
+ priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
+ return mv2222_setup_forced(phydev);
+
+ adv = linkmode_adv_to_mii_adv_x(priv->supported,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_ADVERTISE,
+ ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM,
+ adv);
+ if (ret < 0)
+ return ret;
+
+ return mv2222_enable_aneg(phydev);
+}
+
+static int mv2222_aneg_done(struct phy_device *phydev)
+{
+ int ret;
+
+ if (mv2222_is_10g_capable(phydev)) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT1_LSTATUS)
+ return 1;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT);
+ if (ret < 0)
+ return ret;
+
+ return (ret & BMSR_ANEGCOMPLETE);
+}
+
+/* Returns negative on error, 0 if link is down, 1 if link is up */
+static int mv2222_read_status_10g(struct phy_device *phydev)
+{
+ static int timeout;
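+ /* Function-local static: this retry counter is shared by every PHY
+ * instance bound to the driver.
+ */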
+ int val, link = 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_STAT1_LSTATUS) {
+ link = 1;
+
+ /* 10GBASE-R does not support auto-negotiation */
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ } else {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ timeout++;
+
+ if (timeout > AUTONEG_TIMEOUT) {
+ timeout = 0;
+
+ val = mv2222_swap_line_type(phydev);
+ if (val < 0)
+ return val;
+
+ return mv2222_config_aneg(phydev);
+ }
+ }
+ }
+
+ return link;
+}
+
+/* Returns negative on error, 0 if link is down, 1 if link is up */
+static int mv2222_read_status_1g(struct phy_device *phydev)
+{
+ static int timeout;
+ int val, link = 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT);
+ if (val < 0)
+ return val;
+
+ if (phydev->autoneg == AUTONEG_ENABLE &&
+ !(val & BMSR_ANEGCOMPLETE)) {
+ timeout++;
+
+ if (timeout > AUTONEG_TIMEOUT) {
+ timeout = 0;
+
+ val = mv2222_swap_line_type(phydev);
+ if (val < 0)
+ return val;
+
+ return mv2222_config_aneg(phydev);
+ }
+
+ return 0;
+ }
+
+ if (!(val & BMSR_LSTATUS))
+ return 0;
+
+ link = 1;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT);
+ if (val < 0)
+ return val;
+
+ if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) {
+ if (val & MV_1GBX_PHY_STAT_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (val & MV_1GBX_PHY_STAT_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (val & MV_1GBX_PHY_STAT_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+ }
+
+ return link;
+}
+
+static bool mv2222_link_is_operational(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_RX_SIGNAL_DETECT);
+ if (val < 0 || !(val & MV_RX_SIGNAL_DETECT_GLOBAL))
+ return false;
+
+ if (phydev->sfp_bus && !priv->sfp_link)
+ return false;
+
+ return true;
+}
+
+static int mv2222_read_status(struct phy_device *phydev)
+{
+ struct mv2222_data *priv = phydev->priv;
+ int link;
+
+ phydev->link = 0;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+
+ if (!mv2222_link_is_operational(phydev))
+ return 0;
+
+ if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
+ link = mv2222_read_status_10g(phydev);
+ else
+ link = mv2222_read_status_1g(phydev);
+
+ if (link < 0)
+ return link;
+
+ phydev->link = link;
+
+ return 0;
+}
+
+static int mv2222_resume(struct phy_device *phydev)
+{
+ return mv2222_tx_enable(phydev);
+}
+
+static int mv2222_suspend(struct phy_device *phydev)
+{
+ return mv2222_tx_disable(phydev);
+}
+
+static int mv2222_get_features(struct phy_device *phydev)
+{
+ /* All supported linkmodes are set at probe */
+
+ return 0;
+}
+
+static int mv2222_config_init(struct phy_device *phydev)
+{
+ if (phydev->interface != PHY_INTERFACE_MODE_XAUI)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ phy_interface_t sfp_interface;
+ struct mv2222_data *priv;
+ struct device *dev;
+ int ret;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, };
+
+ priv = (struct mv2222_data *)phydev->priv;
+ dev = &phydev->mdio.dev;
+
+ sfp_parse_support(phydev->sfp_bus, id, sfp_supported);
+ sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
+
+ dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
+
+ if (sfp_interface != PHY_INTERFACE_MODE_10GBASER &&
+ sfp_interface != PHY_INTERFACE_MODE_1000BASEX &&
+ sfp_interface != PHY_INTERFACE_MODE_SGMII) {
+ dev_err(dev, "Incompatible SFP module inserted\n");
+
+ return -EINVAL;
+ }
+
+ priv->line_interface = sfp_interface;
+ linkmode_and(priv->supported, phydev->supported, sfp_supported);
+
+ ret = mv2222_config_line(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (mutex_trylock(&phydev->lock)) {
+ ret = mv2222_config_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
+}
+
+static void mv2222_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct mv2222_data *priv;
+
+ priv = (struct mv2222_data *)phydev->priv;
+
+ priv->line_interface = PHY_INTERFACE_MODE_NA;
+ linkmode_zero(priv->supported);
+}
+
+static void mv2222_sfp_link_up(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct mv2222_data *priv;
+
+ priv = phydev->priv;
+ priv->sfp_link = true;
+}
+
+static void mv2222_sfp_link_down(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ struct mv2222_data *priv;
+
+ priv = phydev->priv;
+ priv->sfp_link = false;
+}
+
+static const struct sfp_upstream_ops sfp_phy_ops = {
+ .module_insert = mv2222_sfp_insert,
+ .module_remove = mv2222_sfp_remove,
+ .link_up = mv2222_sfp_link_up,
+ .link_down = mv2222_sfp_link_down,
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+};
+
+static int mv2222_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct mv2222_data *priv = NULL;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported);
+
+ linkmode_copy(phydev->supported, supported);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->line_interface = PHY_INTERFACE_MODE_NA;
+ phydev->priv = priv;
+
+ return phy_sfp_probe(phydev, &sfp_phy_ops);
+}
+
+static struct phy_driver mv2222_drivers[] = {
+ {
+ .phy_id = MARVELL_PHY_ID_88X2222,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88X2222",
+ .get_features = mv2222_get_features,
+ .soft_reset = mv2222_soft_reset,
+ .config_init = mv2222_config_init,
+ .config_aneg = mv2222_config_aneg,
+ .aneg_done = mv2222_aneg_done,
+ .probe = mv2222_probe,
+ .suspend = mv2222_suspend,
+ .resume = mv2222_resume,
+ .read_status = mv2222_read_status,
+ },
+};
+module_phy_driver(mv2222_drivers);
+
+static struct mdio_device_id __maybe_unused mv2222_tbl[] = {
+ { MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, mv2222_tbl);
+
+MODULE_DESCRIPTION("Marvell 88x2222 ethernet transceiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 8018ddf7f316..0b2cccb0d865 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -113,15 +113,26 @@
#define MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN BIT(9)
#define MII_88E6390_MISC_TEST 0x1b
-#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
-#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
-#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15)
-#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0
-#define MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14)
+#define MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE_SAMPLE_1S (0x0 << 14)
+#define MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE (0x1 << 14)
+#define MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE_ONESHOT (0x2 << 14)
+#define MII_88E6390_MISC_TEST_TEMP_SENSOR_DISABLE (0x3 << 14)
+#define MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK (0x3 << 14)
+#define MII_88E6393_MISC_TEST_SAMPLES_2048 (0x0 << 11)
+#define MII_88E6393_MISC_TEST_SAMPLES_4096 (0x1 << 11)
+#define MII_88E6393_MISC_TEST_SAMPLES_8192 (0x2 << 11)
+#define MII_88E6393_MISC_TEST_SAMPLES_16384 (0x3 << 11)
+#define MII_88E6393_MISC_TEST_SAMPLES_MASK (0x3 << 11)
+#define MII_88E6393_MISC_TEST_RATE_2_3MS (0x5 << 8)
+#define MII_88E6393_MISC_TEST_RATE_6_4MS (0x6 << 8)
+#define MII_88E6393_MISC_TEST_RATE_11_9MS (0x7 << 8)
+#define MII_88E6393_MISC_TEST_RATE_MASK (0x7 << 8)
#define MII_88E6390_TEMP_SENSOR 0x1c
-#define MII_88E6390_TEMP_SENSOR_MASK 0xff
-#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
+#define MII_88E6393_TEMP_SENSOR_THRESHOLD_MASK 0xff00
+#define MII_88E6393_TEMP_SENSOR_THRESHOLD_SHIFT 8
+#define MII_88E6390_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
@@ -967,22 +978,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
{
- int val;
+ int val, err;
if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
return -E2BIG;
- if (!cnt)
- return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
- MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+ if (!cnt) {
+ err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+ } else {
+ val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
- val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
- val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+ val);
+ }
- return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
- MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
- MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
- val);
+ if (err < 0)
+ return err;
+
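+ /* The new downshift setting only takes effect after a soft reset. */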
+ return genphy_soft_reset(phydev);
}
static int m88e1111_get_tunable(struct phy_device *phydev,
@@ -1025,22 +1042,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
{
- int val;
+ int val, err;
if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
return -E2BIG;
- if (!cnt)
- return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
- MII_M1011_PHY_SCR_DOWNSHIFT_EN);
+ if (!cnt) {
+ err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN);
+ } else {
+ val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+
+ err = phy_modify(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+ MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+ val);
+ }
- val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
- val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+ if (err < 0)
+ return err;
- return phy_modify(phydev, MII_M1011_PHY_SCR,
- MII_M1011_PHY_SCR_DOWNSHIFT_EN |
- MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
- val);
+ return genphy_soft_reset(phydev);
}
static int m88e1011_get_tunable(struct phy_device *phydev,
@@ -2216,6 +2239,20 @@ static int marvell_vct7_cable_test_get_status(struct phy_device *phydev,
}
#ifdef CONFIG_HWMON
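+/* Per-chip hwmon callbacks, reached through the driver's driver_data;
+ * they replace the duplicated per-chip hwmon boilerplate removed below.
+ */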
+struct marvell_hwmon_ops {
+ int (*config)(struct phy_device *phydev);
+ int (*get_temp)(struct phy_device *phydev, long *temp);
+ int (*get_temp_critical)(struct phy_device *phydev, long *temp);
+ int (*set_temp_critical)(struct phy_device *phydev, long temp);
+ int (*get_temp_alarm)(struct phy_device *phydev, long *alarm);
+};
+
+static const struct marvell_hwmon_ops *
+to_marvell_hwmon_ops(const struct phy_device *phydev)
+{
+ return phydev->drv->driver_data;
+}
+
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
int oldpage;
@@ -2259,75 +2296,6 @@ error:
return phy_restore_page(phydev, oldpage, ret);
}
-static int m88e1121_hwmon_read(struct device *dev,
- enum hwmon_sensor_types type,
- u32 attr, int channel, long *temp)
-{
- struct phy_device *phydev = dev_get_drvdata(dev);
- int err;
-
- switch (attr) {
- case hwmon_temp_input:
- err = m88e1121_get_temp(phydev, temp);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return err;
-}
-
-static umode_t m88e1121_hwmon_is_visible(const void *data,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- if (type != hwmon_temp)
- return 0;
-
- switch (attr) {
- case hwmon_temp_input:
- return 0444;
- default:
- return 0;
- }
-}
-
-static u32 m88e1121_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info m88e1121_hwmon_chip = {
- .type = hwmon_chip,
- .config = m88e1121_hwmon_chip_config,
-};
-
-static u32 m88e1121_hwmon_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info m88e1121_hwmon_temp = {
- .type = hwmon_temp,
- .config = m88e1121_hwmon_temp_config,
-};
-
-static const struct hwmon_channel_info *m88e1121_hwmon_info[] = {
- &m88e1121_hwmon_chip,
- &m88e1121_hwmon_temp,
- NULL
-};
-
-static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = {
- .is_visible = m88e1121_hwmon_is_visible,
- .read = m88e1121_hwmon_read,
-};
-
-static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
- .ops = &m88e1121_hwmon_hwmon_ops,
- .info = m88e1121_hwmon_info,
-};
-
static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
{
int ret;
@@ -2390,92 +2358,6 @@ static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
return 0;
}
-static int m88e1510_hwmon_read(struct device *dev,
- enum hwmon_sensor_types type,
- u32 attr, int channel, long *temp)
-{
- struct phy_device *phydev = dev_get_drvdata(dev);
- int err;
-
- switch (attr) {
- case hwmon_temp_input:
- err = m88e1510_get_temp(phydev, temp);
- break;
- case hwmon_temp_crit:
- err = m88e1510_get_temp_critical(phydev, temp);
- break;
- case hwmon_temp_max_alarm:
- err = m88e1510_get_temp_alarm(phydev, temp);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return err;
-}
-
-static int m88e1510_hwmon_write(struct device *dev,
- enum hwmon_sensor_types type,
- u32 attr, int channel, long temp)
-{
- struct phy_device *phydev = dev_get_drvdata(dev);
- int err;
-
- switch (attr) {
- case hwmon_temp_crit:
- err = m88e1510_set_temp_critical(phydev, temp);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return err;
-}
-
-static umode_t m88e1510_hwmon_is_visible(const void *data,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- if (type != hwmon_temp)
- return 0;
-
- switch (attr) {
- case hwmon_temp_input:
- case hwmon_temp_max_alarm:
- return 0444;
- case hwmon_temp_crit:
- return 0644;
- default:
- return 0;
- }
-}
-
-static u32 m88e1510_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info m88e1510_hwmon_temp = {
- .type = hwmon_temp,
- .config = m88e1510_hwmon_temp_config,
-};
-
-static const struct hwmon_channel_info *m88e1510_hwmon_info[] = {
- &m88e1121_hwmon_chip,
- &m88e1510_hwmon_temp,
- NULL
-};
-
-static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = {
- .is_visible = m88e1510_hwmon_is_visible,
- .read = m88e1510_hwmon_read,
- .write = m88e1510_hwmon_write,
-};
-
-static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
- .ops = &m88e1510_hwmon_hwmon_ops,
- .info = m88e1510_hwmon_info,
-};
-
static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
{
int sum = 0;
@@ -2494,9 +2376,8 @@ static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
if (ret < 0)
goto error;
- ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
- ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE |
- MII_88E6390_MISC_TEST_SAMPLE_1S;
+ ret &= ~MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK;
+ ret |= MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE_SAMPLE_1S;
ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
if (ret < 0)
@@ -2523,8 +2404,8 @@ static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
if (ret < 0)
goto error;
- ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
- ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE;
+	ret &= ~MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK;
+ ret |= MII_88E6390_MISC_TEST_TEMP_SENSOR_DISABLE;
ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
@@ -2534,63 +2415,169 @@ error:
return ret;
}
-static int m88e6390_hwmon_read(struct device *dev,
- enum hwmon_sensor_types type,
- u32 attr, int channel, long *temp)
+static int m88e6393_get_temp(struct phy_device *phydev, long *temp)
+{
+ int err;
+
+ err = m88e1510_get_temp(phydev, temp);
+
+	/* The 88E1510 measures T + 25, while the PHY on the 88E6393X
+	 * switch measures T + 75, so we have to subtract another 50
+	 */
+ *temp -= 50000;
+
+ return err;
+}
+
+static int m88e6393_get_temp_critical(struct phy_device *phydev, long *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E6390_TEMP_SENSOR);
+ if (ret < 0)
+ return ret;
+
+ *temp = (((ret & MII_88E6393_TEMP_SENSOR_THRESHOLD_MASK) >>
+ MII_88E6393_TEMP_SENSOR_THRESHOLD_SHIFT) - 75) * 1000;
+
+ return 0;
+}
+
+static int m88e6393_set_temp_critical(struct phy_device *phydev, long temp)
+{
+ temp = (temp / 1000) + 75;
+
+ return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E6390_TEMP_SENSOR,
+ MII_88E6393_TEMP_SENSOR_THRESHOLD_MASK,
+ temp << MII_88E6393_TEMP_SENSOR_THRESHOLD_SHIFT);
+}
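Worked example for the conversion pair above: the 100 °C default that m88e6393_hwmon_config() installs is stored as (100000 / 1000) + 75 = 175 in the threshold field, and reading that field back yields (175 - 75) * 1000 = 100000 m°C.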
+
+static int m88e6393_hwmon_config(struct phy_device *phydev)
{
- struct phy_device *phydev = dev_get_drvdata(dev);
int err;
+ err = m88e6393_set_temp_critical(phydev, 100000);
+ if (err)
+ return err;
+
+ return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E6390_MISC_TEST,
+ MII_88E6390_MISC_TEST_TEMP_SENSOR_MASK |
+ MII_88E6393_MISC_TEST_SAMPLES_MASK |
+ MII_88E6393_MISC_TEST_RATE_MASK,
+ MII_88E6390_MISC_TEST_TEMP_SENSOR_ENABLE |
+ MII_88E6393_MISC_TEST_SAMPLES_2048 |
+ MII_88E6393_MISC_TEST_RATE_2_3MS);
+}
+
+static int marvell_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev);
+ int err = -EOPNOTSUPP;
+
switch (attr) {
case hwmon_temp_input:
- err = m88e6390_get_temp(phydev, temp);
+ if (ops->get_temp)
+ err = ops->get_temp(phydev, temp);
+ break;
+ case hwmon_temp_crit:
+ if (ops->get_temp_critical)
+ err = ops->get_temp_critical(phydev, temp);
+ break;
+ case hwmon_temp_max_alarm:
+ if (ops->get_temp_alarm)
+ err = ops->get_temp_alarm(phydev, temp);
+ break;
+ }
+
+ return err;
+}
+
+static int marvell_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev);
+ int err = -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ if (ops->set_temp_critical)
+ err = ops->set_temp_critical(phydev, temp);
break;
- default:
- return -EOPNOTSUPP;
}
return err;
}
-static umode_t m88e6390_hwmon_is_visible(const void *data,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
+static umode_t marvell_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
+ const struct phy_device *phydev = data;
+ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev);
+
if (type != hwmon_temp)
return 0;
switch (attr) {
case hwmon_temp_input:
- return 0444;
+ return ops->get_temp ? 0444 : 0;
+ case hwmon_temp_max_alarm:
+ return ops->get_temp_alarm ? 0444 : 0;
+ case hwmon_temp_crit:
+ return (ops->get_temp_critical ? 0444 : 0) |
+ (ops->set_temp_critical ? 0200 : 0);
default:
return 0;
}
}
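The permission composition above means hwmon_temp_crit is reported as 0644 (0444 | 0200) when a chip supplies both get_temp_critical and set_temp_critical, 0444 with only a getter, and 0 (hidden) otherwise, so a single shared attribute table can serve every Marvell variant.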
-static u32 m88e6390_hwmon_temp_config[] = {
- HWMON_T_INPUT,
+static u32 marvell_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info marvell_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = marvell_hwmon_chip_config,
+};
+
+/* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not
+ * defined for all PHYs, because the hwmon code checks whether the attributes
+ * exist via the .is_visible method
+ */
+static u32 marvell_hwmon_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
0
};
-static const struct hwmon_channel_info m88e6390_hwmon_temp = {
+static const struct hwmon_channel_info marvell_hwmon_temp = {
.type = hwmon_temp,
- .config = m88e6390_hwmon_temp_config,
+ .config = marvell_hwmon_temp_config,
};
-static const struct hwmon_channel_info *m88e6390_hwmon_info[] = {
- &m88e1121_hwmon_chip,
- &m88e6390_hwmon_temp,
+static const struct hwmon_channel_info *marvell_hwmon_info[] = {
+ &marvell_hwmon_chip,
+ &marvell_hwmon_temp,
NULL
};
-static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = {
- .is_visible = m88e6390_hwmon_is_visible,
- .read = m88e6390_hwmon_read,
+static const struct hwmon_ops marvell_hwmon_hwmon_ops = {
+ .is_visible = marvell_hwmon_is_visible,
+ .read = marvell_hwmon_read,
+ .write = marvell_hwmon_write,
};
-static const struct hwmon_chip_info m88e6390_hwmon_chip_info = {
- .ops = &m88e6390_hwmon_hwmon_ops,
- .info = m88e6390_hwmon_info,
+static const struct hwmon_chip_info marvell_hwmon_chip_info = {
+ .ops = &marvell_hwmon_hwmon_ops,
+ .info = marvell_hwmon_info,
};
static int marvell_hwmon_name(struct phy_device *phydev)
@@ -2613,49 +2600,61 @@ static int marvell_hwmon_name(struct phy_device *phydev)
return 0;
}
-static int marvell_hwmon_probe(struct phy_device *phydev,
- const struct hwmon_chip_info *chip)
+static int marvell_hwmon_probe(struct phy_device *phydev)
{
+ const struct marvell_hwmon_ops *ops = to_marvell_hwmon_ops(phydev);
struct marvell_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
int err;
+ if (!ops)
+ return 0;
+
err = marvell_hwmon_name(phydev);
if (err)
return err;
priv->hwmon_dev = devm_hwmon_device_register_with_info(
- dev, priv->hwmon_name, phydev, chip, NULL);
+ dev, priv->hwmon_name, phydev, &marvell_hwmon_chip_info, NULL);
+ if (IS_ERR(priv->hwmon_dev))
+ return PTR_ERR(priv->hwmon_dev);
- return PTR_ERR_OR_ZERO(priv->hwmon_dev);
-}
+ if (ops->config)
+ err = ops->config(phydev);
-static int m88e1121_hwmon_probe(struct phy_device *phydev)
-{
- return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info);
+ return err;
}
-static int m88e1510_hwmon_probe(struct phy_device *phydev)
-{
- return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
-}
+static const struct marvell_hwmon_ops m88e1121_hwmon_ops = {
+ .get_temp = m88e1121_get_temp,
+};
+
+static const struct marvell_hwmon_ops m88e1510_hwmon_ops = {
+ .get_temp = m88e1510_get_temp,
+ .get_temp_critical = m88e1510_get_temp_critical,
+ .set_temp_critical = m88e1510_set_temp_critical,
+ .get_temp_alarm = m88e1510_get_temp_alarm,
+};
+
+static const struct marvell_hwmon_ops m88e6390_hwmon_ops = {
+ .get_temp = m88e6390_get_temp,
+};
+
+static const struct marvell_hwmon_ops m88e6393_hwmon_ops = {
+ .config = m88e6393_hwmon_config,
+ .get_temp = m88e6393_get_temp,
+ .get_temp_critical = m88e6393_get_temp_critical,
+ .set_temp_critical = m88e6393_set_temp_critical,
+ .get_temp_alarm = m88e1510_get_temp_alarm,
+};
+
+#define DEF_MARVELL_HWMON_OPS(s) (&(s))
-static int m88e6390_hwmon_probe(struct phy_device *phydev)
-{
- return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info);
-}
#else
-static int m88e1121_hwmon_probe(struct phy_device *phydev)
-{
- return 0;
-}
-static int m88e1510_hwmon_probe(struct phy_device *phydev)
-{
- return 0;
-}
+#define DEF_MARVELL_HWMON_OPS(s) NULL
-static int m88e6390_hwmon_probe(struct phy_device *phydev)
+static int marvell_hwmon_probe(struct phy_device *phydev)
{
return 0;
}
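A sketch of what the two DEF_MARVELL_HWMON_OPS() expansions give a driver entry, using the 88E1510 as the example:

	/* CONFIG_HWMON=y:  .driver_data = (&(m88e1510_hwmon_ops)), */
	/* CONFIG_HWMON=n:  .driver_data = NULL,                    */

With CONFIG_HWMON disabled, .driver_data is NULL and the marvell_hwmon_probe() stub above simply returns 0, so no hwmon code is referenced and no further #ifdefs are needed in the driver table.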
@@ -2671,40 +2670,7 @@ static int marvell_probe(struct phy_device *phydev)
phydev->priv = priv;
- return 0;
-}
-
-static int m88e1121_probe(struct phy_device *phydev)
-{
- int err;
-
- err = marvell_probe(phydev);
- if (err)
- return err;
-
- return m88e1121_hwmon_probe(phydev);
-}
-
-static int m88e1510_probe(struct phy_device *phydev)
-{
- int err;
-
- err = marvell_probe(phydev);
- if (err)
- return err;
-
- return m88e1510_hwmon_probe(phydev);
-}
-
-static int m88e6390_probe(struct phy_device *phydev)
-{
- int err;
-
- err = marvell_probe(phydev);
- if (err)
- return err;
-
- return m88e6390_hwmon_probe(phydev);
+ return marvell_hwmon_probe(phydev);
}
static struct phy_driver marvell_drivers[] = {
@@ -2810,8 +2776,9 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1121R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1121R",
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1121_hwmon_ops),
/* PHY_GBIT_FEATURES */
- .probe = m88e1121_probe,
+ .probe = marvell_probe,
.config_init = marvell_config_init,
.config_aneg = m88e1121_config_aneg,
.read_status = marvell_read_status,
@@ -2903,6 +2870,8 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E1116R,
@@ -2927,9 +2896,10 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1510,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
.features = PHY_GBIT_FIBRE_FEATURES,
.flags = PHY_POLL_CABLE_TEST,
- .probe = m88e1510_probe,
+ .probe = marvell_probe,
.config_init = m88e1510_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
@@ -2955,9 +2925,10 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1540,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
- .probe = m88e1510_probe,
+ .probe = marvell_probe,
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
@@ -2980,7 +2951,8 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1545,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
- .probe = m88e1510_probe,
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
+ .probe = marvell_probe,
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.config_init = marvell_config_init,
@@ -3024,9 +2996,10 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6341 Family",
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
- .probe = m88e1510_probe,
+ .probe = marvell_probe,
.config_init = marvell_config_init,
.config_aneg = m88e6390_config_aneg,
.read_status = marvell_read_status,
@@ -3049,9 +3022,10 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390 Family",
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e6390_hwmon_ops),
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
- .probe = m88e6390_probe,
+ .probe = marvell_probe,
.config_init = marvell_config_init,
.config_aneg = m88e6390_config_aneg,
.read_status = marvell_read_status,
@@ -3071,10 +3045,37 @@ static struct phy_driver marvell_drivers[] = {
.cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
+ .phy_id = MARVELL_PHY_ID_88E6393_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6393 Family",
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e6393_hwmon_ops),
+ /* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = marvell_probe,
+ .config_init = marvell_config_init,
+ .config_aneg = m88e1510_config_aneg,
+ .read_status = marvell_read_status,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1340S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1340S",
- .probe = m88e1510_probe,
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
+ .probe = marvell_probe,
/* PHY_GBIT_FEATURES */
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
@@ -3095,7 +3096,8 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1548P,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1548P",
- .probe = m88e1510_probe,
+ .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
+ .probe = marvell_probe,
.features = PHY_GBIT_FIBRE_FEATURES,
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
@@ -3134,6 +3136,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6393_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
{ }
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index b1bb9b8e1e4e..bbbc6ac8fa82 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -35,6 +35,15 @@
enum {
MV_PMA_FW_VER0 = 0xc011,
MV_PMA_FW_VER1 = 0xc012,
+ MV_PMA_21X0_PORT_CTRL = 0xc04a,
+ MV_PMA_21X0_PORT_CTRL_SWRST = BIT(15),
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK = 0x7,
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII = 0x0,
+ MV_PMA_2180_PORT_CTRL_MACTYPE_DXGMII = 0x1,
+ MV_PMA_2180_PORT_CTRL_MACTYPE_QXGMII = 0x2,
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER = 0x4,
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER_NO_SGMII_AN = 0x5,
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
MV_PMA_BOOT = 0xc050,
MV_PMA_BOOT_FATAL = BIT(0),
@@ -78,10 +87,18 @@ enum {
/* Vendor2 MMD registers */
MV_V2_PORT_CTRL = 0xf001,
- MV_V2_PORT_CTRL_SWRST = BIT(15),
- MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
- MV_V2_PORT_MAC_TYPE_MASK = 0x7,
- MV_V2_PORT_MAC_TYPE_RATE_MATCH = 0x6,
+ MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
+ MV_V2_33X0_PORT_CTRL_SWRST = BIT(15),
+ MV_V2_33X0_PORT_CTRL_MACTYPE_MASK = 0x7,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI = 0x0,
+ MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH = 0x1,
+ MV_V2_3340_PORT_CTRL_MACTYPE_RXAUI_NO_SGMII_AN = 0x1,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH = 0x2,
+ MV_V2_3310_PORT_CTRL_MACTYPE_XAUI = 0x3,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER = 0x4,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7,
/* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
@@ -91,14 +108,32 @@ enum {
MV_V2_TEMP_UNKNOWN = 0x9600, /* unknown function */
};
+struct mv3310_chip {
+ void (*init_supported_interfaces)(unsigned long *mask);
+ int (*get_mactype)(struct phy_device *phydev);
+ int (*init_interface)(struct phy_device *phydev, int mactype);
+
+#ifdef CONFIG_HWMON
+ int (*hwmon_read_temp_reg)(struct phy_device *phydev);
+#endif
+};
+
struct mv3310_priv {
+ DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX);
+
u32 firmware_ver;
bool rate_match;
+ phy_interface_t const_interface;
struct device *hwmon_dev;
char *hwmon_name;
};
+static const struct mv3310_chip *to_mv3310_chip(struct phy_device *phydev)
+{
+ return phydev->drv->driver_data;
+}
+
#ifdef CONFIG_HWMON
static umode_t mv3310_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
@@ -121,18 +156,11 @@ static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev)
return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP);
}
-static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev)
-{
- if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310)
- return mv3310_hwmon_read_temp_reg(phydev);
- else /* MARVELL_PHY_ID_88E2110 */
- return mv2110_hwmon_read_temp_reg(phydev);
-}
-
static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *value)
{
struct phy_device *phydev = dev_get_drvdata(dev);
+ const struct mv3310_chip *chip = to_mv3310_chip(phydev);
int temp;
if (type == hwmon_chip && attr == hwmon_chip_update_interval) {
@@ -141,7 +169,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
}
if (type == hwmon_temp && attr == hwmon_temp_input) {
- temp = mv10g_hwmon_read_temp_reg(phydev);
+ temp = chip->hwmon_read_temp_reg(phydev);
if (temp < 0)
return temp;
@@ -268,7 +296,7 @@ static int mv3310_power_up(struct phy_device *phydev)
return ret;
return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_SWRST);
+ MV_V2_33X0_PORT_CTRL_SWRST);
}
static int mv3310_reset(struct phy_device *phydev, u32 unit)
@@ -363,6 +391,7 @@ static const struct sfp_upstream_ops mv3310_sfp_ops = {
static int mv3310_probe(struct phy_device *phydev)
{
+ const struct mv3310_chip *chip = to_mv3310_chip(phydev);
struct mv3310_priv *priv;
u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
int ret;
@@ -412,6 +441,8 @@ static int mv3310_probe(struct phy_device *phydev)
if (ret)
return ret;
+ chip->init_supported_interfaces(priv->supported_interfaces);
+
return phy_sfp_probe(phydev, &mv3310_sfp_ops);
}
@@ -453,18 +484,102 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev)
MV_PHY_ALASKA_NBT_QUIRK_MASK) == MV_PHY_ALASKA_NBT_QUIRK_REV;
}
+static int mv2110_get_mactype(struct phy_device *phydev)
+{
+ int mactype;
+
+ mactype = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_21X0_PORT_CTRL);
+ if (mactype < 0)
+ return mactype;
+
+ return mactype & MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
+}
+
+static int mv3310_get_mactype(struct phy_device *phydev)
+{
+ int mactype;
+
+ mactype = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL);
+ if (mactype < 0)
+ return mactype;
+
+ return mactype & MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
+}
+
+static int mv2110_init_interface(struct phy_device *phydev, int mactype)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+
+ priv->rate_match = false;
+
+ if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH)
+ priv->rate_match = true;
+
+ if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII)
+ priv->const_interface = PHY_INTERFACE_MODE_USXGMII;
+ else if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH)
+ priv->const_interface = PHY_INTERFACE_MODE_10GBASER;
+ else if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER ||
+ mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER_NO_SGMII_AN)
+ priv->const_interface = PHY_INTERFACE_MODE_NA;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mv3310_init_interface(struct phy_device *phydev, int mactype)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+
+ priv->rate_match = false;
+
+ if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH ||
+ mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH ||
+ mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH)
+ priv->rate_match = true;
+
+ if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII)
+ priv->const_interface = PHY_INTERFACE_MODE_USXGMII;
+ else if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH ||
+ mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN ||
+ mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER)
+ priv->const_interface = PHY_INTERFACE_MODE_10GBASER;
+ else if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH ||
+ mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI)
+ priv->const_interface = PHY_INTERFACE_MODE_RXAUI;
+ else if (mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH ||
+ mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI)
+ priv->const_interface = PHY_INTERFACE_MODE_XAUI;
+ else
+ return -EINVAL;
+
+ return 0;
+}
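For instance, a 88X3310 whose MACTYPE field reads 0x6 (10GBASER_RATE_MATCH) ends up with rate_match = true and const_interface = PHY_INTERFACE_MODE_10GBASER, while an out-of-range value propagates -EINVAL and makes config_init fail with "MACTYPE configuration invalid".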
+
+static int mv3340_init_interface(struct phy_device *phydev, int mactype)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ int err = 0;
+
+ priv->rate_match = false;
+
+ if (mactype == MV_V2_3340_PORT_CTRL_MACTYPE_RXAUI_NO_SGMII_AN)
+ priv->const_interface = PHY_INTERFACE_MODE_RXAUI;
+ else
+ err = mv3310_init_interface(phydev, mactype);
+
+ return err;
+}
+
static int mv3310_config_init(struct phy_device *phydev)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
- int err;
- int val;
+ const struct mv3310_chip *chip = to_mv3310_chip(phydev);
+ int err, mactype;
/* Check that the PHY interface type is compatible */
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
- phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
- phydev->interface != PHY_INTERFACE_MODE_XAUI &&
- phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
- phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+ if (!test_bit(phydev->interface, priv->supported_interfaces))
return -ENODEV;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
@@ -474,11 +589,15 @@ static int mv3310_config_init(struct phy_device *phydev)
if (err)
return err;
- val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL);
- if (val < 0)
- return val;
- priv->rate_match = ((val & MV_V2_PORT_MAC_TYPE_MASK) ==
- MV_V2_PORT_MAC_TYPE_RATE_MATCH);
+ mactype = chip->get_mactype(phydev);
+ if (mactype < 0)
+ return mactype;
+
+ err = chip->init_interface(phydev, mactype);
+ if (err) {
+ phydev_err(phydev, "MACTYPE configuration invalid\n");
+ return err;
+ }
/* Enable EDPD mode - saving 600mW */
return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
@@ -588,40 +707,44 @@ static void mv3310_update_interface(struct phy_device *phydev)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
- /* In "XFI with Rate Matching" mode the PHY interface is fixed at
- * 10Gb. The PHY adapts the rate to actual wire speed with help of
+ if (!phydev->link)
+ return;
+
+ /* In all of the "* with Rate Matching" modes the PHY interface is fixed
+ * at 10Gb. The PHY adapts the rate to the actual wire speed with the
+ * help of an internal 16KB buffer.
+ *
+ * In USXGMII mode the PHY interface mode is also fixed.
*/
- if (priv->rate_match) {
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ if (priv->rate_match ||
+ priv->const_interface == PHY_INTERFACE_MODE_USXGMII) {
+ phydev->interface = priv->const_interface;
return;
}
- if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_10GBASER) &&
- phydev->link) {
- /* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII, 10GBase-R and
- * 2500BaseX modes according to the speed. Florian suggests
- * setting phydev->interface to communicate this to the MAC.
- * Only do this if we are already in one of the above modes.
- */
- switch (phydev->speed) {
- case SPEED_10000:
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
- break;
- case SPEED_2500:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
- case SPEED_1000:
- case SPEED_100:
- case SPEED_10:
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- break;
- default:
- break;
- }
+ /* The PHY automatically switches its serdes interface (and active PHYXS
+ * instance) between Cisco SGMII, 2500BaseX, 5GBase-R and 10GBase-R /
+ * XAUI / RXAUI modes according to the speed.
+ * Florian suggests setting phydev->interface to communicate this to the
+ * MAC. Only do this if we are already in one of the above modes.
+ */
+ switch (phydev->speed) {
+ case SPEED_10000:
+ phydev->interface = priv->const_interface;
+ break;
+ case SPEED_5000:
+ phydev->interface = PHY_INTERFACE_MODE_5GBASER;
+ break;
+ case SPEED_2500:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ default:
+ break;
}
}
@@ -765,11 +888,133 @@ static int mv3310_set_tunable(struct phy_device *phydev,
}
}
+static void mv3310_init_supported_interfaces(unsigned long *mask)
+{
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mask);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_XAUI, mask);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, mask);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII, mask);
+}
+
+static void mv3340_init_supported_interfaces(unsigned long *mask)
+{
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mask);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, mask);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII, mask);
+}
+
+static void mv2110_init_supported_interfaces(unsigned long *mask)
+{
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mask);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII, mask);
+}
+
+static void mv2111_init_supported_interfaces(unsigned long *mask)
+{
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mask);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mask);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, mask);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII, mask);
+}
+
+static const struct mv3310_chip mv3310_type = {
+ .init_supported_interfaces = mv3310_init_supported_interfaces,
+ .get_mactype = mv3310_get_mactype,
+ .init_interface = mv3310_init_interface,
+
+#ifdef CONFIG_HWMON
+ .hwmon_read_temp_reg = mv3310_hwmon_read_temp_reg,
+#endif
+};
+
+static const struct mv3310_chip mv3340_type = {
+ .init_supported_interfaces = mv3340_init_supported_interfaces,
+ .get_mactype = mv3310_get_mactype,
+ .init_interface = mv3340_init_interface,
+
+#ifdef CONFIG_HWMON
+ .hwmon_read_temp_reg = mv3310_hwmon_read_temp_reg,
+#endif
+};
+
+static const struct mv3310_chip mv2110_type = {
+ .init_supported_interfaces = mv2110_init_supported_interfaces,
+ .get_mactype = mv2110_get_mactype,
+ .init_interface = mv2110_init_interface,
+
+#ifdef CONFIG_HWMON
+ .hwmon_read_temp_reg = mv2110_hwmon_read_temp_reg,
+#endif
+};
+
+static const struct mv3310_chip mv2111_type = {
+ .init_supported_interfaces = mv2111_init_supported_interfaces,
+ .get_mactype = mv2110_get_mactype,
+ .init_interface = mv2110_init_interface,
+
+#ifdef CONFIG_HWMON
+ .hwmon_read_temp_reg = mv2110_hwmon_read_temp_reg,
+#endif
+};
+
+static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
+{
+ int val;
+
+ if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
+ MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88E2110)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_SPEED);
+ if (val < 0)
+ return val;
+
+ return !!(val & MDIO_PCS_SPEED_5G) == has_5g;
+}
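Both mv88e2110 and mv88e2111 entries below share MARVELL_PHY_ID_88E2110, so the PHY core cannot tell them apart by ID alone and relies on .match_phy_device: the match reads the PCS speed-ability register and selects mv2110 when MDIO_PCS_SPEED_5G is advertised, mv2111 (the variant without 5GBase-R) when it is not.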
+
+static int mv2110_match_phy_device(struct phy_device *phydev)
+{
+ return mv211x_match_phy_device(phydev, true);
+}
+
+static int mv2111_match_phy_device(struct phy_device *phydev)
+{
+ return mv211x_match_phy_device(phydev, false);
+}
+
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
- .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
.name = "mv88x3310",
+ .driver_data = &mv3310_type,
+ .get_features = mv3310_get_features,
+ .config_init = mv3310_config_init,
+ .probe = mv3310_probe,
+ .suspend = mv3310_suspend,
+ .resume = mv3310_resume,
+ .config_aneg = mv3310_config_aneg,
+ .aneg_done = mv3310_aneg_done,
+ .read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
+ .remove = mv3310_remove,
+ .set_loopback = genphy_c45_loopback,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88X3340,
+ .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .name = "mv88x3340",
+ .driver_data = &mv3340_type,
.get_features = mv3310_get_features,
.config_init = mv3310_config_init,
.probe = mv3310_probe,
@@ -781,11 +1026,32 @@ static struct phy_driver mv3310_drivers[] = {
.get_tunable = mv3310_get_tunable,
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
+ .set_loopback = genphy_c45_loopback,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E2110,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv2110_match_phy_device,
+ .name = "mv88e2110",
+ .driver_data = &mv2110_type,
+ .probe = mv3310_probe,
+ .suspend = mv3310_suspend,
+ .resume = mv3310_resume,
+ .config_init = mv3310_config_init,
+ .config_aneg = mv3310_config_aneg,
+ .aneg_done = mv3310_aneg_done,
+ .read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
+ .remove = mv3310_remove,
+ .set_loopback = genphy_c45_loopback,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "mv88x2110",
+ .match_phy_device = mv2111_match_phy_device,
+ .name = "mv88e2111",
+ .driver_data = &mv2111_type,
.probe = mv3310_probe,
.suspend = mv3310_suspend,
.resume = mv3310_resume,
@@ -796,16 +1062,18 @@ static struct phy_driver mv3310_drivers[] = {
.get_tunable = mv3310_get_tunable,
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
+ .set_loopback = genphy_c45_loopback,
},
};
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
+ { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
};
MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
-MODULE_DESCRIPTION("Marvell Alaska X 10Gigabit Ethernet PHY driver (MV88X3310)");
+MODULE_DESCRIPTION("Marvell Alaska X/M multi-gigabit Ethernet PHY driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 033df435f76c..2de679a68115 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -50,7 +50,7 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info);
/**
- * mdio_register_board_info - register MDIO devices for a given board
+ * mdiobus_register_board_info - register MDIO devices for a given board
* @info: array of devices descriptors
* @n: number of descriptors provided
* Context: can sleep
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 823518554079..dadf75ff3ab9 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -510,7 +510,7 @@ static int mdiobus_create_device(struct mii_bus *bus,
* on a given bus, and attach them to the bus. Drivers should use
* mdiobus_register() rather than __mdiobus_register() unless they
* need to pass a specific owner module. MDIO devices which are not
- * PHYs will not be brought up by this function. They are expected to
+ * PHYs will not be brought up by this function. They are expected
* to be explicitly listed in DT and instantiated by of_mdiobus_register().
*
* Returns 0 on success or < 0 on error.
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 3a7705228ed5..6e32da28e138 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -1362,6 +1362,12 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
u16 crc, reg;
int ret;
+ ret = vsc8584_pll5g_reset(phydev);
+ if (ret < 0) {
+ dev_err(dev, "failed LCPLL reset, ret: %d\n", ret);
+ return ret;
+ }
+
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
/* all writes below are broadcasted to all PHYs in the same package */
@@ -1466,6 +1472,24 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
if (ret)
goto out;
+ /* Write patch vector 0, to skip IB cal polling */
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO);
+ reg = MSCC_ROM_TRAP_SERDES_6G_CFG; /* ROM address to trap, for patch vector 0 */
+ ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg);
+ if (ret)
+ goto out;
+
+	reg = MSCC_RAM_TRAP_SERDES_6G_CFG; /* RAM address to jump to, when patch vector 0 is enabled */
+ ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg);
+ if (ret)
+ goto out;
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */
+ ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+ if (ret)
+ goto out;
+
vsc8584_micro_deassert_reset(phydev, true);
out:
@@ -1531,62 +1555,81 @@ static void vsc85xx_coma_mode_release(struct phy_device *phydev)
vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD);
}
-static int vsc8584_config_init(struct phy_device *phydev)
+static int vsc8584_config_host_serdes(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
- int ret, i;
+ int ret;
u16 val;
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+ if (ret)
+ return ret;
- phy_lock_mdio_bus(phydev);
+ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+ val &= ~MAC_CFG_MASK;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) {
+ val |= MAC_CFG_QSGMII;
+ } else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val |= MAC_CFG_SGMII;
+ } else {
+ ret = -EINVAL;
+ return ret;
+ }
- /* Some parts of the init sequence are identical for every PHY in the
- * package. Some parts are modifying the GPIO register bank which is a
- * set of registers that are affecting all PHYs, a few resetting the
- * microprocessor common to all PHYs. The CRC check responsible of the
- * checking the firmware within the 8051 microprocessor can only be
- * accessed via the PHY whose internal address in the package is 0.
- * All PHYs' interrupts mask register has to be zeroed before enabling
- * any PHY's interrupt in this register.
- * For all these reasons, we need to do the init sequence once and only
- * once whatever is the first PHY in the package that is initialized and
- * do the correct init sequence for all PHYs that are package-critical
- * in this pre-init function.
- */
- if (phy_package_init_once(phydev)) {
- /* The following switch statement assumes that the lowest
- * nibble of the phy_id_mask is always 0. This works because
- * the lowest nibble of the PHY_ID's below are also 0.
- */
- WARN_ON(phydev->drv->phy_id_mask & 0xf);
+ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+ if (ret)
+ return ret;
- switch (phydev->phy_id & phydev->drv->phy_id_mask) {
- case PHY_ID_VSC8504:
- case PHY_ID_VSC8552:
- case PHY_ID_VSC8572:
- case PHY_ID_VSC8574:
- ret = vsc8574_config_pre_init(phydev);
- break;
- case PHY_ID_VSC856X:
- case PHY_ID_VSC8575:
- case PHY_ID_VSC8582:
- case PHY_ID_VSC8584:
- ret = vsc8584_config_pre_init(phydev);
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+ if (ret)
+ return ret;
- if (ret)
- goto err;
- }
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= PROC_CMD_QSGMII_MAC;
+ else
+ val |= PROC_CMD_SGMII_MAC;
+
+ ret = vsc8584_cmd(phydev, val);
+ if (ret)
+ return ret;
+
+ usleep_range(10000, 20000);
+
+ /* Disable SerDes for 100Base-FX */
+ ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(vsc8531->addr) |
+ PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
+ if (ret)
+ return ret;
+
+ /* Disable SerDes for 1000Base-X */
+ ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(vsc8531->addr) |
+ PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
+ if (ret)
+ return ret;
+
+ return vsc85xx_sd6g_config_v2(phydev);
+}
+
+static int vsc8574_config_host_serdes(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ int ret;
+ u16 val;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
if (ret)
- goto err;
+ return ret;
val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
val &= ~MAC_CFG_MASK;
@@ -1598,17 +1641,17 @@ static int vsc8584_config_init(struct phy_device *phydev)
val |= MAC_CFG_RGMII;
} else {
ret = -EINVAL;
- goto err;
+ return ret;
}
ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
if (ret)
- goto err;
+ return ret;
ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_STANDARD);
if (ret)
- goto err;
+ return ret;
if (!phy_interface_is_rgmii(phydev)) {
val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
@@ -1620,7 +1663,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
ret = vsc8584_cmd(phydev, val);
if (ret)
- goto err;
+ return ret;
usleep_range(10000, 20000);
}
@@ -1632,16 +1675,78 @@ static int vsc8584_config_init(struct phy_device *phydev)
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
if (ret)
- goto err;
+ return ret;
/* Disable SerDes for 1000Base-X */
- ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(vsc8531->addr) |
- PROC_CMD_FIBER_DISABLE |
- PROC_CMD_READ_MOD_WRITE_PORT |
- PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
- if (ret)
- goto err;
+ return vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(vsc8531->addr) |
+ PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
+}
+
+static int vsc8584_config_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ int ret, i;
+ u16 val;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* Some parts of the init sequence are identical for every PHY in the
+ * package. Some parts are modifying the GPIO register bank which is a
+ * set of registers that are affecting all PHYs, a few resetting the
+ * microprocessor common to all PHYs. The CRC check responsible for
+ * checking the firmware within the 8051 microprocessor can only be
+ * accessed via the PHY whose internal address in the package is 0.
+ * All PHYs' interrupt mask registers have to be zeroed before enabling
+ * any PHY's interrupt in this register.
+ * For all these reasons, we need to do the init sequence once and only
+ * once, whichever PHY in the package is initialized first, and
+ * do the correct init sequence for all PHYs that are package-critical
+ * in this pre-init function.
+ */
+ if (phy_package_init_once(phydev)) {
+ /* The following switch statement assumes that the lowest
+ * nibble of the phy_id_mask is always 0. This works because
+ * the lowest nibble of the PHY_ID's below are also 0.
+ */
+ WARN_ON(phydev->drv->phy_id_mask & 0xf);
+
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC8504:
+ case PHY_ID_VSC8552:
+ case PHY_ID_VSC8572:
+ case PHY_ID_VSC8574:
+ ret = vsc8574_config_pre_init(phydev);
+ if (ret)
+ goto err;
+ ret = vsc8574_config_host_serdes(phydev);
+ if (ret)
+ goto err;
+ break;
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
+ ret = vsc8584_config_pre_init(phydev);
+ if (ret)
+ goto err;
+ ret = vsc8584_config_host_serdes(phydev);
+ if (ret)
+ goto err;
+ vsc85xx_coma_mode_release(phydev);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto err;
+ }
phy_unlock_mdio_bus(phydev);
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
new file mode 100644
index 000000000000..26b9c0d7cb9d
--- /dev/null
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+/* NXP C45 PHY driver
+ * Copyright (C) 2021 NXP
+ * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/processor.h>
+#include <linux/property.h>
+
+#define PHY_ID_TJA_1103 0x001BB010
+
+#define PMAPMD_B100T1_PMAPMD_CTL 0x0834
+#define B100T1_PMAPMD_CONFIG_EN BIT(15)
+#define B100T1_PMAPMD_MASTER BIT(14)
+#define MASTER_MODE (B100T1_PMAPMD_CONFIG_EN | \
+ B100T1_PMAPMD_MASTER)
+#define SLAVE_MODE (B100T1_PMAPMD_CONFIG_EN)
+
+#define VEND1_DEVICE_CONTROL 0x0040
+#define DEVICE_CONTROL_RESET BIT(15)
+#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
+#define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
+
+#define VEND1_PHY_IRQ_ACK 0x80A0
+#define VEND1_PHY_IRQ_EN 0x80A1
+#define VEND1_PHY_IRQ_STATUS 0x80A2
+#define PHY_IRQ_LINK_EVENT BIT(1)
+
+#define VEND1_PHY_CONTROL 0x8100
+#define PHY_CONFIG_EN BIT(14)
+#define PHY_START_OP BIT(0)
+
+#define VEND1_PHY_CONFIG 0x8108
+#define PHY_CONFIG_AUTO BIT(0)
+
+#define VEND1_SIGNAL_QUALITY 0x8320
+#define SQI_VALID BIT(14)
+#define SQI_MASK GENMASK(2, 0)
+#define MAX_SQI SQI_MASK
+
+#define VEND1_CABLE_TEST 0x8330
+#define CABLE_TEST_ENABLE BIT(15)
+#define CABLE_TEST_START BIT(14)
+#define CABLE_TEST_VALID BIT(13)
+#define CABLE_TEST_OK 0x00
+#define CABLE_TEST_SHORTED 0x01
+#define CABLE_TEST_OPEN 0x02
+#define CABLE_TEST_UNKNOWN 0x07
+
+#define VEND1_PORT_CONTROL 0x8040
+#define PORT_CONTROL_EN BIT(14)
+
+#define VEND1_PORT_INFRA_CONTROL 0xAC00
+#define PORT_INFRA_CONTROL_EN BIT(14)
+
+#define VEND1_RXID 0xAFCC
+#define VEND1_TXID 0xAFCD
+#define ID_ENABLE BIT(15)
+
+#define VEND1_ABILITIES 0xAFC4
+#define RGMII_ID_ABILITY BIT(15)
+#define RGMII_ABILITY BIT(14)
+#define RMII_ABILITY BIT(10)
+#define REVMII_ABILITY BIT(9)
+#define MII_ABILITY BIT(8)
+#define SGMII_ABILITY BIT(0)
+
+#define VEND1_MII_BASIC_CONFIG 0xAFC6
+#define MII_BASIC_CONFIG_REV BIT(8)
+#define MII_BASIC_CONFIG_SGMII 0x9
+#define MII_BASIC_CONFIG_RGMII 0x7
+#define MII_BASIC_CONFIG_RMII 0x5
+#define MII_BASIC_CONFIG_MII 0x4
+
+#define VEND1_SYMBOL_ERROR_COUNTER 0x8350
+#define VEND1_LINK_DROP_COUNTER 0x8352
+#define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
+#define VEND1_R_GOOD_FRAME_CNT 0xA950
+#define VEND1_R_BAD_FRAME_CNT 0xA952
+#define VEND1_R_RXER_FRAME_CNT 0xA954
+#define VEND1_RX_PREAMBLE_COUNT 0xAFCE
+#define VEND1_TX_PREAMBLE_COUNT 0xAFCF
+#define VEND1_RX_IPG_LENGTH 0xAFD0
+#define VEND1_TX_IPG_LENGTH 0xAFD1
+#define COUNTER_EN BIT(15)
+
+#define RGMII_PERIOD_PS 8000U
+#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
+#define MIN_ID_PS 1644U
+#define MAX_ID_PS 2260U
+#define DEFAULT_ID_PS 2000U
+
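The constants work out as follows: PS_PER_DEGREE = div_u64(8000, 360) = 22 ps, so the permitted window of 1644..2260 ps corresponds to roughly 74..102 degrees of the 8000 ps RGMII clock period, and DEFAULT_ID_PS = 2000 sits near the ideal quarter-period (90 degree) shift.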
+struct nxp_c45_phy {
+ u32 tx_delay;
+ u32 rx_delay;
+};
+
+struct nxp_c45_phy_stats {
+ const char *name;
+ u8 mmd;
+ u16 reg;
+ u8 off;
+ u16 mask;
+};
+
+static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
+ { "phy_symbol_error_cnt", MDIO_MMD_VEND1,
+ VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
+ { "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
+ VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
+ { "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
+ VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
+ { "phy_link_loss_cnt", MDIO_MMD_VEND1,
+ VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
+ { "phy_link_failure_cnt", MDIO_MMD_VEND1,
+ VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
+ { "r_good_frame_cnt", MDIO_MMD_VEND1,
+ VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
+ { "r_bad_frame_cnt", MDIO_MMD_VEND1,
+ VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
+ { "r_rxer_frame_cnt", MDIO_MMD_VEND1,
+ VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
+ { "rx_preamble_count", MDIO_MMD_VEND1,
+ VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
+ { "tx_preamble_count", MDIO_MMD_VEND1,
+ VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
+ { "rx_ipg_length", MDIO_MMD_VEND1,
+ VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
+ { "tx_ipg_length", MDIO_MMD_VEND1,
+ VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
+};
+
+static int nxp_c45_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(nxp_c45_hw_stats);
+}
+
+static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
+ strncpy(data + i * ETH_GSTRING_LEN,
+ nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
+ }
+}
+
+static void nxp_c45_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ size_t i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
+ ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
+ nxp_c45_hw_stats[i].reg);
+ if (ret < 0) {
+ data[i] = U64_MAX;
+ } else {
+ data[i] = ret & nxp_c45_hw_stats[i].mask;
+ data[i] >>= nxp_c45_hw_stats[i].off;
+ }
+ }
+}
+
+static int nxp_c45_config_enable(struct phy_device *phydev)
+{
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
+ DEVICE_CONTROL_CONFIG_GLOBAL_EN |
+ DEVICE_CONTROL_CONFIG_ALL_EN);
+ usleep_range(400, 450);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
+ PORT_CONTROL_EN);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
+ PHY_CONFIG_EN);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
+ PORT_INFRA_CONTROL_EN);
+
+ return 0;
+}
+
+static int nxp_c45_start_op(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
+ PHY_START_OP);
+}
+
+static int nxp_c45_config_intr(struct phy_device *phydev)
+{
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
+ else
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
+}
+
+static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
+{
+ irqreturn_t ret = IRQ_NONE;
+ int irq;
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
+ if (irq & PHY_IRQ_LINK_EVENT) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
+ PHY_IRQ_LINK_EVENT);
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int nxp_c45_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
+ DEVICE_CONTROL_RESET);
+ if (ret)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_DEVICE_CONTROL, ret,
+ !(ret & DEVICE_CONTROL_RESET), 20000,
+ 240000, false);
+}
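phy_read_mmd_poll_timeout() above re-reads VEND1_DEVICE_CONTROL every 20 ms until DEVICE_CONTROL_RESET self-clears, giving up with -ETIMEDOUT after 240 ms; the trailing false means no sleep is taken before the first read.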
+
+static int nxp_c45_cable_test_start(struct phy_device *phydev)
+{
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
+ CABLE_TEST_ENABLE | CABLE_TEST_START);
+}
+
+static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+ u8 cable_test_result;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
+	if (ret < 0)
+		return ret;
+
+	if (!(ret & CABLE_TEST_VALID)) {
+ *finished = false;
+ return 0;
+ }
+
+ *finished = true;
+ cable_test_result = ret & GENMASK(2, 0);
+
+ switch (cable_test_result) {
+ case CABLE_TEST_OK:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK);
+ break;
+ case CABLE_TEST_SHORTED:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
+ break;
+ case CABLE_TEST_OPEN:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
+ break;
+ default:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
+ CABLE_TEST_ENABLE);
+
+ return nxp_c45_start_op(phydev);
+}
+
+static int nxp_c45_setup_master_slave(struct phy_device *phydev)
+{
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
+ MASTER_MODE);
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
+ SLAVE_MODE);
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int nxp_c45_read_master_slave(struct phy_device *phydev)
+{
+ int reg;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
+ if (reg < 0)
+ return reg;
+
+ if (reg & B100T1_PMAPMD_MASTER) {
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+ } else {
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+ }
+
+ return 0;
+}
+
+static int nxp_c45_config_aneg(struct phy_device *phydev)
+{
+ return nxp_c45_setup_master_slave(phydev);
+}
+
+static int nxp_c45_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_read_status(phydev);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_read_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nxp_c45_get_sqi(struct phy_device *phydev)
+{
+ int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
+	if (reg < 0)
+		return reg;
+
+	if (!(reg & SQI_VALID))
+ return -EINVAL;
+
+ reg &= SQI_MASK;
+
+ return reg;
+}
+
+static int nxp_c45_get_sqi_max(struct phy_device *phydev)
+{
+ return MAX_SQI;
+}
+
+static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
+{
+ if (delay < MIN_ID_PS) {
+ phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
+ return -EINVAL;
+ }
+
+ if (delay > MAX_ID_PS) {
+ phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
+{
+	/* The delay in degrees is 73.8 + phase_offset_raw * 0.9, so the raw
+	 * register value for a requested degree shift is (degrees - 73.8) / 0.9.
+	 * To avoid floating point operations we multiply by 10 and work with
+	 * 1 decimal point of precision.
+	 */
+ phase_offset_raw *= 10;
+ phase_offset_raw -= 738;
+ return div_u64(phase_offset_raw, 9);
+}
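Worked example under the defaults above: tx_delay = 2000 ps / PS_PER_DEGREE = 90 degrees, and nxp_c45_get_phase_shift(90) = (90 * 10 - 738) / 9 = 18, the raw value that nxp_c45_set_delays() writes alongside ID_ENABLE into VEND1_TXID.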
+
+static void nxp_c45_disable_delays(struct phy_device *phydev)
+{
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
+}
+
+static void nxp_c45_set_delays(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ u64 tx_delay = priv->tx_delay;
+ u64 rx_delay = priv->rx_delay;
+ u64 degree;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ degree = div_u64(tx_delay, PS_PER_DEGREE);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
+ ID_ENABLE | nxp_c45_get_phase_shift(degree));
+ } else {
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
+ ID_ENABLE);
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ degree = div_u64(rx_delay, PS_PER_DEGREE);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
+ ID_ENABLE | nxp_c45_get_phase_shift(degree));
+ } else {
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
+ ID_ENABLE);
+ }
+}
+
+static int nxp_c45_get_delays(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ int ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ ret = device_property_read_u32(&phydev->mdio.dev,
+ "tx-internal-delay-ps",
+ &priv->tx_delay);
+ if (ret)
+ priv->tx_delay = DEFAULT_ID_PS;
+
+ ret = nxp_c45_check_delay(phydev, priv->tx_delay);
+ if (ret) {
+ phydev_err(phydev,
+ "tx-internal-delay-ps invalid value\n");
+ return ret;
+ }
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ ret = device_property_read_u32(&phydev->mdio.dev,
+ "rx-internal-delay-ps",
+ &priv->rx_delay);
+ if (ret)
+ priv->rx_delay = DEFAULT_ID_PS;
+
+ ret = nxp_c45_check_delay(phydev, priv->rx_delay);
+ if (ret) {
+ phydev_err(phydev,
+ "rx-internal-delay-ps invalid value\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int nxp_c45_set_phy_mode(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
+ phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ if (!(ret & RGMII_ABILITY)) {
+ phydev_err(phydev, "rgmii mode not supported\n");
+ return -EINVAL;
+ }
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
+ MII_BASIC_CONFIG_RGMII);
+ nxp_c45_disable_delays(phydev);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (!(ret & RGMII_ID_ABILITY)) {
+ phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
+ return -EINVAL;
+ }
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
+ MII_BASIC_CONFIG_RGMII);
+ ret = nxp_c45_get_delays(phydev);
+ if (ret)
+ return ret;
+
+ nxp_c45_set_delays(phydev);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (!(ret & MII_ABILITY)) {
+ phydev_err(phydev, "mii mode not supported\n");
+ return -EINVAL;
+ }
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
+ MII_BASIC_CONFIG_MII);
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ if (!(ret & REVMII_ABILITY)) {
+ phydev_err(phydev, "rev-mii mode not supported\n");
+ return -EINVAL;
+ }
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
+ MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!(ret & RMII_ABILITY)) {
+ phydev_err(phydev, "rmii mode not supported\n");
+ return -EINVAL;
+ }
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
+ MII_BASIC_CONFIG_RMII);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!(ret & SGMII_ABILITY)) {
+ phydev_err(phydev, "sgmii mode not supported\n");
+ return -EINVAL;
+ }
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
+ MII_BASIC_CONFIG_SGMII);
+ break;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nxp_c45_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = nxp_c45_config_enable(phydev);
+ if (ret) {
+ phydev_err(phydev, "Failed to enable config\n");
+ return ret;
+ }
+
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
+ PHY_CONFIG_AUTO);
+
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
+ COUNTER_EN);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
+ COUNTER_EN);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
+ COUNTER_EN);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
+ COUNTER_EN);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
+ COUNTER_EN);
+
+ ret = nxp_c45_set_phy_mode(phydev);
+ if (ret)
+ return ret;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+
+ return nxp_c45_start_op(phydev);
+}
+
+static int nxp_c45_probe(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static struct phy_driver nxp_c45_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
+ .name = "NXP C45 TJA1103",
+ .features = PHY_BASIC_T1_FEATURES,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = nxp_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = nxp_c45_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = nxp_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ },
+};
+
+module_phy_driver(nxp_c45_driver);
+
+static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);
+
+MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP C45 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 077f2929c45e..f4816b7d31b3 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,6 +9,49 @@
#include <linux/phy.h>
/**
+ * genphy_c45_pma_can_sleep - checks if the PMA has sleep support
+ * @phydev: target phy_device struct
+ */
+static bool genphy_c45_pma_can_sleep(struct phy_device *phydev)
+{
+ int stat1;
+
+ stat1 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1);
+ if (stat1 < 0)
+ return false;
+
+ return !!(stat1 & MDIO_STAT1_LPOWERABLE);
+}
+
+/**
+ * genphy_c45_pma_resume - wakes up the PMA module
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_pma_resume(struct phy_device *phydev)
+{
+ if (!genphy_c45_pma_can_sleep(phydev))
+ return -EOPNOTSUPP;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_resume);
+
+/**
+ * genphy_c45_pma_suspend - suspends the PMA module
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_pma_suspend(struct phy_device *phydev)
+{
+ if (!genphy_c45_pma_can_sleep(phydev))
+ return -EOPNOTSUPP;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_suspend);
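These two helpers are generic: any Clause 45 driver whose PMA advertises MDIO_STAT1_LPOWERABLE can plug them straight into its phy_driver ops (the TJA1103 driver above does exactly that), and PHYs without low-power support get a clean -EOPNOTSUPP. A minimal sketch with a hypothetical driver:

/* Sketch: reusing the generic C45 PMA power helpers.
 * The driver name and PHY ID are hypothetical.
 */
static struct phy_driver example_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(0x00112233),	/* hypothetical ID */
		.name		= "Example C45 PHY",
		.suspend	= genphy_c45_pma_suspend,
		.resume		= genphy_c45_pma_resume,
	},
};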
+
+/**
* genphy_c45_pma_setup_forced - configures a forced speed
* @phydev: target phy_device struct
*/
@@ -560,6 +603,14 @@ int gen10g_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(gen10g_config_aneg);
+int genphy_c45_loopback(struct phy_device *phydev, bool enable)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_LOOPBACK,
+ enable ? MDIO_PCS_CTRL1_LOOPBACK : 0);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_loopback);
+
struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index fc2e7cb5b2e5..1f0512e39c65 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -701,7 +701,7 @@ out:
}
EXPORT_SYMBOL(phy_start_cable_test_tdr);
-static int phy_config_aneg(struct phy_device *phydev)
+int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
return phydev->drv->config_aneg(phydev);
@@ -714,6 +714,7 @@ static int phy_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+EXPORT_SYMBOL(phy_config_aneg);
/**
* phy_check_link_status - check link status and set state accordingly
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index cc38e326405a..0a2d8bedf73d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
{
struct phy_device *phydev = to_phy_device(dev);
+ if (phydev->mac_managed_pm)
+ return 0;
+
/* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
@@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
struct phy_device *phydev = to_phy_device(dev);
int ret;
+ if (phydev->mac_managed_pm)
+ return 0;
+
if (!phydev->suspended_by_mdio_bus)
goto no_resume;
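mac_managed_pm is a new phy_device flag: a MAC driver that suspends and resumes its PHY itself sets it so the MDIO bus PM callbacks above become no-ops instead of stopping the state machine a second time. A hypothetical opt-in:

/* Sketch: a MAC driver that owns PHY power management. */
static void example_attach_phy(struct phy_device *phydev)
{
	/* Our own suspend/resume paths call phy_stop()/phy_start();
	 * keep mdio_bus_phy_suspend()/resume() out of the way.
	 */
	phydev->mac_managed_pm = true;
}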
@@ -512,10 +518,21 @@ phy_has_fixups_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(phy_has_fixups);
+static ssize_t phy_dev_flags_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "0x%08x\n", phydev->dev_flags);
+}
+static DEVICE_ATTR_RO(phy_dev_flags);
+
static struct attribute *phy_dev_attrs[] = {
&dev_attr_phy_id.attr,
&dev_attr_phy_interface.attr,
&dev_attr_phy_has_fixups.attr,
+ &dev_attr_phy_dev_flags.attr,
NULL,
};
ATTRIBUTE_GROUPS(phy_dev);
@@ -1760,6 +1777,9 @@ int phy_loopback(struct phy_device *phydev, bool enable)
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
int ret = 0;
+ if (!phydrv)
+ return -ENODEV;
+
mutex_lock(&phydev->lock);
if (enable && phydev->loopback_enabled) {
@@ -1772,10 +1792,10 @@ int phy_loopback(struct phy_device *phydev, bool enable)
goto out;
}
- if (phydev->drv && phydrv->set_loopback)
+ if (phydrv->set_loopback)
ret = phydrv->set_loopback(phydev, enable);
else
- ret = -EOPNOTSUPP;
+ ret = genphy_loopback(phydev, enable);
if (ret)
goto out;
@@ -2545,8 +2565,32 @@ EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
- return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
- enable ? BMCR_LOOPBACK : 0);
+ if (enable) {
+ u16 val, ctl = BMCR_LOOPBACK;
+ int ret;
+
+ if (phydev->speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+
+ if (phydev->duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+
+ phy_modify(phydev, MII_BMCR, ~0, ctl);
+
+ ret = phy_read_poll_timeout(phydev, MII_BMSR, val,
+ val & BMSR_LSTATUS,
+ 5000, 500000, true);
+ if (ret)
+ return ret;
+ } else {
+ phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
+
+ phy_config_aneg(phydev);
+ }
+
+ return 0;
}
EXPORT_SYMBOL(genphy_loopback);
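Note what changed: enabling loopback now forces the current speed/duplex into BMCR (the ~0 mask rewrites the whole register) and polls BMSR for up to 500 ms until link-up, while disabling clears BMCR_LOOPBACK and restarts autonegotiation through the newly exported phy_config_aneg(). From a caller's perspective usage is unchanged — a hedged sketch:

/* Sketch: a selftest toggling loopback via the phylib wrapper.
 * phy_loopback() takes phydev->lock and may block up to 500 ms
 * while waiting for the looped-back link to come up.
 */
static int example_loopback_test(struct phy_device *phydev)
{
	int err;

	err = phy_loopback(phydev, true);
	if (err)
		return err;

	/* ... inject and verify looped-back frames here ... */

	return phy_loopback(phydev, false);	/* restarts autoneg */
}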
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index dc2800beacc3..96d8e88b4e46 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -271,8 +271,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
pl->cfg_link_an_mode = MLO_AN_FIXED;
fwnode_handle_put(dn);
- if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
- strcmp(managed, "in-band-status") == 0) {
+ if ((fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
+ strcmp(managed, "in-band-status") == 0) ||
+ pl->config->ovr_an_inband) {
if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
phylink_err(pl,
"can't use both fixed-link and in-band-status\n");
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 2e11176c6b94..e61de66e973b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -556,6 +556,26 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
/**
+ * sfp_get_module_eeprom_by_page() - Read a page from the SFP module EEPROM
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @page: a &struct ethtool_module_eeprom
+ * @extack: extack for reporting problems
+ *
+ * Read an EEPROM page as specified by the supplied @page. See the
+ * documentation for &struct ethtool_module_eeprom for the page to be read.
+ *
+ * Returns 0 on success or a negative errno number. More error
+ * information might be provided via extack.
+ */
+int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ return bus->socket_ops->module_eeprom_by_page(bus->sfp, page, extack);
+}
+EXPORT_SYMBOL_GPL(sfp_get_module_eeprom_by_page);
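Callers describe the read with a struct ethtool_module_eeprom; as the sfp.c implementation further down shows, the plain-SFP backend accepts only bank 0, page 0, and I2C addresses 0x50/0x51. A hedged sketch of a caller (offset and length chosen arbitrarily):

/* Sketch: reading part of the SFF-8472 diagnostic area (0x51). */
static int example_read_diag(struct sfp_bus *bus,
			     struct netlink_ext_ack *extack)
{
	u8 buf[16];
	const struct ethtool_module_eeprom req = {
		.offset		= 96,
		.length		= sizeof(buf),
		.page		= 0,
		.bank		= 0,
		.i2c_address	= 0x51,
		.data		= buf,
	};

	return sfp_get_module_eeprom_by_page(bus, &req, extack);
}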
+
+/**
* sfp_upstream_start() - Inform the SFP that the network device is up
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
*
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 7998acc689b7..37f722c763d7 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2330,6 +2330,30 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
return 0;
}
+static int sfp_module_eeprom_by_page(struct sfp *sfp,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ if (page->bank) {
+ NL_SET_ERR_MSG(extack, "Banks not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (page->page) {
+ NL_SET_ERR_MSG(extack, "Only page 0 supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (page->i2c_address != 0x50 &&
+ page->i2c_address != 0x51) {
+ NL_SET_ERR_MSG(extack, "Only address 0x50 and 0x51 supported");
+ return -EOPNOTSUPP;
+ }
+
+ return sfp_read(sfp, page->i2c_address == 0x51, page->offset,
+ page->data, page->length);
+};
+
static const struct sfp_socket_ops sfp_module_ops = {
.attach = sfp_attach,
.detach = sfp_detach,
@@ -2337,6 +2361,7 @@ static const struct sfp_socket_ops sfp_module_ops = {
.stop = sfp_stop,
.module_info = sfp_module_info,
.module_eeprom = sfp_module_eeprom,
+ .module_eeprom_by_page = sfp_module_eeprom_by_page,
};
static void sfp_timeout(struct work_struct *work)
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index b83f70526270..27226535c72b 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -14,6 +14,9 @@ struct sfp_socket_ops {
int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
int (*module_eeprom)(struct sfp *sfp, struct ethtool_eeprom *ee,
u8 *data);
+ int (*module_eeprom_by_page)(struct sfp *sfp,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
};
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ddb78fb4d6dc..d8cac02a79b9 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
-static int lan87xx_config_aneg_ext(struct phy_device *phydev)
+static int lan95xx_config_aneg_ext(struct phy_device *phydev)
{
int rc;
+ if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
+ return lan87xx_config_aneg(phydev);
+
/* Extend Manual AutoMDIX timer */
rc = phy_read(phydev, PHY_EDPD_CONFIG);
if (rc < 0)
@@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
- .config_aneg = lan87xx_config_aneg_ext,
+ .config_aneg = lan95xx_config_aneg_ext,
/* IRQ related */
.config_intr = smsc_phy_config_intr,
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 4406b353123e..e26cf91bdec2 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -516,6 +516,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev,
*data_p |= (c0 << 1) & 0xf0;
write_data (dev, 0x00); /* send ACK */
*ns_p = PLIP_NB_BEGIN;
+ break;
case PLIP_NB_2:
break;
}
@@ -808,6 +809,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
return HS_TIMEOUT;
}
}
+ break;
case PLIP_PK_LENGTH_LSB:
if (plip_send(nibble_timeout, dev,
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index c457f849e553..e6d48e5c65a3 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -279,7 +279,6 @@ static void z_decomp_free(void *arg)
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
- zlib_inflateEnd(&state->strm);
vfree(state->strm.workspace);
kfree(state);
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d445ecb1d0c7..930e49ef15f6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1560,12 +1560,34 @@ static void ppp_dev_priv_destructor(struct net_device *dev)
ppp_destroy_interface(ppp);
}
+static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct ppp *ppp = netdev_priv(ctx->dev);
+ struct ppp_channel *chan;
+ struct channel *pch;
+
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+ if (list_empty(&ppp->channels))
+ return -ENODEV;
+
+ pch = list_first_entry(&ppp->channels, struct channel, clist);
+ chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+ return chan->ops->fill_forward_path(ctx, path, chan);
+}
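ndo_fill_forward_path is how flowtable offload walks from a virtual device down to the physical transmit device: each layer fills one struct net_device_path and points ctx->dev at the device below it (here PPP delegates to its first channel, and the PPPoE hook later in this patch hops to the underlying Ethernet device). A simplified sketch of the traversal idea — the real walker lives in net/core/dev.c and differs in detail:

/* Sketch of the walk; illustrative, not the in-tree implementation. */
static int example_fill_forward_path(struct net_device_path_ctx *ctx,
				     struct net_device_path_stack *stack)
{
	const struct net_device *last;
	int ret = 0;

	while (ctx->dev && ctx->dev->netdev_ops->ndo_fill_forward_path) {
		struct net_device_path *path;

		if (stack->num_paths >= NET_DEVICE_PATH_STACK_MAX)
			return -E2BIG;

		path = &stack->path[stack->num_paths++];
		last = ctx->dev;
		ret = ctx->dev->netdev_ops->ndo_fill_forward_path(ctx, path);
		if (ret < 0)
			return ret;
		if (ctx->dev == last)		/* no progress, stop */
			break;
	}

	return ret;
}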
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
+ .ndo_fill_forward_path = ppp_fill_forward_path,
};
static struct device_type ppp_type = {
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index d7f50b835050..3619520340b7 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -25,7 +25,7 @@
* in pppoe_release.
* 051000 : Initialization cleanup.
* 111100 : Fix recvmsg.
- * 050101 : Fix PADT procesing.
+ * 050101 : Fix PADT processing.
* 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey)
* 170701 : Do not lock_sock with rwlock held. (DaveM)
* Ignore discovery frames if user has socket
@@ -96,7 +96,7 @@ struct pppoe_net {
* we could use _single_ hash table for all
* nets by injecting net id into the hash but
* it would increase hash chains and add
- * a few additional math comparations messy
+ * a few additional math comparisons messy
* as well, moreover in case of SMP less locking
* controversy here
*/
@@ -972,8 +972,31 @@ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
return __pppoe_xmit(sk, skb);
}
+static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path,
+ const struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct net_device *dev = po->pppoe_dev;
+
+ if (sock_flag(sk, SOCK_DEAD) ||
+ !(sk->sk_state & PPPOX_CONNECTED) || !dev)
+ return -1;
+
+ path->type = DEV_PATH_PPPOE;
+ path->encap.proto = htons(ETH_P_PPP_SES);
+ path->encap.id = be16_to_cpu(po->num);
+ memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+ path->dev = ctx->dev;
+ ctx->dev = dev;
+
+ return 0;
+}
+
static const struct ppp_channel_ops pppoe_chan_ops = {
.start_xmit = pppoe_xmit,
+ .fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4cf38be26dc9..84f832806313 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1189,8 +1189,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
struct tun_struct *tun = netdev_priv(dev);
struct tun_file *tfile;
u32 numqueues;
- int drops = 0;
- int cnt = n;
+ int nxmit = 0;
int i;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -1220,9 +1219,9 @@ resample:
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
atomic_long_inc(&dev->tx_dropped);
- xdp_return_frame_rx_napi(xdp);
- drops++;
+ break;
}
+ nxmit++;
}
spin_unlock(&tfile->tx_ring.producer_lock);
@@ -1230,17 +1229,21 @@ resample:
__tun_xdp_flush_tfile(tfile);
rcu_read_unlock();
- return cnt - drops;
+ return nxmit;
}
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
+ int nxmit;
if (unlikely(!frame))
return -EOVERFLOW;
- return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
+ nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
+ if (!nxmit)
+ xdp_return_frame_rx_napi(frame);
+ return nxmit;
}
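This is part of a tree-wide change to the ndo_xdp_xmit() contract: a driver now returns how many frames it actually queued and stops at the first failure; the caller owns and frees whatever was not sent, instead of the driver freeing internally and returning n - drops. Caller side, roughly:

/* Sketch: the caller-frees convention after this series. */
static void example_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int i, sent;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);
	if (sent < 0)
		sent = 0;	/* whole batch errored: free everything */

	for (i = sent; i < n; i++)
		xdp_return_frame(frames[i]);
}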
static const struct net_device_ops tap_netdev_ops = {
@@ -3005,7 +3008,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
return open_related_ns(&net->ns, get_net_ns);
}
- ret = 0;
rtnl_lock();
tun = tun_get(tfile);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 6e13d8165852..19a8fafb8f04 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -125,8 +125,8 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static void ax88172_set_multicast(struct net_device *net)
@@ -291,8 +291,8 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static int ax88772_link_reset(struct usbnet *dev)
@@ -782,8 +782,8 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static int marvell_phy_init(struct usbnet *dev)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d650b39b6e5d..c1316718304d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
int ret;
if (2 == size) {
- u16 buf;
+ u16 buf = 0;
ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
le16_to_cpus(&buf);
*((u16 *)data) = buf;
} else if (4 == size) {
- u32 buf;
+ u32 buf = 0;
ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
le32_to_cpus(&buf);
*((u32 *)data) = buf;
@@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
{
u8 mac[ETH_ALEN];
+ memset(mac, 0, sizeof(mac));
+
/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
netif_dbg(dev, ifup, dev->net,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a9b551028659..7eb0109e9baa 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -92,6 +92,18 @@ void usbnet_cdc_update_filter(struct usbnet *dev)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_update_filter);
+/* We need to override usbnet_*_link_ksettings in bind() */
+static const struct ethtool_ops cdc_ether_ethtool_ops = {
+ .get_link = usbnet_get_link,
+ .nway_reset = usbnet_nway_reset,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = usbnet_get_link_ksettings_internal,
+ .set_link_ksettings = NULL,
+};
+
/* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
* all pure cdc, except for certain firmware workarounds, and knowing
@@ -310,6 +322,9 @@ skip:
return -ENODEV;
}
+ /* override ethtool_ops */
+ dev->net->ethtool_ops = &cdc_ether_ethtool_ops;
+
return 0;
bad_desc:
@@ -379,12 +394,10 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
* (by Brad Hards) talked with, with more functionality.
*/
-static void dumpspeed(struct usbnet *dev, __le32 *speeds)
+static void speed_change(struct usbnet *dev, __le32 *speeds)
{
- netif_info(dev, timer, dev->net,
- "link speeds: %u kbps up, %u kbps down\n",
- __le32_to_cpu(speeds[0]) / 1000,
- __le32_to_cpu(speeds[1]) / 1000);
+ dev->tx_speed = __le32_to_cpu(speeds[0]);
+ dev->rx_speed = __le32_to_cpu(speeds[1]);
}
void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
@@ -396,7 +409,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
/* SPEED_CHANGE can get split into two 8-byte packets */
if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
- dumpspeed(dev, (__le32 *) urb->transfer_buffer);
+ speed_change(dev, (__le32 *) urb->transfer_buffer);
return;
}
@@ -413,7 +426,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
if (urb->actual_length != (sizeof(*event) + 8))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
- dumpspeed(dev, (__le32 *) &event[1]);
+ speed_change(dev, (__le32 *) &event[1]);
break;
/* USB_CDC_NOTIFY_RESPONSE_AVAILABLE can happen too (e.g. RNDIS),
* but there are no standard formats for the response data.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8acf30115428..b04055fd1b79 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -133,17 +133,17 @@ static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 s
static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
static const struct ethtool_ops cdc_ncm_ethtool_ops = {
- .get_link = usbnet_get_link,
- .nway_reset = usbnet_nway_reset,
- .get_drvinfo = usbnet_get_drvinfo,
- .get_msglevel = usbnet_get_msglevel,
- .set_msglevel = usbnet_set_msglevel,
- .get_ts_info = ethtool_op_get_ts_info,
- .get_sset_count = cdc_ncm_get_sset_count,
- .get_strings = cdc_ncm_get_strings,
- .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link = usbnet_get_link,
+ .nway_reset = usbnet_nway_reset,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_sset_count = cdc_ncm_get_sset_count,
+ .get_strings = cdc_ncm_get_strings,
+ .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+ .get_link_ksettings = usbnet_get_link_ksettings_internal,
+ .set_link_ksettings = NULL,
};
static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
@@ -920,7 +920,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
if (ctx->ether_desc) {
@@ -1826,33 +1825,9 @@ static void
cdc_ncm_speed_change(struct usbnet *dev,
struct usb_cdc_speed_change *data)
{
- uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
- uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
-
- /* if the speed hasn't changed, don't report it.
- * RTL8156 shipped before 2021 sends notification about every 32ms.
- */
- if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
- return;
-
- dev->rx_speed = rx_speed;
- dev->tx_speed = tx_speed;
-
- /*
- * Currently the USB-NET API does not support reporting the actual
- * device speed. Do print it instead.
- */
- if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
- netif_info(dev, link, dev->net,
- "%u mbit/s downlink %u mbit/s uplink\n",
- (unsigned int)(rx_speed / 1000000U),
- (unsigned int)(tx_speed / 1000000U));
- } else {
- netif_info(dev, link, dev->net,
- "%u kbit/s downlink %u kbit/s uplink\n",
- (unsigned int)(rx_speed / 1000U),
- (unsigned int)(tx_speed / 1000U));
- }
+ /* RTL8156 shipped before 2021 sends a notification about every 32 ms. */
+ dev->rx_speed = le32_to_cpu(data->DLBitRRate);
+ dev->tx_speed = le32_to_cpu(data->ULBitRate);
}
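Instead of printing every SPEED_CHANGE notification (the RTL8156 fires one roughly every 32 ms), the rates are now cached in dev->rx_speed/tx_speed and exposed through the new usbnet_get_link_ksettings_internal() ethtool hook wired up above. A hedged sketch of what that getter amounts to — the real helper lives in usbnet.c and may differ:

/* Sketch only: report the device-notified speed via ethtool. */
static int example_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	/* Speeds from USB_CDC_NOTIFY_SPEED_CHANGE are in bits/s. */
	if (dev->rx_speed)
		cmd->base.speed = dev->rx_speed / 1000000;
	else
		cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	return 0;
}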
static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
@@ -1878,6 +1853,9 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
+ /* RTL8156 shipped before 2021 sends a notification about
+ * every 32 ms. Don't forward the notification if the state
+ * is unchanged.
+ */
if (netif_carrier_ok(dev->net) != !!event->wValue)
usbnet_link_change(dev, !!event->wValue, 0);
break;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index b5d2ac55a874..89cc61d7a675 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -282,8 +282,8 @@ static const struct ethtool_ops dm9601_ethtool_ops = {
.get_eeprom_len = dm9601_get_eeprom_len,
.get_eeprom = dm9601_get_eeprom,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static void dm9601_set_multicast(struct net_device *net)
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9bc58e64b5b7..3ef4b2841402 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
cancel_work_sync(&serial_table[i]->async_put_intf);
cancel_work_sync(&serial_table[i]->async_get_intf);
hso_serial_tty_unregister(serial);
- kref_put(&serial_table[i]->ref, hso_serial_ref_free);
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
}
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index e81c5699c952..6acc5e904518 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2015 Microchip Technology
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index fc512b780d15..9f9352a4522f 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -452,8 +452,8 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static const struct net_device_ops mcs7830_netdev_ops = {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 20fb5638ac65..136ea06540ff 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -29,7 +29,7 @@
#include <linux/usb/r8152.h>
/* Information for net-next */
-#define NETNEXT_VERSION "11"
+#define NETNEXT_VERSION "12"
/* Information for net */
#define NET_VERSION "11"
@@ -43,10 +43,14 @@
#define PLA_IDR 0xc000
#define PLA_RCR 0xc010
+#define PLA_RCR1 0xc012
#define PLA_RMS 0xc016
#define PLA_RXFIFO_CTRL0 0xc0a0
+#define PLA_RXFIFO_FULL 0xc0a2
#define PLA_RXFIFO_CTRL1 0xc0a4
+#define PLA_RX_FIFO_FULL 0xc0a6
#define PLA_RXFIFO_CTRL2 0xc0a8
+#define PLA_RX_FIFO_EMPTY 0xc0aa
#define PLA_DMY_REG0 0xc0b0
#define PLA_FMC 0xc0b4
#define PLA_CFG_WOL 0xc0b6
@@ -63,6 +67,8 @@
#define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */
#define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */
#define PLA_EXTRA_STATUS 0xd398
+#define PLA_GPHY_CTRL 0xd3ae
+#define PLA_POL_GPIO_CTRL 0xdc6a
#define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02
#define PLA_LEDSEL 0xdd90
@@ -72,6 +78,8 @@
#define PLA_LWAKE_CTRL_REG 0xe007
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
+#define PLA_EEE_TXTWSYS 0xe04c
+#define PLA_EEE_TXTWSYS_2P5G 0xe058
#define PLA_EEEP_CR 0xe080
#define PLA_MAC_PWR_CTRL 0xe0c0
#define PLA_MAC_PWR_CTRL2 0xe0ca
@@ -82,6 +90,7 @@
#define PLA_TCR1 0xe612
#define PLA_MTPS 0xe615
#define PLA_TXFIFO_CTRL 0xe618
+#define PLA_TXFIFO_FULL 0xe61a
#define PLA_RSTTALLY 0xe800
#define PLA_CR 0xe813
#define PLA_CRWECR 0xe81c
@@ -98,6 +107,7 @@
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
#define PLA_CONFIG6 0xe90a /* CONFIG6 */
+#define PLA_USB_CFG 0xe952
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
@@ -112,6 +122,7 @@
#define USB_USB2PHY 0xb41e
#define USB_SSPHYLINK1 0xb426
#define USB_SSPHYLINK2 0xb428
+#define USB_L1_CTRL 0xb45e
#define USB_U2P3_CTRL 0xb460
#define USB_CSR_DUMMY1 0xb464
#define USB_CSR_DUMMY2 0xb466
@@ -122,7 +133,12 @@
#define USB_FW_FIX_EN0 0xcfca
#define USB_FW_FIX_EN1 0xcfcc
#define USB_LPM_CONFIG 0xcfd8
+#define USB_ECM_OPTION 0xcfee
#define USB_CSTMR 0xcfef /* RTL8153A */
+#define USB_MISC_2 0xcfff
+#define USB_ECM_OP 0xd26b
+#define USB_GPHY_CTRL 0xd284
+#define USB_SPEED_OPTION 0xd32a
#define USB_FW_CTRL 0xd334 /* RTL8153B */
#define USB_FC_TIMER 0xd340
#define USB_USB_CTRL 0xd406
@@ -136,16 +152,20 @@
#define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */
#define USB_TX_DMA 0xd434
#define USB_UPT_RXDMA_OWN 0xd437
+#define USB_UPHY3_MDCMDIO 0xd480
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
#define USB_BMU_RESET 0xd4b0
+#define USB_BMU_CONFIG 0xd4b4
#define USB_U1U2_TIMER 0xd4da
#define USB_FW_TASK 0xd4e8 /* RTL8153B */
+#define USB_RX_AGGR_NUM 0xd4ee
#define USB_UPS_CTRL 0xd800
#define USB_POWER_CUT 0xd80a
#define USB_MISC_0 0xd81a
#define USB_MISC_1 0xd81f
#define USB_AFE_CTRL2 0xd824
+#define USB_UPHY_XTAL 0xd826
#define USB_UPS_CFG 0xd842
#define USB_UPS_FLAGS 0xd848
#define USB_WDT1_CTRL 0xe404
@@ -188,6 +208,9 @@
#define OCP_EEE_ABLE 0xa5c4
#define OCP_EEE_ADV 0xa5d0
#define OCP_EEE_LPABLE 0xa5d2
+#define OCP_10GBT_CTRL 0xa5d4
+#define OCP_10GBT_STAT 0xa5d6
+#define OCP_EEE_ADV2 0xa6d4
#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_PHY_PATCH_STAT 0xb800
#define OCP_PHY_PATCH_CMD 0xb820
@@ -199,6 +222,7 @@
/* SRAM Register */
#define SRAM_GREEN_CFG 0x8011
#define SRAM_LPF_CFG 0x8012
+#define SRAM_GPHY_FW_VER 0x801e
#define SRAM_10M_AMP1 0x8080
#define SRAM_10M_AMP2 0x8082
#define SRAM_IMPEDANCE 0x8084
@@ -210,11 +234,19 @@
#define RCR_AM 0x00000004
#define RCR_AB 0x00000008
#define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB)
+#define SLOT_EN BIT(11)
+
+/* PLA_RCR1 */
+#define OUTER_VLAN BIT(7)
+#define INNER_VLAN BIT(6)
/* PLA_RXFIFO_CTRL0 */
#define RXFIFO_THR1_NORMAL 0x00080002
#define RXFIFO_THR1_OOB 0x01800003
+/* PLA_RXFIFO_FULL */
+#define RXFIFO_FULL_MASK 0xfff
+
/* PLA_RXFIFO_CTRL1 */
#define RXFIFO_THR2_FULL 0x00000060
#define RXFIFO_THR2_HIGH 0x00000038
@@ -249,6 +281,9 @@
/* PLA_TCR1 */
#define VERSION_MASK 0x7cf0
+#define IFG_MASK (BIT(3) | BIT(9) | BIT(8))
+#define IFG_144NS BIT(9)
+#define IFG_96NS (BIT(9) | BIT(8))
/* PLA_MTPS */
#define MTPS_JUMBO (12 * 1024 / 64)
@@ -282,6 +317,7 @@
#define MCU_BORW_EN 0x4000
/* PLA_CPCR */
+#define FLOW_CTRL_EN BIT(0)
#define CPCR_RX_VLAN 0x0040
/* PLA_CFG_WOL */
@@ -307,6 +343,10 @@
/* PLA_CONFIG6 */
#define LANWAKE_CLR_EN BIT(0)
+/* PLA_USB_CFG */
+#define EN_XG_LIP BIT(1)
+#define EN_G_LIP BIT(2)
+
/* PLA_CONFIG5 */
#define BWF_EN 0x0040
#define MWF_EN 0x0020
@@ -330,6 +370,7 @@
/* PLA_MAC_PWR_CTRL2 */
#define EEE_SPDWN_RATIO 0x8007
#define MAC_CLK_SPDWN_EN BIT(15)
+#define EEE_SPDWN_RATIO_MASK 0xff
/* PLA_MAC_PWR_CTRL3 */
#define PLA_MCU_SPDWN_EN BIT(14)
@@ -342,6 +383,7 @@
#define PWRSAVE_SPDWN_EN 0x1000
#define RXDV_SPDWN_EN 0x0800
#define TX10MIDLE_EN 0x0100
+#define IDLE_SPDWN_EN BIT(6)
#define TP100_SPDWN_EN 0x0020
#define TP500_SPDWN_EN 0x0010
#define TP1000_SPDWN_EN 0x0008
@@ -382,6 +424,13 @@
#define LINK_CHANGE_FLAG BIT(8)
#define POLL_LINK_CHG BIT(0)
+/* PLA_GPHY_CTRL */
+#define GPHY_FLASH BIT(1)
+
+/* PLA_POL_GPIO_CTRL */
+#define DACK_DET_EN BIT(15)
+#define POL_GPHY_PATCH BIT(4)
+
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
@@ -430,6 +479,9 @@
#define BMU_RESET_EP_IN 0x01
#define BMU_RESET_EP_OUT 0x02
+/* USB_BMU_CONFIG */
+#define ACT_ODMA BIT(1)
+
/* USB_UPT_RXDMA_OWN */
#define OWN_UPDATE BIT(0)
#define OWN_CLEAR BIT(1)
@@ -437,27 +489,52 @@
/* USB_FW_TASK */
#define FC_PATCH_TASK BIT(1)
+/* USB_RX_AGGR_NUM */
+#define RX_AGGR_NUM_MASK 0x1ff
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
#define RESUME_INDICATE 0x0001
+/* USB_ECM_OPTION */
+#define BYPASS_MAC_RESET BIT(5)
+
/* USB_CSTMR */
#define FORCE_SUPER BIT(0)
+/* USB_MISC_2 */
+#define UPS_FORCE_PWR_DOWN BIT(0)
+
+/* USB_ECM_OP */
+#define EN_ALL_SPEED BIT(0)
+
+/* USB_GPHY_CTRL */
+#define GPHY_PATCH_DONE BIT(2)
+#define BYPASS_FLASH BIT(5)
+#define BACKUP_RESTRORE BIT(6)
+
+/* USB_SPEED_OPTION */
+#define RG_PWRDN_EN BIT(8)
+#define ALL_SPEED_OFF BIT(9)
+
/* USB_FW_CTRL */
#define FLOW_CTRL_PATCH_OPT BIT(1)
+#define AUTO_SPEEDUP BIT(3)
+#define FLOW_CTRL_PATCH_2 BIT(8)
/* USB_FC_TIMER */
#define CTRL_TIMER_EN BIT(15)
/* USB_USB_CTRL */
+#define CDC_ECM_EN BIT(3)
#define RX_AGG_DISABLE 0x0010
#define RX_ZERO_EN 0x0080
/* USB_U2P3_CTRL */
#define U2P3_ENABLE 0x0001
+#define RX_DETECT8 BIT(3)
/* USB_POWER_CUT */
#define PWR_EN 0x0001
@@ -493,8 +570,12 @@
#define SEN_VAL_NORMAL 0xa000
#define SEL_RXIDLE 0x0100
+/* USB_UPHY_XTAL */
+#define OOBS_POLLING BIT(8)
+
/* USB_UPS_CFG */
#define SAW_CNT_1MS_MASK 0x0fff
+#define MID_REVERSE BIT(5) /* RTL8156A */
/* USB_UPS_FLAGS */
#define UPS_FLAGS_R_TUNE BIT(0)
@@ -502,6 +583,7 @@
#define UPS_FLAGS_250M_CKDIV BIT(2)
#define UPS_FLAGS_EN_ALDPS BIT(3)
#define UPS_FLAGS_CTAP_SHORT_DIS BIT(4)
+#define UPS_FLAGS_SPEED_MASK (0xf << 16)
#define ups_flags_speed(x) ((x) << 16)
#define UPS_FLAGS_EN_EEE BIT(20)
#define UPS_FLAGS_EN_500M_EEE BIT(21)
@@ -522,6 +604,8 @@ enum spd_duplex {
FORCE_10M_FULL,
FORCE_100M_HALF,
FORCE_100M_FULL,
+ FORCE_1000M_FULL,
+ NWAY_2500M_FULL,
};
/* OCP_ALDPS_CONFIG */
@@ -586,6 +670,9 @@ enum spd_duplex {
#define EN_10M_CLKDIV BIT(11)
#define EN_10M_BGOFF 0x0080
+/* OCP_10GBT_CTRL */
+#define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */
+
/* OCP_PHY_STATE */
#define TXDIS_STATE 0x01
#define ABD_STATE 0x02
@@ -605,7 +692,8 @@ enum spd_duplex {
#define EN_EMI_L 0x0040
/* OCP_SYSCLK_CFG */
-#define clk_div_expo(x) (min(x, 5) << 8)
+#define sysclk_div_expo(x) (min(x, 5) << 8)
+#define clk_div_expo(x) (min(x, 5) << 4)
/* SRAM_GREEN_CFG */
#define GREEN_ETH_EN BIT(15)
@@ -636,6 +724,11 @@ enum spd_duplex {
#define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */
enum rtl_register_content {
+ _2500bps = BIT(10),
+ _1250bps = BIT(9),
+ _500bps = BIT(8),
+ _tx_flow = BIT(6),
+ _rx_flow = BIT(5),
_1000bps = 0x10,
_100bps = 0x08,
_10bps = 0x04,
@@ -643,6 +736,9 @@ enum rtl_register_content {
FULL_DUP = 0x01,
};
+#define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS))
+#define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow))
+
#define RTL8152_MAX_TX 4
#define RTL8152_MAX_RX 10
#define INTBUFSIZE 2
@@ -654,15 +750,12 @@ enum rtl_register_content {
#define INTR_LINK 0x0004
-#define RTL8153_MAX_PACKET 9216 /* 9K */
-#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \
- ETH_FCS_LEN)
#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define RTL8153_RMS RTL8153_MAX_PACKET
#define RTL8152_TX_TIMEOUT (5 * HZ)
-#define RTL8152_NAPI_WEIGHT 64
-#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + ETH_FCS_LEN + \
- sizeof(struct rx_desc) + RX_ALIGN)
+#define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+#define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN)
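The paired macros make the MTU/buffer-size conversions symmetric and self-documenting. With VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4, the arithmetic for the common cases works out as:

/* Worked examples of the new conversion macros:
 *
 *   mtu_to_size(1500) == 1500 + 18 + 4 == 1522
 *   size_to_mtu(9216) == 9216 - 18 - 4 == 9194
 *                        (RTL8153_MAX_PACKET -> maximum MTU)
 */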
/* rtl8152 flags */
enum rtl8152_flags {
@@ -674,8 +767,6 @@ enum rtl8152_flags {
PHY_RESET,
SCHEDULE_TASKLET,
GREEN_ETHERNET,
- DELL_TB_RX_AGG_BUG,
- LENOVO_MACPASSTHRU,
};
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
@@ -792,9 +883,11 @@ struct r8152 {
bool (*in_nway)(struct r8152 *tp);
void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
+ void (*change_mtu)(struct r8152 *tp);
} rtl_ops;
struct ups_info {
+ u32 r_tune:1;
u32 _10m_ckdiv:1;
u32 _250m_ckdiv:1;
u32 aldps:1;
@@ -836,7 +929,11 @@ struct r8152 {
u32 rx_buf_sz;
u32 rx_copybreak;
u32 rx_pending;
+ u32 fc_pause_on, fc_pause_off;
+ u32 support_2500full:1;
+ u32 lenovo_macpassthru:1;
+ u32 dell_tb_rx_agg_bug:1;
u16 ocp_base;
u16 speed;
u16 eee_adv;
@@ -871,6 +968,66 @@ struct fw_header {
struct fw_block blocks[];
} __packed;
+enum rtl8152_fw_flags {
+ FW_FLAGS_USB = 0,
+ FW_FLAGS_PLA,
+ FW_FLAGS_START,
+ FW_FLAGS_STOP,
+ FW_FLAGS_NC,
+ FW_FLAGS_NC1,
+ FW_FLAGS_NC2,
+ FW_FLAGS_UC2,
+ FW_FLAGS_UC,
+ FW_FLAGS_SPEED_UP,
+ FW_FLAGS_VER,
+};
+
+enum rtl8152_fw_fixup_cmd {
+ FW_FIXUP_AND = 0,
+ FW_FIXUP_OR,
+ FW_FIXUP_NOT,
+ FW_FIXUP_XOR,
+};
+
+struct fw_phy_set {
+ __le16 addr;
+ __le16 data;
+} __packed;
+
+struct fw_phy_speed_up {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 version;
+ __le16 fw_reg;
+ __le16 reserved;
+ char info[];
+} __packed;
+
+struct fw_phy_ver {
+ struct fw_block blk_hdr;
+ struct fw_phy_set ver;
+ __le32 reserved;
+} __packed;
+
+struct fw_phy_fixup {
+ struct fw_block blk_hdr;
+ struct fw_phy_set setting;
+ __le16 bit_cmd;
+ __le16 reserved;
+} __packed;
+
+struct fw_phy_union {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ struct fw_phy_set pre_set[2];
+ struct fw_phy_set bp[8];
+ struct fw_phy_set bp_en;
+ u8 pre_num;
+ u8 bp_num;
+ char info[];
+} __packed;
+
/**
* struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
* The layout of the firmware block is:
@@ -975,6 +1132,15 @@ enum rtl_fw_type {
RTL_FW_PHY_START,
RTL_FW_PHY_STOP,
RTL_FW_PHY_NC,
+ RTL_FW_PHY_FIXUP,
+ RTL_FW_PHY_UNION_NC,
+ RTL_FW_PHY_UNION_NC1,
+ RTL_FW_PHY_UNION_NC2,
+ RTL_FW_PHY_UNION_UC2,
+ RTL_FW_PHY_UNION_UC,
+ RTL_FW_PHY_UNION_MISC,
+ RTL_FW_PHY_SPEED_UP,
+ RTL_FW_PHY_VER,
};
enum rtl_version {
@@ -988,6 +1154,15 @@ enum rtl_version {
RTL_VER_07,
RTL_VER_08,
RTL_VER_09,
+
+ RTL_TEST_01,
+ RTL_VER_10,
+ RTL_VER_11,
+ RTL_VER_12,
+ RTL_VER_13,
+ RTL_VER_14,
+ RTL_VER_15,
+
RTL_VER_MAX
};
@@ -1003,6 +1178,7 @@ enum tx_csum_stat {
#define RTL_ADVERTISED_100_FULL BIT(3)
#define RTL_ADVERTISED_1000_HALF BIT(4)
#define RTL_ADVERTISED_1000_FULL BIT(5)
+#define RTL_ADVERTISED_2500_FULL BIT(6)
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
@@ -1010,8 +1186,7 @@ enum tx_csum_stat {
static const int multicast_filter_limit = 32;
static unsigned int agg_buf_sz = 16384;
-#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \
- VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc))
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -1419,7 +1594,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
acpi_object_type mac_obj_type;
int mac_strlen;
- if (test_bit(LENOVO_MACPASSTHRU, &tp->flags)) {
+ if (tp->lenovo_macpassthru) {
mac_obj_name = "\\MACA";
mac_obj_type = ACPI_TYPE_STRING;
mac_strlen = 0x16;
@@ -2108,7 +2283,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
- if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ if (tp->dell_tb_rx_agg_bug)
break;
}
@@ -2597,7 +2772,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
static void r8152b_reset_packet_filter(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC);
ocp_data &= ~FMC_FCR_MCU_EN;
@@ -2608,28 +2783,58 @@ static void r8152b_reset_packet_filter(struct r8152 *tp)
static void rtl8152_nic_reset(struct r8152 *tp)
{
- int i;
+ u32 ocp_data;
+ int i;
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
+ switch (tp->version) {
+ case RTL_TEST_01:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR);
+ ocp_data &= ~CR_TE;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data &= ~BMU_RESET_EP_IN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data |= CDC_ECM_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR);
+ ocp_data &= ~CR_RE;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data |= BMU_RESET_EP_IN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~CDC_ECM_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+ break;
- for (i = 0; i < 1000; i++) {
- if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
- break;
- usleep_range(100, 400);
+ default:
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
+
+ for (i = 0; i < 1000; i++) {
+ if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
+ break;
+ usleep_range(100, 400);
+ }
+ break;
}
}
static void set_tx_qlen(struct r8152 *tp)
{
- struct net_device *netdev = tp->netdev;
-
- tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN +
- sizeof(struct tx_desc));
+ tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc));
}
-static inline u8 rtl8152_get_speed(struct r8152 *tp)
+static inline u16 rtl8152_get_speed(struct r8152 *tp)
{
- return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
+ return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
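With the 2.5G-capable parts, PLA_PHYSTATUS carries status above bit 7 (_2500bps, _tx_flow, _rx_flow), so the accessor widens from a byte to a word read; the helper macros defined earlier can then test the new bits. A small usage sketch:

/* Sketch: consuming the widened 16-bit status word. */
static bool example_link_is_2500(struct r8152 *tp)
{
	u16 speed = rtl8152_get_speed(tp);

	/* true only when _2500bps and LINK_STATUS are both set */
	return is_speed_2500(speed);
}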
static void rtl_eee_plus_en(struct r8152 *tp, bool enable)
@@ -2747,6 +2952,29 @@ static int rtl_stop_rx(struct r8152 *tp)
return 0;
}
+static void rtl_set_ifg(struct r8152 *tp, u16 speed)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
+ ocp_data &= ~IFG_MASK;
+ if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) {
+ ocp_data |= IFG_144NS;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
+ ocp_data &= ~TX10MIDLE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
+ } else {
+ ocp_data |= IFG_96NS;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
+ ocp_data |= TX10MIDLE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
+ }
+}
+
static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
{
ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
@@ -2766,6 +2994,7 @@ static int rtl_enable(struct r8152 *tp)
switch (tp->version) {
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_14:
r8153b_rx_agg_chg_indicate(tp);
break;
default:
@@ -2803,6 +3032,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_14:
/* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout
* primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns.
*/
@@ -2812,6 +3042,18 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
ocp_data);
break;
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT,
+ 640 / 8);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
+ ocp_data);
+ r8153b_rx_agg_chg_indicate(tp);
+ break;
+
default:
break;
}
@@ -2831,9 +3073,20 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
break;
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_14:
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
ocp_data / 8);
break;
+ case RTL_TEST_01:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
+ ocp_data / 8);
+ r8153b_rx_agg_chg_indicate(tp);
+ break;
default:
WARN_ON_ONCE(1);
break;
@@ -2842,6 +3095,8 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
static int rtl8153_enable(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -2850,15 +3105,20 @@ static int rtl8153_enable(struct r8152 *tp)
r8153_set_rx_early_timeout(tp);
r8153_set_rx_early_size(tp);
- if (tp->version == RTL_VER_09) {
- u32 ocp_data;
+ rtl_set_ifg(tp, rtl8152_get_speed(tp));
+ switch (tp->version) {
+ case RTL_VER_09:
+ case RTL_VER_14:
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
ocp_data &= ~FC_PATCH_TASK;
ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
usleep_range(1000, 2000);
ocp_data |= FC_PATCH_TASK;
ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ break;
+ default:
+ break;
}
return rtl_enable(tp);
@@ -2923,12 +3183,40 @@ static void rtl_rx_vlan_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
- if (enable)
- ocp_data |= CPCR_RX_VLAN;
- else
- ocp_data &= ~CPCR_RX_VLAN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_07:
+ case RTL_VER_08:
+ case RTL_VER_09:
+ case RTL_VER_14:
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ if (enable)
+ ocp_data |= CPCR_RX_VLAN;
+ else
+ ocp_data &= ~CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+ break;
+
+ case RTL_TEST_01:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ default:
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1);
+ if (enable)
+ ocp_data |= OUTER_VLAN | INNER_VLAN;
+ else
+ ocp_data &= ~(OUTER_VLAN | INNER_VLAN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data);
+ break;
+ }
}
static int rtl8152_set_features(struct net_device *dev,
@@ -3021,6 +3309,40 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
device_set_wakeup_enable(&tp->udev->dev, false);
}
+static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2);
+
+ /* MAC clock speed down */
+ if (enable)
+ ocp_data |= MAC_CLK_SPDWN_EN;
+ else
+ ocp_data &= ~MAC_CLK_SPDWN_EN;
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+}
+
+static void r8156_mac_clk_spd(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ /* MAC clock speed down */
+ if (enable) {
+ /* aldps_spdwn_ratio, tp10_spdwn_ratio */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
+ 0x0403);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2);
+ ocp_data &= ~EEE_SPDWN_RATIO_MASK;
+ ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ } else {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2);
+ ocp_data &= ~MAC_CLK_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ }
+}
+
static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -3080,6 +3402,9 @@ static void r8153b_ups_flags(struct r8152 *tp)
if (tp->ups_info.eee_cmod_lv)
ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN;
+ if (tp->ups_info.r_tune)
+ ups_flags |= UPS_FLAGS_R_TUNE;
+
if (tp->ups_info._10m_ckdiv)
ups_flags |= UPS_FLAGS_EN_10M_CKDIV;
@@ -3130,6 +3455,88 @@ static void r8153b_ups_flags(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags);
}
+static void r8156_ups_flags(struct r8152 *tp)
+{
+ u32 ups_flags = 0;
+
+ if (tp->ups_info.green)
+ ups_flags |= UPS_FLAGS_EN_GREEN;
+
+ if (tp->ups_info.aldps)
+ ups_flags |= UPS_FLAGS_EN_ALDPS;
+
+ if (tp->ups_info.eee)
+ ups_flags |= UPS_FLAGS_EN_EEE;
+
+ if (tp->ups_info.flow_control)
+ ups_flags |= UPS_FLAGS_EN_FLOW_CTR;
+
+ if (tp->ups_info.eee_ckdiv)
+ ups_flags |= UPS_FLAGS_EN_EEE_CKDIV;
+
+ if (tp->ups_info._10m_ckdiv)
+ ups_flags |= UPS_FLAGS_EN_10M_CKDIV;
+
+ if (tp->ups_info.eee_plloff_100)
+ ups_flags |= UPS_FLAGS_EEE_PLLOFF_100;
+
+ if (tp->ups_info.eee_plloff_giga)
+ ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA;
+
+ if (tp->ups_info._250m_ckdiv)
+ ups_flags |= UPS_FLAGS_250M_CKDIV;
+
+ switch (tp->ups_info.speed_duplex) {
+ case FORCE_10M_HALF:
+ ups_flags |= ups_flags_speed(0);
+ break;
+ case FORCE_10M_FULL:
+ ups_flags |= ups_flags_speed(1);
+ break;
+ case FORCE_100M_HALF:
+ ups_flags |= ups_flags_speed(2);
+ break;
+ case FORCE_100M_FULL:
+ ups_flags |= ups_flags_speed(3);
+ break;
+ case NWAY_10M_HALF:
+ ups_flags |= ups_flags_speed(4);
+ break;
+ case NWAY_10M_FULL:
+ ups_flags |= ups_flags_speed(5);
+ break;
+ case NWAY_100M_HALF:
+ ups_flags |= ups_flags_speed(6);
+ break;
+ case NWAY_100M_FULL:
+ ups_flags |= ups_flags_speed(7);
+ break;
+ case NWAY_1000M_FULL:
+ ups_flags |= ups_flags_speed(8);
+ break;
+ case NWAY_2500M_FULL:
+ ups_flags |= ups_flags_speed(9);
+ break;
+ default:
+ break;
+ }
+
+ switch (tp->ups_info.lite_mode) {
+ case 1:
+ ups_flags |= 0 << 5;
+ break;
+ case 2:
+ ups_flags |= 2 << 5;
+ break;
+ case 0:
+ default:
+ ups_flags |= 1 << 5;
+ break;
+ }
+
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags);
+}
+
static void rtl_green_en(struct r8152 *tp, bool enable)
{
u16 data;
@@ -3193,16 +3600,16 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN;
ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff);
- ocp_data |= BIT(0);
- ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data |= UPS_FORCE_PWR_DOWN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
} else {
ocp_data &= ~(UPS_EN | USP_PREWAKE);
ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff);
- ocp_data &= ~BIT(0);
- ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data &= ~UPS_FORCE_PWR_DOWN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) {
int i;
@@ -3222,6 +3629,95 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
}
}
+static void r8153c_ups_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT);
+
+ if (enable) {
+ r8153b_ups_flags(tp);
+
+ ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data |= UPS_FORCE_PWR_DOWN;
+ ocp_data &= ~BIT(7);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
+ } else {
+ ocp_data &= ~(UPS_EN | USP_PREWAKE);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data &= ~UPS_FORCE_PWR_DOWN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
+
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) {
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+ msleep(20);
+ }
+
+ tp->rtl_ops.hw_phy_cfg(tp);
+
+ rtl8152_set_speed(tp, tp->autoneg, tp->speed,
+ tp->duplex, tp->advertising);
+ }
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data |= BIT(8);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ }
+}
+
+static void r8156_ups_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT);
+
+ if (enable) {
+ r8156_ups_flags(tp);
+
+ ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data |= UPS_FORCE_PWR_DOWN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
+
+ switch (tp->version) {
+ case RTL_VER_13:
+ case RTL_VER_15:
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL);
+ ocp_data &= ~OOBS_POLLING;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data);
+ break;
+ default:
+ break;
+ }
+ } else {
+ ocp_data &= ~(UPS_EN | USP_PREWAKE);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data &= ~UPS_FORCE_PWR_DOWN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
+
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) {
+ tp->rtl_ops.hw_phy_cfg(tp);
+
+ rtl8152_set_speed(tp, tp->autoneg, tp->speed,
+ tp->duplex, tp->advertising);
+ }
+ }
+}
+
static void r8153_power_cut_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -3351,6 +3847,38 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
}
}
+static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ r8153_queue_wake(tp, true);
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ rtl_runtime_suspend_enable(tp, true);
+ r8153c_ups_en(tp, true);
+ } else {
+ r8153c_ups_en(tp, false);
+ r8153_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+ r8153b_u1u2en(tp, true);
+ }
+}
+
+static void rtl8156_runtime_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ r8153_queue_wake(tp, true);
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ rtl_runtime_suspend_enable(tp, true);
+ } else {
+ r8153_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+ r8153_u2p3en(tp, true);
+ if (tp->udev->speed >= USB_SPEED_SUPER)
+ r8153b_u1u2en(tp, true);
+ }
+}
+
static void r8153_teredo_off(struct r8152 *tp)
{
u32 ocp_data;
@@ -3371,14 +3899,19 @@ static void r8153_teredo_off(struct r8152 *tp)
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_TEST_01:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_14:
+ case RTL_VER_15:
+ default:
/* The bit 0 ~ 7 are relative with teredo settings. They are
* W1C (write 1 to clear), so set all 1 to disable it.
*/
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff);
break;
-
- default:
- break;
}
ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
@@ -3413,6 +3946,12 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
break;
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_14:
+ case RTL_VER_15:
default:
if (type == MCU_TYPE_USB) {
ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
@@ -3521,6 +4060,162 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)
return 0;
}
+static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy)
+{
+ u16 fw_offset;
+ u32 length;
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_07:
+ case RTL_VER_08:
+ case RTL_VER_09:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_14:
+ goto out;
+ case RTL_VER_13:
+ case RTL_VER_15:
+ default:
+ break;
+ }
+
+ fw_offset = __le16_to_cpu(phy->fw_offset);
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ if (fw_offset < sizeof(*phy) || length <= fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= fw_offset;
+ if (length & 3) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->fw_reg) != 0x9A00) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver)
+{
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ break;
+ default:
+ goto out;
+ }
+
+ if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) {
+ dev_err(&tp->intf->dev, "invalid phy ver addr\n");
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix)
+{
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ break;
+ default:
+ goto out;
+ }
+
+ if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD ||
+ __le16_to_cpu(fix->setting.data) != BIT(7)) {
+ dev_err(&tp->intf->dev, "invalid phy fixup\n");
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy)
+{
+ u16 fw_offset;
+ u32 length;
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ break;
+ default:
+ goto out;
+ }
+
+ fw_offset = __le16_to_cpu(phy->fw_offset);
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ if (fw_offset < sizeof(*phy) || length <= fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= fw_offset;
+ if (length & 1) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (phy->pre_num > 2) {
+ dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num);
+ goto out;
+ }
+
+ if (phy->bp_num > 8) {
+ dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num);
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
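+
+/* The union-block constraints mirror the apply path further below: the
+ * patch words must start at fw_offset >= sizeof(struct fw_phy_union)
+ * with an even byte length, and carry at most 2 pre_set and 8 bp
+ * register/value pairs. As a sketch, a block holding ten 16-bit patch
+ * words would set:
+ *
+ *	phy->fw_offset = cpu_to_le16(sizeof(*phy));
+ *	phy->blk_hdr.length = cpu_to_le32(sizeof(*phy) + 10 * sizeof(__le16));
+ */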
+
static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy)
{
u32 length;
@@ -3622,6 +4317,11 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
case RTL_VER_06:
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_14:
+ case RTL_VER_15:
fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA;
bp_en_addr = PLA_BP_EN;
@@ -3645,6 +4345,11 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
break;
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_14:
+ case RTL_VER_15:
fw_reg = 0xe600;
bp_ba_addr = USB_BP_BA;
bp_en_addr = USB_BP2_EN;
@@ -3772,10 +4477,7 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
{
const struct firmware *fw = rtl_fw->fw;
struct fw_header *fw_hdr = (struct fw_header *)fw->data;
- struct fw_mac *pla = NULL, *usb = NULL;
- struct fw_phy_patch_key *start = NULL;
- struct fw_phy_nc *phy_nc = NULL;
- struct fw_block *stop = NULL;
+ unsigned long fw_flags = 0;
long ret = -EFAULT;
int i;
@@ -3804,50 +4506,56 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
goto fail;
goto fw_end;
case RTL_FW_PLA:
- if (pla) {
+ if (test_bit(FW_FLAGS_PLA, &fw_flags)) {
dev_err(&tp->intf->dev,
"multiple PLA firmware encountered");
goto fail;
}
- pla = (struct fw_mac *)block;
- if (!rtl8152_is_fw_mac_ok(tp, pla)) {
+ if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) {
dev_err(&tp->intf->dev,
"check PLA firmware failed\n");
goto fail;
}
+ __set_bit(FW_FLAGS_PLA, &fw_flags);
break;
case RTL_FW_USB:
- if (usb) {
+ if (test_bit(FW_FLAGS_USB, &fw_flags)) {
dev_err(&tp->intf->dev,
"multiple USB firmware encountered");
goto fail;
}
- usb = (struct fw_mac *)block;
- if (!rtl8152_is_fw_mac_ok(tp, usb)) {
+ if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) {
dev_err(&tp->intf->dev,
"check USB firmware failed\n");
goto fail;
}
+ __set_bit(FW_FLAGS_USB, &fw_flags);
break;
case RTL_FW_PHY_START:
- if (start || phy_nc || stop) {
+ if (test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_NC, &fw_flags) ||
+ test_bit(FW_FLAGS_NC1, &fw_flags) ||
+ test_bit(FW_FLAGS_NC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
dev_err(&tp->intf->dev,
"check PHY_START fail\n");
goto fail;
}
- if (__le32_to_cpu(block->length) != sizeof(*start)) {
+ if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) {
dev_err(&tp->intf->dev,
"Invalid length for PHY_START\n");
goto fail;
}
-
- start = (struct fw_phy_patch_key *)block;
+ __set_bit(FW_FLAGS_START, &fw_flags);
break;
case RTL_FW_PHY_STOP:
- if (stop || !start) {
+ if (test_bit(FW_FLAGS_STOP, &fw_flags) ||
+ !test_bit(FW_FLAGS_START, &fw_flags)) {
dev_err(&tp->intf->dev,
"Check PHY_STOP fail\n");
goto fail;
@@ -3858,29 +4566,175 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
"Invalid length for PHY_STOP\n");
goto fail;
}
-
- stop = block;
+ __set_bit(FW_FLAGS_STOP, &fw_flags);
break;
case RTL_FW_PHY_NC:
- if (!start || stop) {
+ if (!test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
dev_err(&tp->intf->dev,
"check PHY_NC fail\n");
goto fail;
}
- if (phy_nc) {
+ if (test_bit(FW_FLAGS_NC, &fw_flags)) {
dev_err(&tp->intf->dev,
"multiple PHY NC encountered\n");
goto fail;
}
- phy_nc = (struct fw_phy_nc *)block;
- if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) {
+ if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) {
dev_err(&tp->intf->dev,
"check PHY NC firmware failed\n");
goto fail;
}
+ __set_bit(FW_FLAGS_NC, &fw_flags);
+ break;
+ case RTL_FW_PHY_UNION_NC:
+ if (!test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_NC1, &fw_flags) ||
+ test_bit(FW_FLAGS_NC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n");
+ goto fail;
+ }
+
+ if (test_bit(FW_FLAGS_NC, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) {
+ dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_NC, &fw_flags);
+ break;
+ case RTL_FW_PHY_UNION_NC1:
+ if (!test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_NC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n");
+ goto fail;
+ }
+ if (test_bit(FW_FLAGS_NC1, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) {
+ dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_NC1, &fw_flags);
+ break;
+ case RTL_FW_PHY_UNION_NC2:
+ if (!test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_UC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n");
+ goto fail;
+ }
+
+ if (test_bit(FW_FLAGS_NC2, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) {
+ dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_NC2, &fw_flags);
+ break;
+ case RTL_FW_PHY_UNION_UC2:
+ if (!test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_UC, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n");
+ goto fail;
+ }
+
+ if (test_bit(FW_FLAGS_UC2, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) {
+ dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_UC2, &fw_flags);
+ break;
+ case RTL_FW_PHY_UNION_UC:
+ if (!test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n");
+ goto fail;
+ }
+
+ if (test_bit(FW_FLAGS_UC, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY UC encountered\n");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) {
+ dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_UC, &fw_flags);
+ break;
+ case RTL_FW_PHY_UNION_MISC:
+ if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) {
+ dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_PHY_FIXUP:
+ if (!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) {
+ dev_err(&tp->intf->dev, "check PHY fixup failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_PHY_SPEED_UP:
+ if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY firmware encountered");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) {
+ dev_err(&tp->intf->dev, "check PHY speed up failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_SPEED_UP, &fw_flags);
+ break;
+ case RTL_FW_PHY_VER:
+ if (test_bit(FW_FLAGS_START, &fw_flags) ||
+ test_bit(FW_FLAGS_NC, &fw_flags) ||
+ test_bit(FW_FLAGS_NC1, &fw_flags) ||
+ test_bit(FW_FLAGS_NC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC2, &fw_flags) ||
+ test_bit(FW_FLAGS_UC, &fw_flags) ||
+ test_bit(FW_FLAGS_STOP, &fw_flags)) {
+ dev_err(&tp->intf->dev, "Invalid order to set PHY version\n");
+ goto fail;
+ }
+
+ if (test_bit(FW_FLAGS_VER, &fw_flags)) {
+ dev_err(&tp->intf->dev, "multiple PHY version encountered");
+ goto fail;
+ }
+
+ if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) {
+ dev_err(&tp->intf->dev, "check PHY version failed\n");
+ goto fail;
+ }
+ __set_bit(FW_FLAGS_VER, &fw_flags);
break;
default:
dev_warn(&tp->intf->dev, "Unknown type %u found\n",
@@ -3893,7 +4747,7 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
}
fw_end:
- if ((phy_nc || start) && !stop) {
+ if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) {
dev_err(&tp->intf->dev, "without PHY_STOP\n");
goto fail;
}
@@ -3903,6 +4757,143 @@ fail:
return ret;
}
+static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait)
+{
+ u32 len;
+ u8 *data;
+
+ if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) {
+ dev_dbg(&tp->intf->dev, "PHY firmware is already the newest\n");
+ return;
+ }
+
+ len = __le32_to_cpu(phy->blk_hdr.length);
+ len -= __le16_to_cpu(phy->fw_offset);
+ data = (u8 *)phy + __le16_to_cpu(phy->fw_offset);
+
+ if (rtl_phy_patch_request(tp, true, wait))
+ return;
+
+ while (len) {
+ u32 ocp_data, size;
+ int i;
+
+ if (len < 2048)
+ size = len;
+ else
+ size = 2048;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL);
+ ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data);
+
+ generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB);
+
+ data += size;
+ len -= size;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL);
+ ocp_data |= POL_GPHY_PATCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH))
+ break;
+ }
+
+ if (i == 1000) {
+ dev_err(&tp->intf->dev, "ram code speedup mode timeout\n");
+ break;
+ }
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+ rtl_phy_patch_request(tp, false, wait);
+
+ if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version))
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info);
+ else
+ dev_err(&tp->intf->dev, "ram code speedup mode fail\n");
+}
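+
+/* That is, after the version check the speed-up payload is streamed to
+ * the PHY in 2048-byte chunks through the register named by phy->fw_reg
+ * (0x9A00 per the validator above); each chunk raises POL_GPHY_PATCH
+ * and is busy-polled up to 1000 reads until the MCU clears it, so a
+ * 16 KB image takes eight such handshakes. The final sram_read() of
+ * SRAM_GPHY_FW_VER confirms whether the new version actually took.
+ */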
+
+static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver)
+{
+ u16 ver_addr, ver;
+
+ ver_addr = __le16_to_cpu(phy_ver->ver.addr);
+ ver = __le16_to_cpu(phy_ver->ver.data);
+
+ if (sram_read(tp, ver_addr) >= ver) {
+ dev_dbg(&tp->intf->dev, "PHY firmware is already the newest\n");
+ return 0;
+ }
+
+ sram_write(tp, ver_addr, ver);
+
+ dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver);
+
+ return ver;
+}
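+
+/* The return value doubles as a "patch needed" flag: 0 means the PHY
+ * already reports a version >= the firmware's, and rtl8152_apply_firmware()
+ * below uses that to skip the PHY patch blocks; any nonzero version lets
+ * patching proceed (patch_phy defaults to 1 there, so firmware without a
+ * PHY_VER block is always patched).
+ */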
+
+static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix)
+{
+ u16 addr, data;
+
+ addr = __le16_to_cpu(fix->setting.addr);
+ data = ocp_reg_read(tp, addr);
+
+ switch (__le16_to_cpu(fix->bit_cmd)) {
+ case FW_FIXUP_AND:
+ data &= __le16_to_cpu(fix->setting.data);
+ break;
+ case FW_FIXUP_OR:
+ data |= __le16_to_cpu(fix->setting.data);
+ break;
+ case FW_FIXUP_NOT:
+ data &= ~__le16_to_cpu(fix->setting.data);
+ break;
+ case FW_FIXUP_XOR:
+ data ^= __le16_to_cpu(fix->setting.data);
+ break;
+ default:
+ return;
+ }
+
+ ocp_reg_write(tp, addr, data);
+
+ dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data);
+}
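+
+/* Worked example: the validator above only accepts fixups that target
+ * OCP_PHY_PATCH_CMD with setting.data == BIT(7), so FW_FIXUP_OR reads
+ * the register, ORs in bit 7 and writes it back, while FW_FIXUP_NOT
+ * would clear that same bit instead.
+ */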
+
+static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy)
+{
+ __le16 *data;
+ u32 length;
+ int i, num;
+
+ num = phy->pre_num;
+ for (i = 0; i < num; i++)
+ sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr),
+ __le16_to_cpu(phy->pre_set[i].data));
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ length -= __le16_to_cpu(phy->fw_offset);
+ num = length / 2;
+ data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset));
+
+ ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg));
+ for (i = 0; i < num; i++)
+ ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i]));
+
+ num = phy->bp_num;
+ for (i = 0; i < num; i++)
+ sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data));
+
+ if (phy->bp_num && phy->bp_en.addr)
+ sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data));
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info);
+}
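+
+/* Apply order: the pre_set address/value pairs first, then the 16-bit
+ * payload words streamed through OCP_SRAM_ADDR/OCP_SRAM_DATA (the SRAM
+ * address apparently auto-increments, since OCP_SRAM_ADDR is written
+ * only once), and finally the bp pairs plus the optional bp_en word.
+ */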
+
static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
{
u16 mode_reg, bp_index;
@@ -3956,6 +4947,12 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
return;
}
+ fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg);
+ if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) {
+ dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB");
+ return;
+ }
+
rtl_clear_bp(tp, type);
/* Enable backup/restore of MACDBG. This is required after clearing PLA
@@ -3991,7 +4988,6 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
ocp_write_word(tp, type, bp_en_addr,
__le16_to_cpu(mac->bp_en_value));
- fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg);
if (fw_ver_reg)
ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg,
mac->fw_ver_data);
@@ -4006,7 +5002,7 @@ static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut)
struct fw_header *fw_hdr;
struct fw_phy_patch_key *key;
u16 key_addr = 0;
- int i;
+ int i, patch_phy = 1;
if (IS_ERR_OR_NULL(rtl_fw->fw))
return;
@@ -4028,17 +5024,40 @@ static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut)
rtl8152_fw_mac_apply(tp, (struct fw_mac *)block);
break;
case RTL_FW_PHY_START:
+ if (!patch_phy)
+ break;
key = (struct fw_phy_patch_key *)block;
key_addr = __le16_to_cpu(key->key_reg);
rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut);
break;
case RTL_FW_PHY_STOP:
+ if (!patch_phy)
+ break;
WARN_ON(!key_addr);
rtl_post_ram_code(tp, key_addr, !power_cut);
break;
case RTL_FW_PHY_NC:
rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block);
break;
+ case RTL_FW_PHY_VER:
+ patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block);
+ break;
+ case RTL_FW_PHY_UNION_NC:
+ case RTL_FW_PHY_UNION_NC1:
+ case RTL_FW_PHY_UNION_NC2:
+ case RTL_FW_PHY_UNION_UC2:
+ case RTL_FW_PHY_UNION_UC:
+ case RTL_FW_PHY_UNION_MISC:
+ if (patch_phy)
+ rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block);
+ break;
+ case RTL_FW_PHY_FIXUP:
+ if (patch_phy)
+ rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block);
+ break;
+ case RTL_FW_PHY_SPEED_UP:
+ rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut);
+ break;
default:
break;
}
@@ -4185,6 +5204,22 @@ static void r8153_eee_en(struct r8152 *tp, bool enable)
tp->ups_info.eee = enable;
}
+static void r8156_eee_en(struct r8152 *tp, bool enable)
+{
+ u16 config;
+
+ r8153_eee_en(tp, enable);
+
+ config = ocp_reg_read(tp, OCP_EEE_ADV2);
+
+ if (enable)
+ config |= MDIO_EEE_2_5GT;
+ else
+ config &= ~MDIO_EEE_2_5GT;
+
+ ocp_reg_write(tp, OCP_EEE_ADV2, config);
+}
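+
+/* r8156_eee_en() extends r8153_eee_en() for the 2.5G parts: on top of
+ * the usual EEE enables it toggles the MDIO_EEE_2_5GT advertisement bit
+ * in OCP_EEE_ADV2, which rtl_eee_enable() below pairs with writing
+ * tp->eee_adv (or 0) to OCP_EEE_ADV.
+ */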
+
static void rtl_eee_enable(struct r8152 *tp, bool enable)
{
switch (tp->version) {
@@ -4206,6 +5241,7 @@ static void rtl_eee_enable(struct r8152 *tp, bool enable)
case RTL_VER_06:
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_14:
if (enable) {
r8153_eee_en(tp, true);
ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
@@ -4214,6 +5250,19 @@ static void rtl_eee_enable(struct r8152 *tp, bool enable)
ocp_reg_write(tp, OCP_EEE_ADV, 0);
}
break;
+ case RTL_VER_10:
+ case RTL_VER_11:
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ if (enable) {
+ r8156_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+ } else {
+ r8156_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ }
+ break;
default:
break;
}
@@ -4260,6 +5309,20 @@ static void wait_oob_link_list_ready(struct r8152 *tp)
}
}
+static void r8156b_wait_loading_flash(struct r8152 *tp)
+{
+ if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) &&
+ !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) {
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE)
+ break;
+ usleep_range(1000, 2000);
+ }
+ }
+}
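+
+/* This wait only applies when the GPHY firmware comes from external
+ * flash (GPHY_FLASH set) and the flash is not bypassed (BYPASS_FLASH
+ * clear); it polls GPHY_PATCH_DONE up to 100 times with 1-2 ms sleeps,
+ * i.e. roughly 100-200 ms worst case, and gives up silently after that.
+ */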
+
static void r8152b_exit_oob(struct r8152 *tp)
{
u32 ocp_data;
@@ -4310,7 +5373,7 @@ static void r8152b_exit_oob(struct r8152 *tp)
}
/* TX share fifo free credit full threshold */
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
@@ -4487,6 +5550,36 @@ static int r8153b_post_firmware_1(struct r8152 *tp)
return 0;
}
+static int r8153c_post_firmware_1(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
+ ocp_data |= FLOW_CTRL_PATCH_2;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+
+ return 0;
+}
+
+static int r8156a_post_firmware_1(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ /* Modify the U3PHY parameters to work around a compatibility issue */
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9);
+
+ return 0;
+}
+
static void r8153_aldps_en(struct r8152 *tp, bool enable)
{
u16 data;
@@ -4689,6 +5782,19 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
set_bit(PHY_RESET, &tp->flags);
}
+static void r8153c_hw_phy_cfg(struct r8152 *tp)
+{
+ r8153b_hw_phy_cfg(tp);
+
+ tp->ups_info.r_tune = true;
+}
+
+static void rtl8153_change_mtu(struct r8152 *tp)
+{
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
+}
+
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
@@ -4721,9 +5827,7 @@ static void r8153_first_init(struct r8152 *tp)
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
+ rtl8153_change_mtu(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
ocp_data |= TCR0_AUTO_FIFO;
@@ -4758,8 +5862,7 @@ static void r8153_enter_oob(struct r8152 *tp)
wait_oob_link_list_ready(tp);
- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
switch (tp->version) {
case RTL_VER_03:
@@ -4773,6 +5876,7 @@ static void r8153_enter_oob(struct r8152 *tp)
case RTL_VER_08:
case RTL_VER_09:
+ case RTL_VER_14:
/* Clear the teredo wake event. bits[15:8] are the teredo wakeup
 * type; set them to zero. bits[7:0] are the W1C bits for the
 * events; write all 1s to clear them.
@@ -4809,6 +5913,96 @@ static void rtl8153_disable(struct r8152 *tp)
r8153_aldps_en(tp, true);
}
+static int rtl8156_enable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 speed;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+ r8153_set_rx_early_timeout(tp);
+ r8153_set_rx_early_size(tp);
+
+ speed = rtl8152_get_speed(tp);
+ rtl_set_ifg(tp, speed);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
+ if (speed & _2500bps)
+ ocp_data &= ~IDLE_SPDWN_EN;
+ else
+ ocp_data |= IDLE_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
+
+ if (speed & _1000bps)
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11);
+ else if (speed & _500bps)
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d);
+
+ if (tp->udev->speed == USB_SPEED_HIGH) {
+ /* USB 0xb45e[3:0] l1_nyet_hird */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL);
+ ocp_data &= ~0xf;
+ if (is_flow_control(speed))
+ ocp_data |= 0xf;
+ else
+ ocp_data |= 0x1;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data);
+ }
+
+ return rtl_enable(tp);
+}
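+
+/* The l1_nyet_hird nibble above ends up as 0xf when the negotiated link
+ * uses flow control and 0x1 otherwise; presumably a longer L1 resume
+ * duration is tolerable when pause frames can absorb the extra latency.
+ * rtl8156b_enable() below repeats the same tuning.
+ */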
+
+static int rtl8156b_enable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 speed;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return -ENODEV;
+
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM);
+ ocp_data &= ~RX_AGGR_NUM_MASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data);
+
+ r8153_set_rx_early_timeout(tp);
+ r8153_set_rx_early_size(tp);
+
+ speed = rtl8152_get_speed(tp);
+ rtl_set_ifg(tp, speed);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
+ if (speed & _2500bps)
+ ocp_data &= ~IDLE_SPDWN_EN;
+ else
+ ocp_data |= IDLE_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
+
+ if (tp->udev->speed == USB_SPEED_HIGH) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL);
+ ocp_data &= ~0xf;
+ if (is_flow_control(speed))
+ ocp_data |= 0xf;
+ else
+ ocp_data |= 0x1;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data &= ~FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ usleep_range(1000, 2000);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+
+ return rtl_enable(tp);
+}
+
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
u32 advertising)
{
@@ -4857,58 +6051,73 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
tp->mii.force_media = 1;
} else {
- u16 anar, tmp1;
+ u16 orig, new1;
u32 support;
support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL |
RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL;
- if (tp->mii.supports_gmii)
+ if (tp->mii.supports_gmii) {
support |= RTL_ADVERTISED_1000_FULL;
+ if (tp->support_2500full)
+ support |= RTL_ADVERTISED_2500_FULL;
+ }
+
if (!(advertising & support))
return -EINVAL;
- anar = r8152_mdio_read(tp, MII_ADVERTISE);
- tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ orig = r8152_mdio_read(tp, MII_ADVERTISE);
+ new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
if (advertising & RTL_ADVERTISED_10_HALF) {
- tmp1 |= ADVERTISE_10HALF;
+ new1 |= ADVERTISE_10HALF;
tp->ups_info.speed_duplex = NWAY_10M_HALF;
}
if (advertising & RTL_ADVERTISED_10_FULL) {
- tmp1 |= ADVERTISE_10FULL;
+ new1 |= ADVERTISE_10FULL;
tp->ups_info.speed_duplex = NWAY_10M_FULL;
}
if (advertising & RTL_ADVERTISED_100_HALF) {
- tmp1 |= ADVERTISE_100HALF;
+ new1 |= ADVERTISE_100HALF;
tp->ups_info.speed_duplex = NWAY_100M_HALF;
}
if (advertising & RTL_ADVERTISED_100_FULL) {
- tmp1 |= ADVERTISE_100FULL;
+ new1 |= ADVERTISE_100FULL;
tp->ups_info.speed_duplex = NWAY_100M_FULL;
}
- if (anar != tmp1) {
- r8152_mdio_write(tp, MII_ADVERTISE, tmp1);
- tp->mii.advertising = tmp1;
+ if (orig != new1) {
+ r8152_mdio_write(tp, MII_ADVERTISE, new1);
+ tp->mii.advertising = new1;
}
if (tp->mii.supports_gmii) {
- u16 gbcr;
-
- gbcr = r8152_mdio_read(tp, MII_CTRL1000);
- tmp1 = gbcr & ~(ADVERTISE_1000FULL |
+ orig = r8152_mdio_read(tp, MII_CTRL1000);
+ new1 = orig & ~(ADVERTISE_1000FULL |
ADVERTISE_1000HALF);
if (advertising & RTL_ADVERTISED_1000_FULL) {
- tmp1 |= ADVERTISE_1000FULL;
+ new1 |= ADVERTISE_1000FULL;
tp->ups_info.speed_duplex = NWAY_1000M_FULL;
}
- if (gbcr != tmp1)
- r8152_mdio_write(tp, MII_CTRL1000, tmp1);
+ if (orig != new1)
+ r8152_mdio_write(tp, MII_CTRL1000, new1);
+ }
+
+ if (tp->support_2500full) {
+ orig = ocp_reg_read(tp, OCP_10GBT_CTRL);
+ new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ if (advertising & RTL_ADVERTISED_2500_FULL) {
+ new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G;
+ tp->ups_info.speed_duplex = NWAY_2500M_FULL;
+ }
+
+ if (orig != new1)
+ ocp_reg_write(tp, OCP_10GBT_CTRL, new1);
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -5064,6 +6273,253 @@ static void rtl8153b_down(struct r8152 *tp)
r8153_aldps_en(tp, true);
}
+static void rtl8153c_change_mtu(struct r8152 *tp)
+{
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64);
+
+ /* Adjust the tx fifo free credit full threshold; otherwise,
+ * the fifo would be too small to send a jumbo frame.
+ */
+ if (tp->netdev->mtu < 8000)
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8);
+ else
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8);
+}
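+
+/* The magic numbers above are register units, as a sketch: PLA_MTPS
+ * appears to take 64-byte units, so 10 * 1024 / 64 = 160 caps transmit
+ * packets at 10 KB, and PLA_TXFIFO_CTRL likewise gets 512 / 64 = 8;
+ * PLA_TXFIFO_FULL appears to take 8-byte units, giving 2048 / 8 = 256
+ * (or 900 / 8 = 112) as the free-credit-full threshold.
+ */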
+
+static void rtl8153c_up(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ r8153_aldps_en(tp, false);
+
+ rxdy_gated_en(tp, true);
+ r8153_teredo_off(tp);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data &= ~MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ wait_oob_link_list_ready(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ wait_oob_link_list_ready(tp);
+
+ rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ rtl8153c_change_mtu(tp);
+
+ rtl8152_nic_reset(tp);
+
+ /* rx share fifo credit full threshold */
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
+
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data |= BIT(8);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ r8153_aldps_en(tp, true);
+ r8153b_u1u2en(tp, true);
+}
+
+static inline u32 fc_pause_on_auto(struct r8152 *tp)
+{
+ return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
+}
+
+static inline u32 fc_pause_off_auto(struct r8152 *tp)
+{
+ return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
+}
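+
+/* Worked example, assuming the default 1500-byte MTU: mtu_to_size()
+ * yields 1500 + VLAN_ETH_HLEN + ETH_FCS_LEN = 1522, so
+ *
+ *	pause_on  = ALIGN(1522, 1024) + 6 * 1024  = 2048 + 6144  = 8192
+ *	pause_off = ALIGN(1522, 1024) + 14 * 1024 = 2048 + 14336 = 16384
+ *
+ * bytes of RX FIFO occupancy, which r8156_fc_parameter() below scales
+ * to register units (divide by 8 for RTL_VER_10/11, by 16 for
+ * RTL_VER_12/13/15).
+ */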
+
+static void r8156_fc_parameter(struct r8152 *tp)
+{
+ u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
+ u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
+
+ switch (tp->version) {
+ case RTL_VER_10:
+ case RTL_VER_11:
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
+ break;
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtl8156_change_mtu(struct r8152 *tp)
+{
+ u32 rx_max_size = mtu_to_size(tp->netdev->mtu);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
+ r8156_fc_parameter(tp);
+
+ /* TX share fifo free credit full threshold */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL,
+ ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16);
+}
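+
+/* Sketch of the TX threshold math, assuming sizeof(struct tx_desc) == 8
+ * and a 1500-byte MTU: rx_max_size = 1522, so PLA_TXFIFO_FULL gets
+ * ALIGN(1522 + 8, 1024) / 16 = 2048 / 16 = 128, i.e. enough free credit
+ * for one maximum-sized frame plus its descriptor before the FIFO is
+ * considered full.
+ */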
+
+static void rtl8156_up(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ r8153_aldps_en(tp, false);
+
+ rxdy_gated_en(tp, true);
+ r8153_teredo_off(tp);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data &= ~MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ rtl8156_change_mtu(tp);
+
+ switch (tp->version) {
+ case RTL_TEST_01:
+ case RTL_VER_10:
+ case RTL_VER_11:
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG);
+ ocp_data |= ACT_ODMA;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);
+ break;
+ default:
+ break;
+ }
+
+ /* share FIFO settings */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL);
+ ocp_data &= ~RXFIFO_FULL_MASK;
+ ocp_data |= 0x08;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION);
+ ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data);
+
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400);
+
+ if (tp->saved_wolopts != __rtl_get_wol(tp)) {
+ netif_warn(tp, ifup, tp->netdev, "wol setting changed\n");
+ __rtl_set_wol(tp, tp->saved_wolopts);
+ }
+
+ r8153_aldps_en(tp, true);
+ r8153_u2p3en(tp, true);
+
+ if (tp->udev->speed >= USB_SPEED_SUPER)
+ r8153b_u1u2en(tp, true);
+}
+
+static void rtl8156_down(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data |= PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ r8153b_power_cut_en(tp, false);
+ r8153_aldps_en(tp, false);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ rtl_disable(tp);
+ rtl_reset_bmu(tp);
+
+ /* Clear the teredo wake event. bits[15:8] are the teredo wakeup
+ * type; set them to zero. bits[7:0] are the W1C bits for the
+ * events; write all 1s to clear them.
+ */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data |= NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ rtl_rx_vlan_en(tp, true);
+ rxdy_gated_en(tp, false);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data |= RCR_APM | RCR_AM | RCR_AB;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ r8153_aldps_en(tp, true);
+}
+
static bool rtl8152_in_nway(struct r8152 *tp)
{
u16 nway_state;
@@ -5094,7 +6550,7 @@ static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
struct napi_struct *napi = &tp->napi;
- u8 speed;
+ u16 speed;
speed = rtl8152_get_speed(tp);
@@ -5107,7 +6563,7 @@ static void set_carrier(struct r8152 *tp)
rtl_start_rx(tp);
clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
_rtl8152_set_rx_mode(netdev);
- napi_enable(&tp->napi);
+ napi_enable(napi);
netif_wake_queue(netdev);
netif_info(tp, link, netdev, "carrier on\n");
} else if (netif_queue_stopped(netdev) &&
@@ -5468,14 +6924,9 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
- /* MAC clock speed down */
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
-
r8153_power_cut_en(tp, false);
rtl_runtime_suspend_enable(tp, false);
+ r8153_mac_clk_speed_down(tp, false);
r8153_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
@@ -5490,7 +6941,7 @@ static void r8153_init(struct r8152 *tp)
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
- if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ if (tp->dell_tb_rx_agg_bug)
ocp_data |= RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
@@ -5566,9 +7017,7 @@ static void r8153b_init(struct r8152 *tp)
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2);
- ocp_data |= MAC_CLK_SPDWN_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ r8153_mac_clk_speed_down(tp, true);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
ocp_data &= ~PLA_MCU_SPDWN_EN;
@@ -5595,6 +7044,1102 @@ static void r8153b_init(struct r8152 *tp)
tp->coalesce = 15000; /* 15 us */
}
+static void r8153c_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+ int i;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+
+ /* Disable spi_en */
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ ocp_data &= ~BIT(3);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0);
+ ocp_data |= BIT(1);
+ ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data);
+
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+
+ msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+ }
+
+ data = r8153_phy_status(tp, 0);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+
+ r8153_u2p3en(tp, false);
+
+ /* MSC timer = 0xfff * 8ms = 32760 ms */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
+
+ r8153b_power_cut_en(tp, false);
+ r8153c_ups_en(tp, false);
+ r8153_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ r8153b_u1u2en(tp, true);
+
+ usb_enable_lpm(tp->udev);
+
+ /* MAC clock speed down */
+ r8153_mac_clk_speed_down(tp, true);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
+ ocp_data &= ~BIT(7);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);
+
+ set_bit(GREEN_ETHERNET, &tp->flags);
+
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+
+ rtl_tally_reset(tp);
+
+ tp->coalesce = 15000; /* 15 us */
+}
+
+static void r8156_hw_phy_cfg(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if (ocp_data & PCUT_STATUS) {
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+ }
+
+ data = r8153_phy_status(tp, 0);
+ switch (data) {
+ case PHY_STAT_EXT_INIT:
+ rtl8152_apply_firmware(tp, true);
+
+ data = ocp_reg_read(tp, 0xa468);
+ data &= ~(BIT(3) | BIT(1));
+ ocp_reg_write(tp, 0xa468, data);
+ break;
+ case PHY_STAT_LAN_ON:
+ case PHY_STAT_PWRDN:
+ default:
+ rtl8152_apply_firmware(tp, false);
+ break;
+ }
+
+ /* disable ALDPS before updating the PHY parameters */
+ r8153_aldps_en(tp, false);
+
+ /* disable EEE before updating the PHY parameters */
+ rtl_eee_enable(tp, false);
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+ WARN_ON_ONCE(data != PHY_STAT_LAN_ON);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+ switch (tp->version) {
+ case RTL_VER_10:
+ data = ocp_reg_read(tp, 0xad40);
+ data &= ~0x3ff;
+ data |= BIT(7) | BIT(2);
+ ocp_reg_write(tp, 0xad40, data);
+
+ data = ocp_reg_read(tp, 0xad4e);
+ data |= BIT(4);
+ ocp_reg_write(tp, 0xad4e, data);
+ data = ocp_reg_read(tp, 0xad16);
+ data &= ~0x3ff;
+ data |= 0x6;
+ ocp_reg_write(tp, 0xad16, data);
+ data = ocp_reg_read(tp, 0xad32);
+ data &= ~0x3f;
+ data |= 6;
+ ocp_reg_write(tp, 0xad32, data);
+ data = ocp_reg_read(tp, 0xac08);
+ data &= ~(BIT(12) | BIT(8));
+ ocp_reg_write(tp, 0xac08, data);
+ data = ocp_reg_read(tp, 0xac8a);
+ data |= BIT(12) | BIT(13) | BIT(14);
+ data &= ~BIT(15);
+ ocp_reg_write(tp, 0xac8a, data);
+ data = ocp_reg_read(tp, 0xad18);
+ data |= BIT(10);
+ ocp_reg_write(tp, 0xad18, data);
+ data = ocp_reg_read(tp, 0xad1a);
+ data |= 0x3ff;
+ ocp_reg_write(tp, 0xad1a, data);
+ data = ocp_reg_read(tp, 0xad1c);
+ data |= 0x3ff;
+ ocp_reg_write(tp, 0xad1c, data);
+
+ data = sram_read(tp, 0x80ea);
+ data &= ~0xff00;
+ data |= 0xc400;
+ sram_write(tp, 0x80ea, data);
+ data = sram_read(tp, 0x80eb);
+ data &= ~0x0700;
+ data |= 0x0300;
+ sram_write(tp, 0x80eb, data);
+ data = sram_read(tp, 0x80f8);
+ data &= ~0xff00;
+ data |= 0x1c00;
+ sram_write(tp, 0x80f8, data);
+ data = sram_read(tp, 0x80f1);
+ data &= ~0xff00;
+ data |= 0x3000;
+ sram_write(tp, 0x80f1, data);
+
+ data = sram_read(tp, 0x80fe);
+ data &= ~0xff00;
+ data |= 0xa500;
+ sram_write(tp, 0x80fe, data);
+ data = sram_read(tp, 0x8102);
+ data &= ~0xff00;
+ data |= 0x5000;
+ sram_write(tp, 0x8102, data);
+ data = sram_read(tp, 0x8015);
+ data &= ~0xff00;
+ data |= 0x3300;
+ sram_write(tp, 0x8015, data);
+ data = sram_read(tp, 0x8100);
+ data &= ~0xff00;
+ data |= 0x7000;
+ sram_write(tp, 0x8100, data);
+ data = sram_read(tp, 0x8014);
+ data &= ~0xff00;
+ data |= 0xf000;
+ sram_write(tp, 0x8014, data);
+ data = sram_read(tp, 0x8016);
+ data &= ~0xff00;
+ data |= 0x6500;
+ sram_write(tp, 0x8016, data);
+ data = sram_read(tp, 0x80dc);
+ data &= ~0xff00;
+ data |= 0xed00;
+ sram_write(tp, 0x80dc, data);
+ data = sram_read(tp, 0x80df);
+ data |= BIT(8);
+ sram_write(tp, 0x80df, data);
+ data = sram_read(tp, 0x80e1);
+ data &= ~BIT(8);
+ sram_write(tp, 0x80e1, data);
+
+ data = ocp_reg_read(tp, 0xbf06);
+ data &= ~0x003f;
+ data |= 0x0038;
+ ocp_reg_write(tp, 0xbf06, data);
+
+ sram_write(tp, 0x819f, 0xddb6);
+
+ ocp_reg_write(tp, 0xbc34, 0x5555);
+ data = ocp_reg_read(tp, 0xbf0a);
+ data &= ~0x0e00;
+ data |= 0x0a00;
+ ocp_reg_write(tp, 0xbf0a, data);
+
+ data = ocp_reg_read(tp, 0xbd2c);
+ data &= ~BIT(13);
+ ocp_reg_write(tp, 0xbd2c, data);
+ break;
+ case RTL_VER_11:
+ data = ocp_reg_read(tp, 0xad16);
+ data |= 0x3ff;
+ ocp_reg_write(tp, 0xad16, data);
+ data = ocp_reg_read(tp, 0xad32);
+ data &= ~0x3f;
+ data |= 6;
+ ocp_reg_write(tp, 0xad32, data);
+ data = ocp_reg_read(tp, 0xac08);
+ data &= ~(BIT(12) | BIT(8));
+ ocp_reg_write(tp, 0xac08, data);
+ data = ocp_reg_read(tp, 0xacc0);
+ data &= ~0x3;
+ data |= BIT(1);
+ ocp_reg_write(tp, 0xacc0, data);
+ data = ocp_reg_read(tp, 0xad40);
+ data &= ~0xe7;
+ data |= BIT(6) | BIT(2);
+ ocp_reg_write(tp, 0xad40, data);
+ data = ocp_reg_read(tp, 0xac14);
+ data &= ~BIT(7);
+ ocp_reg_write(tp, 0xac14, data);
+ data = ocp_reg_read(tp, 0xac80);
+ data &= ~(BIT(8) | BIT(9));
+ ocp_reg_write(tp, 0xac80, data);
+ data = ocp_reg_read(tp, 0xac5e);
+ data &= ~0x7;
+ data |= BIT(1);
+ ocp_reg_write(tp, 0xac5e, data);
+ ocp_reg_write(tp, 0xad4c, 0x00a8);
+ ocp_reg_write(tp, 0xac5c, 0x01ff);
+ data = ocp_reg_read(tp, 0xac8a);
+ data &= ~0xf0;
+ data |= BIT(4) | BIT(5);
+ ocp_reg_write(tp, 0xac8a, data);
+ ocp_reg_write(tp, 0xb87c, 0x8157);
+ data = ocp_reg_read(tp, 0xb87e);
+ data &= ~0xff00;
+ data |= 0x0500;
+ ocp_reg_write(tp, 0xb87e, data);
+ ocp_reg_write(tp, 0xb87c, 0x8159);
+ data = ocp_reg_read(tp, 0xb87e);
+ data &= ~0xff00;
+ data |= 0x0700;
+ ocp_reg_write(tp, 0xb87e, data);
+
+ /* AAGC */
+ ocp_reg_write(tp, 0xb87c, 0x80a2);
+ ocp_reg_write(tp, 0xb87e, 0x0153);
+ ocp_reg_write(tp, 0xb87c, 0x809c);
+ ocp_reg_write(tp, 0xb87e, 0x0153);
+
+ /* EEE parameter */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG);
+ ocp_data |= EN_XG_LIP | EN_G_LIP;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data);
+
+ sram_write(tp, 0x8257, 0x020f); /* XG PLL */
+ sram_write(tp, 0x80ea, 0x7843); /* GIGA Master */
+
+ if (rtl_phy_patch_request(tp, true, true))
+ return;
+
+ /* Advance EEE */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
+ ocp_data |= EEE_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
+
+ data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+ data &= ~(EN_EEE_100 | EN_EEE_1000);
+ data |= EN_10M_CLKDIV;
+ ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+ tp->ups_info._10m_ckdiv = true;
+ tp->ups_info.eee_plloff_100 = false;
+ tp->ups_info.eee_plloff_giga = false;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data &= ~EEE_CLKDIV_EN;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ tp->ups_info.eee_ckdiv = false;
+
+ ocp_reg_write(tp, OCP_SYSCLK_CFG, 0);
+ ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5));
+ tp->ups_info._250m_ckdiv = false;
+
+ rtl_phy_patch_request(tp, false, true);
+
+ /* enable ADC Ibias Cal */
+ data = ocp_reg_read(tp, 0xd068);
+ data |= BIT(13);
+ ocp_reg_write(tp, 0xd068, data);
+
+ /* enable Thermal Sensor */
+ data = sram_read(tp, 0x81a2);
+ data &= ~BIT(8);
+ sram_write(tp, 0x81a2, data);
+ data = ocp_reg_read(tp, 0xb54c);
+ data &= ~0xff00;
+ data |= 0xdb00;
+ ocp_reg_write(tp, 0xb54c, data);
+
+ /* Nway 2.5G Lite */
+ data = ocp_reg_read(tp, 0xa454);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xa454, data);
+
+ /* CS DSP solution */
+ data = ocp_reg_read(tp, OCP_10GBT_CTRL);
+ data |= RTL_ADV2_5G_F_R;
+ ocp_reg_write(tp, OCP_10GBT_CTRL, data);
+ data = ocp_reg_read(tp, 0xad4e);
+ data &= ~BIT(4);
+ ocp_reg_write(tp, 0xad4e, data);
+ data = ocp_reg_read(tp, 0xa86a);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xa86a, data);
+
+ /* MDI SWAP */
+ if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) & MID_REVERSE) &&
+ (ocp_reg_read(tp, 0xd068) & BIT(1))) {
+ u16 swap_a, swap_b;
+
+ data = ocp_reg_read(tp, 0xd068);
+ data &= ~0x1f;
+ data |= 0x1; /* p0 */
+ ocp_reg_write(tp, 0xd068, data);
+ swap_a = ocp_reg_read(tp, 0xd06a);
+ data &= ~0x18;
+ data |= 0x18; /* p3 */
+ ocp_reg_write(tp, 0xd068, data);
+ swap_b = ocp_reg_read(tp, 0xd06a);
+ data &= ~0x18; /* p0 */
+ ocp_reg_write(tp, 0xd068, data);
+ ocp_reg_write(tp, 0xd06a,
+ (swap_a & ~0x7ff) | (swap_b & 0x7ff));
+ data |= 0x18; /* p3 */
+ ocp_reg_write(tp, 0xd068, data);
+ ocp_reg_write(tp, 0xd06a,
+ (swap_b & ~0x7ff) | (swap_a & 0x7ff));
+ data &= ~0x18;
+ data |= 0x08; /* p1 */
+ ocp_reg_write(tp, 0xd068, data);
+ swap_a = ocp_reg_read(tp, 0xd06a);
+ data &= ~0x18;
+ data |= 0x10; /* p2 */
+ ocp_reg_write(tp, 0xd068, data);
+ swap_b = ocp_reg_read(tp, 0xd06a);
+ data &= ~0x18;
+ data |= 0x08; /* p1 */
+ ocp_reg_write(tp, 0xd068, data);
+ ocp_reg_write(tp, 0xd06a,
+ (swap_a & ~0x7ff) | (swap_b & 0x7ff));
+ data &= ~0x18;
+ data |= 0x10; /* p2 */
+ ocp_reg_write(tp, 0xd068, data);
+ ocp_reg_write(tp, 0xd06a,
+ (swap_b & ~0x7ff) | (swap_a & 0x7ff));
+ swap_a = ocp_reg_read(tp, 0xbd5a);
+ swap_b = ocp_reg_read(tp, 0xbd5c);
+ ocp_reg_write(tp, 0xbd5a, (swap_a & ~0x1f1f) |
+ ((swap_b & 0x1f) << 8) |
+ ((swap_b >> 8) & 0x1f));
+ ocp_reg_write(tp, 0xbd5c, (swap_b & ~0x1f1f) |
+ ((swap_a & 0x1f) << 8) |
+ ((swap_a >> 8) & 0x1f));
+ swap_a = ocp_reg_read(tp, 0xbc18);
+ swap_b = ocp_reg_read(tp, 0xbc1a);
+ ocp_reg_write(tp, 0xbc18, (swap_a & ~0x1f1f) |
+ ((swap_b & 0x1f) << 8) |
+ ((swap_b >> 8) & 0x1f));
+ ocp_reg_write(tp, 0xbc1a, (swap_b & ~0x1f1f) |
+ ((swap_a & 0x1f) << 8) |
+ ((swap_a >> 8) & 0x1f));
+ }
+ break;
+ default:
+ break;
+ }
+
+ rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
+
+ data = ocp_reg_read(tp, 0xa428);
+ data &= ~BIT(9);
+ ocp_reg_write(tp, 0xa428, data);
+ data = ocp_reg_read(tp, 0xa5ea);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xa5ea, data);
+ tp->ups_info.lite_mode = 0;
+
+ if (tp->eee_en)
+ rtl_eee_enable(tp, true);
+
+ r8153_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
+ r8153_u2p3en(tp, true);
+
+ set_bit(PHY_RESET, &tp->flags);
+}
+
+static void r8156b_hw_phy_cfg(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ switch (tp->version) {
+ case RTL_VER_12:
+ ocp_reg_write(tp, 0xbf86, 0x9000);
+ data = ocp_reg_read(tp, 0xc402);
+ data |= BIT(10);
+ ocp_reg_write(tp, 0xc402, data);
+ data &= ~BIT(10);
+ ocp_reg_write(tp, 0xc402, data);
+ ocp_reg_write(tp, 0xbd86, 0x1010);
+ ocp_reg_write(tp, 0xbd88, 0x1010);
+ data = ocp_reg_read(tp, 0xbd4e);
+ data &= ~(BIT(10) | BIT(11));
+ data |= BIT(11);
+ ocp_reg_write(tp, 0xbd4e, data);
+ data = ocp_reg_read(tp, 0xbf46);
+ data &= ~0xf00;
+ data |= 0x700;
+ ocp_reg_write(tp, 0xbf46, data);
+ break;
+ case RTL_VER_13:
+ case RTL_VER_15:
+ r8156b_wait_loading_flash(tp);
+ break;
+ default:
+ break;
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if (ocp_data & PCUT_STATUS) {
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+ }
+
+ data = r8153_phy_status(tp, 0);
+ switch (data) {
+ case PHY_STAT_EXT_INIT:
+ rtl8152_apply_firmware(tp, true);
+
+ data = ocp_reg_read(tp, 0xa466);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xa466, data);
+
+ data = ocp_reg_read(tp, 0xa468);
+ data &= ~(BIT(3) | BIT(1));
+ ocp_reg_write(tp, 0xa468, data);
+ break;
+ case PHY_STAT_LAN_ON:
+ case PHY_STAT_PWRDN:
+ default:
+ rtl8152_apply_firmware(tp, false);
+ break;
+ }
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ /* disable ALDPS before updating the PHY parameters */
+ r8153_aldps_en(tp, false);
+
+ /* disable EEE before updating the PHY parameters */
+ rtl_eee_enable(tp, false);
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+ WARN_ON_ONCE(data != PHY_STAT_LAN_ON);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+ switch (tp->version) {
+ case RTL_VER_12:
+ data = ocp_reg_read(tp, 0xbc08);
+ data |= BIT(3) | BIT(2);
+ ocp_reg_write(tp, 0xbc08, data);
+
+ data = sram_read(tp, 0x8fff);
+ data &= ~0xff00;
+ data |= 0x0400;
+ sram_write(tp, 0x8fff, data);
+
+ data = ocp_reg_read(tp, 0xacda);
+ data |= 0xff00;
+ ocp_reg_write(tp, 0xacda, data);
+ data = ocp_reg_read(tp, 0xacde);
+ data |= 0xf000;
+ ocp_reg_write(tp, 0xacde, data);
+ ocp_reg_write(tp, 0xac8c, 0x0ffc);
+ ocp_reg_write(tp, 0xac46, 0xb7b4);
+ ocp_reg_write(tp, 0xac50, 0x0fbc);
+ ocp_reg_write(tp, 0xac3c, 0x9240);
+ ocp_reg_write(tp, 0xac4e, 0x0db4);
+ ocp_reg_write(tp, 0xacc6, 0x0707);
+ ocp_reg_write(tp, 0xacc8, 0xa0d3);
+ ocp_reg_write(tp, 0xad08, 0x0007);
+
+ ocp_reg_write(tp, 0xb87c, 0x8560);
+ ocp_reg_write(tp, 0xb87e, 0x19cc);
+ ocp_reg_write(tp, 0xb87c, 0x8562);
+ ocp_reg_write(tp, 0xb87e, 0x19cc);
+ ocp_reg_write(tp, 0xb87c, 0x8564);
+ ocp_reg_write(tp, 0xb87e, 0x19cc);
+ ocp_reg_write(tp, 0xb87c, 0x8566);
+ ocp_reg_write(tp, 0xb87e, 0x147d);
+ ocp_reg_write(tp, 0xb87c, 0x8568);
+ ocp_reg_write(tp, 0xb87e, 0x147d);
+ ocp_reg_write(tp, 0xb87c, 0x856a);
+ ocp_reg_write(tp, 0xb87e, 0x147d);
+ ocp_reg_write(tp, 0xb87c, 0x8ffe);
+ ocp_reg_write(tp, 0xb87e, 0x0907);
+ ocp_reg_write(tp, 0xb87c, 0x80d6);
+ ocp_reg_write(tp, 0xb87e, 0x2801);
+ ocp_reg_write(tp, 0xb87c, 0x80f2);
+ ocp_reg_write(tp, 0xb87e, 0x2801);
+ ocp_reg_write(tp, 0xb87c, 0x80f4);
+ ocp_reg_write(tp, 0xb87e, 0x6077);
+ ocp_reg_write(tp, 0xb506, 0x01e7);
+
+ ocp_reg_write(tp, 0xb87c, 0x8013);
+ ocp_reg_write(tp, 0xb87e, 0x0700);
+ ocp_reg_write(tp, 0xb87c, 0x8fb9);
+ ocp_reg_write(tp, 0xb87e, 0x2801);
+ ocp_reg_write(tp, 0xb87c, 0x8fba);
+ ocp_reg_write(tp, 0xb87e, 0x0100);
+ ocp_reg_write(tp, 0xb87c, 0x8fbc);
+ ocp_reg_write(tp, 0xb87e, 0x1900);
+ ocp_reg_write(tp, 0xb87c, 0x8fbe);
+ ocp_reg_write(tp, 0xb87e, 0xe100);
+ ocp_reg_write(tp, 0xb87c, 0x8fc0);
+ ocp_reg_write(tp, 0xb87e, 0x0800);
+ ocp_reg_write(tp, 0xb87c, 0x8fc2);
+ ocp_reg_write(tp, 0xb87e, 0xe500);
+ ocp_reg_write(tp, 0xb87c, 0x8fc4);
+ ocp_reg_write(tp, 0xb87e, 0x0f00);
+ ocp_reg_write(tp, 0xb87c, 0x8fc6);
+ ocp_reg_write(tp, 0xb87e, 0xf100);
+ ocp_reg_write(tp, 0xb87c, 0x8fc8);
+ ocp_reg_write(tp, 0xb87e, 0x0400);
+ ocp_reg_write(tp, 0xb87c, 0x8fca);
+ ocp_reg_write(tp, 0xb87e, 0xf300);
+ ocp_reg_write(tp, 0xb87c, 0x8fcc);
+ ocp_reg_write(tp, 0xb87e, 0xfd00);
+ ocp_reg_write(tp, 0xb87c, 0x8fce);
+ ocp_reg_write(tp, 0xb87e, 0xff00);
+ ocp_reg_write(tp, 0xb87c, 0x8fd0);
+ ocp_reg_write(tp, 0xb87e, 0xfb00);
+ ocp_reg_write(tp, 0xb87c, 0x8fd2);
+ ocp_reg_write(tp, 0xb87e, 0x0100);
+ ocp_reg_write(tp, 0xb87c, 0x8fd4);
+ ocp_reg_write(tp, 0xb87e, 0xf400);
+ ocp_reg_write(tp, 0xb87c, 0x8fd6);
+ ocp_reg_write(tp, 0xb87e, 0xff00);
+ ocp_reg_write(tp, 0xb87c, 0x8fd8);
+ ocp_reg_write(tp, 0xb87e, 0xf600);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG);
+ ocp_data |= EN_XG_LIP | EN_G_LIP;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data);
+ ocp_reg_write(tp, 0xb87c, 0x813d);
+ ocp_reg_write(tp, 0xb87e, 0x390e);
+ ocp_reg_write(tp, 0xb87c, 0x814f);
+ ocp_reg_write(tp, 0xb87e, 0x790e);
+ ocp_reg_write(tp, 0xb87c, 0x80b0);
+ ocp_reg_write(tp, 0xb87e, 0x0f31);
+ data = ocp_reg_read(tp, 0xbf4c);
+ data |= BIT(1);
+ ocp_reg_write(tp, 0xbf4c, data);
+ data = ocp_reg_read(tp, 0xbcca);
+ data |= BIT(9) | BIT(8);
+ ocp_reg_write(tp, 0xbcca, data);
+ ocp_reg_write(tp, 0xb87c, 0x8141);
+ ocp_reg_write(tp, 0xb87e, 0x320e);
+ ocp_reg_write(tp, 0xb87c, 0x8153);
+ ocp_reg_write(tp, 0xb87e, 0x720e);
+ ocp_reg_write(tp, 0xb87c, 0x8529);
+ ocp_reg_write(tp, 0xb87e, 0x050e);
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data &= ~CTAP_SHORT_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+
+ sram_write(tp, 0x816c, 0xc4a0);
+ sram_write(tp, 0x8170, 0xc4a0);
+ sram_write(tp, 0x8174, 0x04a0);
+ sram_write(tp, 0x8178, 0x04a0);
+ sram_write(tp, 0x817c, 0x0719);
+ sram_write(tp, 0x8ff4, 0x0400);
+ sram_write(tp, 0x8ff1, 0x0404);
+
+ ocp_reg_write(tp, 0xbf4a, 0x001b);
+ ocp_reg_write(tp, 0xb87c, 0x8033);
+ ocp_reg_write(tp, 0xb87e, 0x7c13);
+ ocp_reg_write(tp, 0xb87c, 0x8037);
+ ocp_reg_write(tp, 0xb87e, 0x7c13);
+ ocp_reg_write(tp, 0xb87c, 0x803b);
+ ocp_reg_write(tp, 0xb87e, 0xfc32);
+ ocp_reg_write(tp, 0xb87c, 0x803f);
+ ocp_reg_write(tp, 0xb87e, 0x7c13);
+ ocp_reg_write(tp, 0xb87c, 0x8043);
+ ocp_reg_write(tp, 0xb87e, 0x7c13);
+ ocp_reg_write(tp, 0xb87c, 0x8047);
+ ocp_reg_write(tp, 0xb87e, 0x7c13);
+
+ ocp_reg_write(tp, 0xb87c, 0x8145);
+ ocp_reg_write(tp, 0xb87e, 0x370e);
+ ocp_reg_write(tp, 0xb87c, 0x8157);
+ ocp_reg_write(tp, 0xb87e, 0x770e);
+ ocp_reg_write(tp, 0xb87c, 0x8169);
+ ocp_reg_write(tp, 0xb87e, 0x0d0a);
+ ocp_reg_write(tp, 0xb87c, 0x817b);
+ ocp_reg_write(tp, 0xb87e, 0x1d0a);
+
+ data = sram_read(tp, 0x8217);
+ data &= ~0xff00;
+ data |= 0x5000;
+ sram_write(tp, 0x8217, data);
+ data = sram_read(tp, 0x821a);
+ data &= ~0xff00;
+ data |= 0x5000;
+ sram_write(tp, 0x821a, data);
+ sram_write(tp, 0x80da, 0x0403);
+ data = sram_read(tp, 0x80dc);
+ data &= ~0xff00;
+ data |= 0x1000;
+ sram_write(tp, 0x80dc, data);
+ sram_write(tp, 0x80b3, 0x0384);
+ sram_write(tp, 0x80b7, 0x2007);
+ data = sram_read(tp, 0x80ba);
+ data &= ~0xff00;
+ data |= 0x6c00;
+ sram_write(tp, 0x80ba, data);
+ sram_write(tp, 0x80b5, 0xf009);
+ data = sram_read(tp, 0x80bd);
+ data &= ~0xff00;
+ data |= 0x9f00;
+ sram_write(tp, 0x80bd, data);
+ sram_write(tp, 0x80c7, 0xf083);
+ sram_write(tp, 0x80dd, 0x03f0);
+ data = sram_read(tp, 0x80df);
+ data &= ~0xff00;
+ data |= 0x1000;
+ sram_write(tp, 0x80df, data);
+ sram_write(tp, 0x80cb, 0x2007);
+ data = sram_read(tp, 0x80ce);
+ data &= ~0xff00;
+ data |= 0x6c00;
+ sram_write(tp, 0x80ce, data);
+ sram_write(tp, 0x80c9, 0x8009);
+ data = sram_read(tp, 0x80d1);
+ data &= ~0xff00;
+ data |= 0x8000;
+ sram_write(tp, 0x80d1, data);
+ sram_write(tp, 0x80a3, 0x200a);
+ sram_write(tp, 0x80a5, 0xf0ad);
+ sram_write(tp, 0x809f, 0x6073);
+ sram_write(tp, 0x80a1, 0x000b);
+ data = sram_read(tp, 0x80a9);
+ data &= ~0xff00;
+ data |= 0xc000;
+ sram_write(tp, 0x80a9, data);
+
+ if (rtl_phy_patch_request(tp, true, true))
+ return;
+
+ data = ocp_reg_read(tp, 0xb896);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xb896, data);
+ data = ocp_reg_read(tp, 0xb892);
+ data &= ~0xff00;
+ ocp_reg_write(tp, 0xb892, data);
+ ocp_reg_write(tp, 0xb88e, 0xc23e);
+ ocp_reg_write(tp, 0xb890, 0x0000);
+ ocp_reg_write(tp, 0xb88e, 0xc240);
+ ocp_reg_write(tp, 0xb890, 0x0103);
+ ocp_reg_write(tp, 0xb88e, 0xc242);
+ ocp_reg_write(tp, 0xb890, 0x0507);
+ ocp_reg_write(tp, 0xb88e, 0xc244);
+ ocp_reg_write(tp, 0xb890, 0x090b);
+ ocp_reg_write(tp, 0xb88e, 0xc246);
+ ocp_reg_write(tp, 0xb890, 0x0c0e);
+ ocp_reg_write(tp, 0xb88e, 0xc248);
+ ocp_reg_write(tp, 0xb890, 0x1012);
+ ocp_reg_write(tp, 0xb88e, 0xc24a);
+ ocp_reg_write(tp, 0xb890, 0x1416);
+ data = ocp_reg_read(tp, 0xb896);
+ data |= BIT(0);
+ ocp_reg_write(tp, 0xb896, data);
+
+ rtl_phy_patch_request(tp, false, true);
+
+ data = ocp_reg_read(tp, 0xa86a);
+ data |= BIT(0);
+ ocp_reg_write(tp, 0xa86a, data);
+ data = ocp_reg_read(tp, 0xa6f0);
+ data |= BIT(0);
+ ocp_reg_write(tp, 0xa6f0, data);
+
+ ocp_reg_write(tp, 0xbfa0, 0xd70d);
+ ocp_reg_write(tp, 0xbfa2, 0x4100);
+ ocp_reg_write(tp, 0xbfa4, 0xe868);
+ ocp_reg_write(tp, 0xbfa6, 0xdc59);
+ ocp_reg_write(tp, 0xb54c, 0x3c18);
+ data = ocp_reg_read(tp, 0xbfa4);
+ data &= ~BIT(5);
+ ocp_reg_write(tp, 0xbfa4, data);
+ data = sram_read(tp, 0x817d);
+ data |= BIT(12);
+ sram_write(tp, 0x817d, data);
+ break;
+ case RTL_VER_13:
+ /* 2.5G INRX */
+ data = ocp_reg_read(tp, 0xac46);
+ data &= ~0x00f0;
+ data |= 0x0090;
+ ocp_reg_write(tp, 0xac46, data);
+ data = ocp_reg_read(tp, 0xad30);
+ data &= ~0x0003;
+ data |= 0x0001;
+ ocp_reg_write(tp, 0xad30, data);
+ fallthrough;
+ case RTL_VER_15:
+ /* EEE parameter */
+ ocp_reg_write(tp, 0xb87c, 0x80f5);
+ ocp_reg_write(tp, 0xb87e, 0x760e);
+ ocp_reg_write(tp, 0xb87c, 0x8107);
+ ocp_reg_write(tp, 0xb87e, 0x360e);
+ ocp_reg_write(tp, 0xb87c, 0x8551);
+ data = ocp_reg_read(tp, 0xb87e);
+ data &= ~0xff00;
+ data |= 0x0800;
+ ocp_reg_write(tp, 0xb87e, data);
+
+ /* ADC_PGA parameter */
+ data = ocp_reg_read(tp, 0xbf00);
+ data &= ~0xe000;
+ data |= 0xa000;
+ ocp_reg_write(tp, 0xbf00, data);
+ data = ocp_reg_read(tp, 0xbf46);
+ data &= ~0x0f00;
+ data |= 0x0300;
+ ocp_reg_write(tp, 0xbf46, data);
+
+ /* Green Table-PGA, 1G full viterbi */
+ sram_write(tp, 0x8044, 0x2417);
+ sram_write(tp, 0x804a, 0x2417);
+ sram_write(tp, 0x8050, 0x2417);
+ sram_write(tp, 0x8056, 0x2417);
+ sram_write(tp, 0x805c, 0x2417);
+ sram_write(tp, 0x8062, 0x2417);
+ sram_write(tp, 0x8068, 0x2417);
+ sram_write(tp, 0x806e, 0x2417);
+ sram_write(tp, 0x8074, 0x2417);
+ sram_write(tp, 0x807a, 0x2417);
+
+ /* XG PLL */
+ data = ocp_reg_read(tp, 0xbf84);
+ data &= ~0xe000;
+ data |= 0xa000;
+ ocp_reg_write(tp, 0xbf84, data);
+ break;
+ default:
+ break;
+ }
+
+ if (rtl_phy_patch_request(tp, true, true))
+ return;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
+ ocp_data |= EEE_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
+
+ data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+ data &= ~(EN_EEE_100 | EN_EEE_1000);
+ data |= EN_10M_CLKDIV;
+ ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+ tp->ups_info._10m_ckdiv = true;
+ tp->ups_info.eee_plloff_100 = false;
+ tp->ups_info.eee_plloff_giga = false;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data &= ~EEE_CLKDIV_EN;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ tp->ups_info.eee_ckdiv = false;
+
+ rtl_phy_patch_request(tp, false, true);
+
+ rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
+
+ data = ocp_reg_read(tp, 0xa428);
+ data &= ~BIT(9);
+ ocp_reg_write(tp, 0xa428, data);
+ data = ocp_reg_read(tp, 0xa5ea);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xa5ea, data);
+ tp->ups_info.lite_mode = 0;
+
+ if (tp->eee_en)
+ rtl_eee_enable(tp, true);
+
+ r8153_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
+ r8153_u2p3en(tp, true);
+
+ set_bit(PHY_RESET, &tp->flags);
+}
+
+static void r8156_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+ int i;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
+ ocp_data &= ~EN_ALL_SPEED;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION);
+ ocp_data |= BYPASS_MAC_RESET;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data);
+
+ r8153b_u1u2en(tp, false);
+
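+ /* Wait up to 500 * 20 ms = 10 s for the MCU autoload to complete. */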
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+
+ msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+ }
+
+ data = r8153_phy_status(tp, 0);
+ if (data == PHY_STAT_EXT_INIT) {
+ data = ocp_reg_read(tp, 0xa468);
+ data &= ~(BIT(3) | BIT(1));
+ ocp_reg_write(tp, 0xa468, data);
+ }
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+ WARN_ON_ONCE(data != PHY_STAT_LAN_ON);
+
+ r8153_u2p3en(tp, false);
+
+ /* MSC timer = 0xfff * 8ms = 32760 ms */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
+
+ /* U1/U2/L1 idle timer. 500 us */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);
+
+ r8153b_power_cut_en(tp, false);
+ r8156_ups_en(tp, false);
+ r8153_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+
+ if (tp->udev->speed >= USB_SPEED_SUPER)
+ r8153b_u1u2en(tp, true);
+
+ usb_enable_lpm(tp->udev);
+
+ r8156_mac_clk_spd(tp, true);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ set_bit(GREEN_ETHERNET, &tp->flags);
+
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG);
+ ocp_data |= ACT_ODMA;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);
+
+ rtl_tally_reset(tp);
+
+ tp->coalesce = 15000; /* 15 us */
+}
+
+static void r8156b_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+ int i;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
+ ocp_data &= ~EN_ALL_SPEED;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION);
+ ocp_data |= BYPASS_MAC_RESET;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+ ocp_data |= RX_DETECT8;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+
+ r8153b_u1u2en(tp, false);
+
+ switch (tp->version) {
+ case RTL_VER_13:
+ case RTL_VER_15:
+ r8156b_wait_loading_flash(tp);
+ break;
+ default:
+ break;
+ }
+
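+ /* Wait up to 500 * 20 ms = 10 s for the MCU autoload to complete. */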
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+
+ msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+ }
+
+ data = r8153_phy_status(tp, 0);
+ if (data == PHY_STAT_EXT_INIT) {
+ data = ocp_reg_read(tp, 0xa468);
+ data &= ~(BIT(3) | BIT(1));
+ ocp_reg_write(tp, 0xa468, data);
+
+ data = ocp_reg_read(tp, 0xa466);
+ data &= ~BIT(0);
+ ocp_reg_write(tp, 0xa466, data);
+ }
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+
+ r8153_u2p3en(tp, false);
+
+ /* MSC timer = 0xfff * 8ms = 32760 ms */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
+
+ /* U1/U2/L1 idle timer. 500 us */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);
+
+ r8153b_power_cut_en(tp, false);
+ r8156_ups_en(tp, false);
+ r8153_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+
+ if (tp->udev->speed >= USB_SPEED_SUPER)
+ r8153b_u1u2en(tp, true);
+
+ usb_enable_lpm(tp->udev);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~SLOT_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data |= FLOW_CTRL_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ /* enable fc timer and set timer to 600 ms. */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER,
+ CTRL_TIMER_EN | (600 / 8));
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
+ if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & DACK_DET_EN))
+ ocp_data |= FLOW_CTRL_PATCH_2;
+ ocp_data &= ~AUTO_SPEEDUP;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+
+ r8156_mac_clk_spd(tp, true);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ set_bit(GREEN_ETHERNET, &tp->flags);
+
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+
+ rtl_tally_reset(tp);
+
+ tp->coalesce = 15000; /* 15 us */
+}
+
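+/* Return true if the current interface is already in vendor mode; otherwise
+ * look for a configuration with a vendor-specific interface and switch to it.
+ */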
+static bool rtl_vendor_mode(struct usb_interface *intf)
+{
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_device *udev;
+ struct usb_host_config *c;
+ int i, num_configs;
+
+ if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
+ return true;
+
+ /* The vendor mode is not always config #1, so we need to find it out. */
+ udev = interface_to_usbdev(intf);
+ c = udev->config;
+ num_configs = udev->descriptor.bNumConfigurations;
+ for (i = 0; i < num_configs; (i++, c++)) {
+ struct usb_interface_descriptor *desc = NULL;
+
+ if (c->desc.bNumInterfaces > 0)
+ desc = &c->intf_cache[0]->altsetting->desc;
+ else
+ continue;
+
+ if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
+ usb_driver_set_configuration(udev, c->desc.bConfigurationValue);
+ break;
+ }
+ }
+
+ WARN_ON_ONCE(i == num_configs);
+
+ return false;
+}
+
static int rtl8152_pre_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -5958,6 +8503,22 @@ int rtl8152_get_link_ksettings(struct net_device *netdev,
mii_ethtool_get_link_ksettings(&tp->mii, cmd);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ cmd->link_modes.supported, tp->support_2500full);
+
+ if (tp->support_2500full) {
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ cmd->link_modes.advertising,
+ ocp_reg_read(tp, OCP_10GBT_CTRL) & MDIO_AN_10GBT_CTRL_ADV2_5G);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ cmd->link_modes.lp_advertising,
+ ocp_reg_read(tp, OCP_10GBT_STAT) & MDIO_AN_10GBT_STAT_LP2_5G);
+
+ if (is_speed_2500(rtl8152_get_speed(tp)))
+ cmd->base.speed = SPEED_2500;
+ }
+
mutex_unlock(&tp->control);
usb_autopm_put_interface(tp->intf);
@@ -6001,6 +8562,10 @@ static int rtl8152_set_link_ksettings(struct net_device *dev,
cmd->link_modes.advertising))
advertising |= RTL_ADVERTISED_1000_FULL;
+ if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ cmd->link_modes.advertising))
+ advertising |= RTL_ADVERTISED_2500_FULL;
+
mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
@@ -6459,12 +9024,21 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
if (netif_running(dev)) {
- u32 rms = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ if (tp->rtl_ops.change_mtu)
+ tp->rtl_ops.change_mtu(tp);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
-
- if (netif_carrier_ok(dev))
- r8153_set_rx_early_size(tp);
+ if (netif_carrier_ok(dev)) {
+ netif_stop_queue(dev);
+ napi_disable(&tp->napi);
+ tasklet_disable(&tp->tx_tl);
+ tp->rtl_ops.disable(tp);
+ tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
+ tasklet_enable(&tp->tx_tl);
+ napi_enable(&tp->napi);
+ rtl8152_set_rx_mode(dev);
+ netif_wake_queue(dev);
+ }
}
mutex_unlock(&tp->control);
@@ -6553,6 +9127,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153_hw_phy_cfg;
ops->autosuspend_en = rtl8153_runtime_enable;
+ ops->change_mtu = rtl8153_change_mtu;
if (tp->udev->speed < USB_SPEED_SUPER)
tp->rx_buf_sz = 16 * 1024;
else
@@ -6574,6 +9149,68 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153b_hw_phy_cfg;
ops->autosuspend_en = rtl8153b_runtime_enable;
+ ops->change_mtu = rtl8153_change_mtu;
+ tp->rx_buf_sz = 32 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
+ break;
+
+ case RTL_VER_11:
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
+ fallthrough;
+ case RTL_VER_10:
+ ops->init = r8156_init;
+ ops->enable = rtl8156_enable;
+ ops->disable = rtl8153_disable;
+ ops->up = rtl8156_up;
+ ops->down = rtl8156_down;
+ ops->unload = rtl8153_unload;
+ ops->eee_get = r8153_get_eee;
+ ops->eee_set = r8152_set_eee;
+ ops->in_nway = rtl8153_in_nway;
+ ops->hw_phy_cfg = r8156_hw_phy_cfg;
+ ops->autosuspend_en = rtl8156_runtime_enable;
+ ops->change_mtu = rtl8156_change_mtu;
+ tp->rx_buf_sz = 48 * 1024;
+ tp->support_2500full = 1;
+ break;
+
+ case RTL_VER_12:
+ case RTL_VER_13:
+ tp->support_2500full = 1;
+ fallthrough;
+ case RTL_VER_15:
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
+ ops->init = r8156b_init;
+ ops->enable = rtl8156b_enable;
+ ops->disable = rtl8153_disable;
+ ops->up = rtl8156_up;
+ ops->down = rtl8156_down;
+ ops->unload = rtl8153_unload;
+ ops->eee_get = r8153_get_eee;
+ ops->eee_set = r8152_set_eee;
+ ops->in_nway = rtl8153_in_nway;
+ ops->hw_phy_cfg = r8156b_hw_phy_cfg;
+ ops->autosuspend_en = rtl8156_runtime_enable;
+ ops->change_mtu = rtl8156_change_mtu;
+ tp->rx_buf_sz = 48 * 1024;
+ break;
+
+ case RTL_VER_14:
+ ops->init = r8153c_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8153_disable;
+ ops->up = rtl8153c_up;
+ ops->down = rtl8153b_down;
+ ops->unload = rtl8153_unload;
+ ops->eee_get = r8153_get_eee;
+ ops->eee_set = r8152_set_eee;
+ ops->in_nway = rtl8153_in_nway;
+ ops->hw_phy_cfg = r8153c_hw_phy_cfg;
+ ops->autosuspend_en = rtl8153c_runtime_enable;
+ ops->change_mtu = rtl8153c_change_mtu;
tp->rx_buf_sz = 32 * 1024;
tp->eee_en = true;
tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
@@ -6592,11 +9229,17 @@ static int rtl_ops_init(struct r8152 *tp)
#define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw"
#define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw"
#define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw"
+#define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw"
+#define FIRMWARE_8156A_2 "rtl_nic/rtl8156a-2.fw"
+#define FIRMWARE_8156B_2 "rtl_nic/rtl8156b-2.fw"
MODULE_FIRMWARE(FIRMWARE_8153A_2);
MODULE_FIRMWARE(FIRMWARE_8153A_3);
MODULE_FIRMWARE(FIRMWARE_8153A_4);
MODULE_FIRMWARE(FIRMWARE_8153B_2);
+MODULE_FIRMWARE(FIRMWARE_8153C_1);
+MODULE_FIRMWARE(FIRMWARE_8156A_2);
+MODULE_FIRMWARE(FIRMWARE_8156B_2);
static int rtl_fw_init(struct r8152 *tp)
{
@@ -6622,6 +9265,19 @@ static int rtl_fw_init(struct r8152 *tp)
rtl_fw->pre_fw = r8153b_pre_firmware_1;
rtl_fw->post_fw = r8153b_post_firmware_1;
break;
+ case RTL_VER_11:
+ rtl_fw->fw_name = FIRMWARE_8156A_2;
+ rtl_fw->post_fw = r8156a_post_firmware_1;
+ break;
+ case RTL_VER_13:
+ case RTL_VER_15:
+ rtl_fw->fw_name = FIRMWARE_8156B_2;
+ break;
+ case RTL_VER_14:
+ rtl_fw->fw_name = FIRMWARE_8153C_1;
+ rtl_fw->pre_fw = r8153b_pre_firmware_1;
+ rtl_fw->post_fw = r8153c_post_firmware_1;
+ break;
default:
break;
}
@@ -6677,6 +9333,27 @@ u8 rtl8152_get_version(struct usb_interface *intf)
case 0x6010:
version = RTL_VER_09;
break;
+ case 0x7010:
+ version = RTL_TEST_01;
+ break;
+ case 0x7020:
+ version = RTL_VER_10;
+ break;
+ case 0x7030:
+ version = RTL_VER_11;
+ break;
+ case 0x7400:
+ version = RTL_VER_12;
+ break;
+ case 0x7410:
+ version = RTL_VER_13;
+ break;
+ case 0x6400:
+ version = RTL_VER_14;
+ break;
+ case 0x7420:
+ version = RTL_VER_15;
+ break;
default:
version = RTL_VER_UNKNOWN;
dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
@@ -6701,10 +9378,8 @@ static int rtl8152_probe(struct usb_interface *intf,
if (version == RTL_VER_UNKNOWN)
return -ENODEV;
- if (udev->actconfig->desc.bConfigurationValue != 1) {
- usb_driver_set_configuration(udev, 1);
+ if (!rtl_vendor_mode(intf))
return -ENODEV;
- }
if (intf->cur_altsetting->desc.bNumEndpoints < 3)
return -ENODEV;
@@ -6772,7 +9447,7 @@ static int rtl8152_probe(struct usb_interface *intf,
switch (le16_to_cpu(udev->descriptor.idProduct)) {
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
- set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+ tp->lenovo_macpassthru = 1;
}
}
@@ -6780,7 +9455,7 @@ static int rtl8152_probe(struct usb_interface *intf,
(!strcmp(udev->serial, "000001000000") ||
!strcmp(udev->serial, "000002000000"))) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
- set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+ tp->dell_tb_rx_agg_bug = 1;
}
netdev->ethtool_ops = &ops;
@@ -6789,12 +9464,29 @@ static int rtl8152_probe(struct usb_interface *intf,
/* MTU range: 68 - 1500 or 9194 */
netdev->min_mtu = ETH_MIN_MTU;
switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_08:
+ case RTL_VER_09:
+ case RTL_VER_14:
+ netdev->max_mtu = size_to_mtu(9 * 1024);
+ break;
+ case RTL_VER_10:
+ case RTL_VER_11:
+ netdev->max_mtu = size_to_mtu(15 * 1024);
+ break;
+ case RTL_VER_12:
+ case RTL_VER_13:
+ case RTL_VER_15:
+ netdev->max_mtu = size_to_mtu(16 * 1024);
+ break;
case RTL_VER_01:
case RTL_VER_02:
- netdev->max_mtu = ETH_DATA_LEN;
- break;
+ case RTL_VER_07:
default:
- netdev->max_mtu = RTL8153_MAX_MTU;
+ netdev->max_mtu = ETH_DATA_LEN;
break;
}
@@ -6810,7 +9502,13 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL |
RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL;
if (tp->mii.supports_gmii) {
- tp->speed = SPEED_1000;
+ if (tp->support_2500full &&
+ tp->udev->speed >= USB_SPEED_SUPER) {
+ tp->speed = SPEED_2500;
+ tp->advertising |= RTL_ADVERTISED_2500_FULL;
+ } else {
+ tp->speed = SPEED_1000;
+ }
tp->advertising |= RTL_ADVERTISED_1000_FULL;
}
tp->duplex = DUPLEX_FULL;
@@ -6834,7 +9532,11 @@ static int rtl8152_probe(struct usb_interface *intf,
set_ethernet_addr(tp);
usb_set_intfdata(intf, tp);
- netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT);
+
+ if (tp->support_2500full)
+ netif_napi_add(netdev, &tp->napi, r8152_poll, 256);
+ else
+ netif_napi_add(netdev, &tp->napi, r8152_poll, 64);
ret = register_netdev(netdev);
if (ret != 0) {
@@ -6870,49 +9572,48 @@ static void rtl8152_disconnect(struct usb_interface *intf)
unregister_netdev(tp->netdev);
tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
- tp->rtl_ops.unload(tp);
+ if (tp->rtl_ops.unload)
+ tp->rtl_ops.unload(tp);
rtl8152_release_firmware(tp);
free_netdev(tp->netdev);
}
}
-#define REALTEK_USB_DEVICE(vend, prod) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
- USB_DEVICE_ID_MATCH_INT_CLASS, \
- .idVendor = (vend), \
- .idProduct = (prod), \
- .bInterfaceClass = USB_CLASS_VENDOR_SPEC \
+#define REALTEK_USB_DEVICE(vend, prod) { \
+ USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \
}, \
{ \
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \
- USB_DEVICE_ID_MATCH_DEVICE, \
- .idVendor = (vend), \
- .idProduct = (prod), \
- .bInterfaceClass = USB_CLASS_COMM, \
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
- .bInterfaceProtocol = USB_CDC_PROTO_NONE
+ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \
+}
/* table of devices that work with this driver */
static const struct usb_device_id rtl8152_table[] = {
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)},
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
- {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
- {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
- {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
- {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
- {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
- {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
+ /* Realtek */
+ REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050),
+ REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053),
+ REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152),
+ REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153),
+ REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155),
+ REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156),
+
+ /* Microsoft */
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
+ REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387),
+ REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041),
+ REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff),
+ REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601),
{}
};
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 55a244eca5ca..55025202dc4f 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -629,8 +629,8 @@ static const struct ethtool_ops sierra_net_ethtool_ops = {
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 4353b370249f..f8cdabb9ef5a 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -741,8 +741,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
.set_eeprom = smsc75xx_ethtool_set_eeprom,
.get_wol = smsc75xx_ethtool_get_wol,
.set_wol = smsc75xx_ethtool_set_wol,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 878557ad03ad..ce29261263cd 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -250,8 +250,8 @@ static const struct ethtool_ops sr9700_ethtool_ops = {
.get_eeprom_len = sr9700_get_eeprom_len,
.get_eeprom = sr9700_get_eeprom,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static void sr9700_set_multicast(struct net_device *netdev)
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index da56735d7755..a822d81310d5 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -527,8 +527,8 @@ static const struct ethtool_ops sr9800_ethtool_ops = {
.get_eeprom_len = sr_get_eeprom_len,
.get_eeprom = sr_get_eeprom,
.nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
static int sr9800_link_reset(struct usbnet *dev)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f4f37ecfed58..ecf62849f4c1 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -944,7 +944,10 @@ EXPORT_SYMBOL_GPL(usbnet_open);
* they'll probably want to use this base set.
*/
-int usbnet_get_link_ksettings(struct net_device *net,
+/* These methods are written on the assumption that the device
+ * uses MII.
+ */
+int usbnet_get_link_ksettings_mii(struct net_device *net,
struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
@@ -956,9 +959,30 @@ int usbnet_get_link_ksettings(struct net_device *net,
return 0;
}
-EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
+EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_mii);
+
+int usbnet_get_link_ksettings_internal(struct net_device *net,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ /* The assumption that tx and rx speeds are equal is deeply
+ * ingrained in the networking layer. For wireless devices it
+ * is not true. We assume that rx_speed matters more.
+ */
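+ /* rx_speed/tx_speed are in bps; ethtool reports the speed in Mbps */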
+ if (dev->rx_speed != SPEED_UNSET)
+ cmd->base.speed = dev->rx_speed / 1000000;
+ else if (dev->tx_speed != SPEED_UNSET)
+ cmd->base.speed = dev->tx_speed / 1000000;
+ else
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal);
-int usbnet_set_link_ksettings(struct net_device *net,
+int usbnet_set_link_ksettings_mii(struct net_device *net,
const struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
@@ -978,7 +1002,7 @@ int usbnet_set_link_ksettings(struct net_device *net,
return retval;
}
-EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
+EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);
u32 usbnet_get_link (struct net_device *net)
{
@@ -1043,8 +1067,8 @@ static const struct ethtool_ops usbnet_ethtool_ops = {
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = usbnet_get_link_ksettings,
- .set_link_ksettings = usbnet_set_link_ksettings,
+ .get_link_ksettings = usbnet_get_link_ksettings_mii,
+ .set_link_ksettings = usbnet_set_link_ksettings_mii,
};
/*-------------------------------------------------------------------------*/
@@ -1661,6 +1685,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->intf = udev;
dev->driver_info = info;
dev->driver_name = name;
+ dev->rx_speed = SPEED_UNSET;
+ dev->tx_speed = SPEED_UNSET;
net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!net->tstats)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 34e49c75db42..bdb7ce3cb054 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -57,6 +57,7 @@ struct veth_rq_stats {
struct veth_rq {
struct napi_struct xdp_napi;
+ struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
struct net_device *dev;
struct bpf_prog __rcu *xdp_prog;
struct xdp_mem_info xdp_mem;
@@ -218,6 +219,17 @@ static void veth_get_ethtool_stats(struct net_device *dev,
}
}
+static void veth_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
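+ /* report the current queue counts as both the current and maximum values */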
+ channels->tx_count = dev->real_num_tx_queues;
+ channels->rx_count = dev->real_num_rx_queues;
+ channels->max_tx = dev->real_num_tx_queues;
+ channels->max_rx = dev->real_num_rx_queues;
+ channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
+ channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
+}
+
static const struct ethtool_ops veth_ethtool_ops = {
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -226,6 +238,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_ethtool_stats = veth_get_ethtool_stats,
.get_link_ksettings = veth_get_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_channels = veth_get_channels,
};
/* general routines */
@@ -281,13 +294,32 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
netif_rx(skb);
}
+/* Return true if the specified skb has chances of GRO aggregation.
+ * Don't strive for accuracy, but try to avoid GRO overhead in the most
+ * common scenarios.
+ * When XDP is enabled, all traffic is considered eligible, as the xmit
+ * device has TSO off.
+ * When TSO is enabled on the xmit device, we are likely interested only
+ * in UDP aggregation, so explicitly check for that when the skb is
+ * suspected to belong to locally generated UDP traffic - the sock_wfree
+ * destructor is used by UDP, ICMP and XDP sockets.
+ */
+static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
+ const struct net_device *rcv,
+ const struct sk_buff *skb)
+{
+ return !(dev->features & NETIF_F_ALL_TSO) ||
+ (skb->destructor == sock_wfree &&
+ rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
+}
+
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
- bool rcv_xdp = false;
+ bool use_napi = false;
int rxq;
rcu_read_lock();
@@ -301,20 +333,26 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
- rcv_xdp = rcu_access_pointer(rq->xdp_prog);
+
+ /* The napi pointer is available when an XDP program is
+ * attached or when GRO is enabled.
+ * Don't bother with napi/GRO if the skb can't be aggregated.
+ */
+ use_napi = rcu_access_pointer(rq->napi) &&
+ veth_skb_is_eligible_for_gro(dev, rcv, skb);
skb_record_rx_queue(skb, rxq);
}
skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- if (!rcv_xdp)
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (!use_napi)
dev_lstats_add(dev, length);
} else {
drop:
atomic64_inc(&priv->dropped);
}
- if (rcv_xdp)
+ if (use_napi)
__veth_xdp_flush(rq);
rcu_read_unlock();
@@ -433,7 +471,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
u32 flags, bool ndo_xmit)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
- int i, ret = -ENXIO, drops = 0;
+ int i, ret = -ENXIO, nxmit = 0;
struct net_device *rcv;
unsigned int max_len;
struct veth_rq *rq;
@@ -448,11 +486,10 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
- /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
- * side. This means an XDP program is loaded on the peer and the peer
- * device is up.
+ /* The napi pointer is set if NAPI is enabled, which ensures that
+ * xdp_ring is initialized on receive side and the peer device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog))
+ if (!rcu_access_pointer(rq->napi))
goto out;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
@@ -463,21 +500,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
void *ptr = veth_xdp_to_ptr(frame);
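+ /* stop at the first failed frame; the caller frees any unsent frames */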
if (unlikely(frame->len > max_len ||
- __ptr_ring_produce(&rq->xdp_ring, ptr))) {
- xdp_return_frame_rx_napi(frame);
- drops++;
- }
+ __ptr_ring_produce(&rq->xdp_ring, ptr)))
+ break;
+ nxmit++;
}
spin_unlock(&rq->xdp_ring.producer_lock);
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- ret = n - drops;
+ ret = nxmit;
if (ndo_xmit) {
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.vs.peer_tq_xdp_xmit += n - drops;
- rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+ rq->stats.vs.peer_tq_xdp_xmit += nxmit;
+ rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
u64_stats_update_end(&rq->stats.syncp);
}
@@ -504,20 +540,23 @@ static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
- int sent, i, err = 0;
+ int sent, i, err = 0, drops;
sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
if (sent < 0) {
err = sent;
sent = 0;
- for (i = 0; i < bq->count; i++)
- xdp_return_frame(bq->q[i]);
}
- trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
+
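+ /* return the frames that were not queued to the peer */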
+ for (i = sent; unlikely(i < bq->count); i++)
+ xdp_return_frame(bq->q[i]);
+
+ drops = bq->count - sent;
+ trace_xdp_bulk_tx(rq->dev, sent, drops, err);
u64_stats_update_begin(&rq->stats.syncp);
rq->stats.vs.xdp_tx += sent;
- rq->stats.vs.xdp_tx_err += bq->count - sent;
+ rq->stats.vs.xdp_tx_err += drops;
u64_stats_update_end(&rq->stats.syncp);
bq->count = 0;
@@ -672,7 +711,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
int mac_len, delta, off;
struct xdp_buff xdp;
- skb_orphan(skb);
+ skb_orphan_partial(skb);
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -889,7 +928,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
return done;
}
-static int veth_napi_add(struct net_device *dev)
+static int __veth_napi_enable(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
@@ -906,6 +945,7 @@ static int veth_napi_add(struct net_device *dev)
struct veth_rq *rq = &priv->rq[i];
napi_enable(&rq->xdp_napi);
+ rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
}
return 0;
@@ -924,6 +964,7 @@ static void veth_napi_del(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
+ rcu_assign_pointer(priv->rq[i].napi, NULL);
napi_disable(&rq->xdp_napi);
__netif_napi_del(&rq->xdp_napi);
}
@@ -937,8 +978,14 @@ static void veth_napi_del(struct net_device *dev)
}
}
+static bool veth_gro_requested(const struct net_device *dev)
+{
+ return !!(dev->wanted_features & NETIF_F_GRO);
+}
+
static int veth_enable_xdp(struct net_device *dev)
{
+ bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
int err, i;
@@ -946,7 +993,8 @@ static int veth_enable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ if (!napi_already_on)
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
if (err < 0)
goto err_rxq_reg;
@@ -961,13 +1009,25 @@ static int veth_enable_xdp(struct net_device *dev)
rq->xdp_mem = rq->xdp_rxq.mem;
}
- err = veth_napi_add(dev);
- if (err)
- goto err_rxq_reg;
+ if (!napi_already_on) {
+ err = __veth_napi_enable(dev);
+ if (err)
+ goto err_rxq_reg;
+
+ if (!veth_gro_requested(dev)) {
+ /* user-space did not require GRO, but adding XDP
+ * is supposed to get GRO working
+ */
+ dev->features |= NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+ }
}
- for (i = 0; i < dev->real_num_rx_queues; i++)
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
+ rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
+ }
return 0;
err_reg_mem:
@@ -977,7 +1037,8 @@ err_rxq_reg:
struct veth_rq *rq = &priv->rq[i];
xdp_rxq_info_unreg(&rq->xdp_rxq);
- netif_napi_del(&rq->xdp_napi);
+ if (!napi_already_on)
+ netif_napi_del(&rq->xdp_napi);
}
return err;
@@ -990,7 +1051,19 @@ static void veth_disable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++)
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
- veth_napi_del(dev);
+
+ if (!netif_running(dev) || !veth_gro_requested(dev)) {
+ veth_napi_del(dev);
+
+ /* GRO was only enabled because XDP was added; user-space
+ * did not require it, so clear it now
+ */
+ if (!veth_gro_requested(dev) && netif_running(dev)) {
+ dev->features &= ~NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+ }
+
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
@@ -999,6 +1072,29 @@ static void veth_disable_xdp(struct net_device *dev)
}
}
+static int veth_napi_enable(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ }
+
+ err = __veth_napi_enable(dev);
+ if (err) {
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ netif_napi_del(&rq->xdp_napi);
+ }
+ return err;
+ }
+ return err;
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -1012,6 +1108,10 @@ static int veth_open(struct net_device *dev)
err = veth_enable_xdp(dev);
if (err)
return err;
+ } else if (veth_gro_requested(dev)) {
+ err = veth_napi_enable(dev);
+ if (err)
+ return err;
}
if (peer->flags & IFF_UP) {
@@ -1033,6 +1133,8 @@ static int veth_close(struct net_device *dev)
if (priv->_xdp_prog)
veth_disable_xdp(dev);
+ else if (veth_gro_requested(dev))
+ veth_napi_del(dev);
return 0;
}
@@ -1131,10 +1233,32 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
if (peer_priv->_xdp_prog)
features &= ~NETIF_F_GSO_SOFTWARE;
}
+ if (priv->_xdp_prog)
+ features |= NETIF_F_GRO;
return features;
}
+static int veth_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ struct veth_priv *priv = netdev_priv(dev);
+ int err;
+
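+ /* NAPI only needs toggling when GRO changes on a running device without XDP */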
+ if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
+ return 0;
+
+ if (features & NETIF_F_GRO) {
+ err = veth_napi_enable(dev);
+ if (err)
+ return err;
+ } else {
+ veth_napi_del(dev);
+ }
+ return 0;
+}
+
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
struct veth_priv *peer_priv, *priv = netdev_priv(dev);
@@ -1253,6 +1377,7 @@ static const struct net_device_ops veth_netdev_ops = {
#endif
.ndo_get_iflink = veth_get_iflink,
.ndo_fix_features = veth_fix_features,
+ .ndo_set_features = veth_set_features,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
@@ -1315,6 +1440,13 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
static struct rtnl_link_ops veth_link_ops;
+static void veth_disable_gro(struct net_device *dev)
+{
+ dev->features &= ~NETIF_F_GRO;
+ dev->wanted_features &= ~NETIF_F_GRO;
+ netdev_update_features(dev);
+}
+
static int veth_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1387,6 +1519,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
goto err_register_peer;
+ /* keep GRO disabled by default to be consistent with the established
+ * veth behavior
+ */
+ veth_disable_gro(peer);
netif_carrier_off(peer);
err = rtnl_configure_link(peer, ifmp);
@@ -1424,6 +1560,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
priv = netdev_priv(peer);
rcu_assign_pointer(priv->peer, dev);
+ veth_disable_gro(dev);
return 0;
err_register_dev:
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0824e6999e49..9b6a4a875c55 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -195,6 +195,9 @@ struct virtnet_info {
/* # of XDP queue pairs currently used by the driver */
u16 xdp_queue_pairs;
+ /* xdp_queue_pairs may be 0 even when XDP is loaded, so track the XDP state explicitly. */
+ bool xdp_enabled;
+
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -376,21 +379,18 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
- bool hdr_valid, unsigned int metasize)
+ bool hdr_valid, unsigned int metasize,
+ unsigned int headroom)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
unsigned int copy, hdr_len, hdr_padded_len;
- char *p;
+ struct page *page_to_free = NULL;
+ int tailroom, shinfo_size;
+ char *p, *hdr_p, *buf;
p = page_address(page) + offset;
-
- /* copy small packet so we can reuse these pages for small data */
- skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
- if (unlikely(!skb))
- return NULL;
-
- hdr = skb_vnet_hdr(skb);
+ hdr_p = p;
hdr_len = vi->hdr_len;
if (vi->mergeable_rx_bufs)
@@ -398,14 +398,44 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- /* hdr_valid means no XDP, so we can copy the vnet header */
- if (hdr_valid)
- memcpy(hdr, p, hdr_len);
+ /* If headroom is not 0, there is an offset between the beginning of the
+ * data and the allocated space, otherwise the data and the allocated
+ * space are aligned.
+ */
+ if (headroom) {
+ /* Buffers with headroom use PAGE_SIZE as alloc size,
+ * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+ */
+ truesize = PAGE_SIZE;
+ tailroom = truesize - len - offset;
+ buf = page_address(page);
+ } else {
+ tailroom = truesize - len;
+ buf = p;
+ }
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
+ shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ /* build the skb around the existing page when there is enough tailroom, avoiding a copy */
+ if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
+ skb = build_skb(buf, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, p - buf);
+ skb_put(skb, len);
+ goto ok;
+ }
+
+ /* copy small packet so we can reuse these pages for small data */
+ skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
/* Copy the whole frame if it fits skb->head, otherwise
* we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
*/
@@ -415,11 +445,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
copy = ETH_HLEN + metasize;
skb_put_data(skb, p, copy);
- if (metasize) {
- __skb_pull(skb, metasize);
- skb_metadata_set(skb, metasize);
- }
-
len -= copy;
offset += copy;
@@ -427,8 +452,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
if (len)
skb_add_rx_frag(skb, 0, page, offset, len, truesize);
else
- put_page(page);
- return skb;
+ page_to_free = page;
+ goto ok;
}
/*
@@ -455,6 +480,20 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
if (page)
give_pages(rq, page);
+ok:
+ /* hdr_valid means no XDP, so we can copy the vnet header */
+ if (hdr_valid) {
+ hdr = skb_vnet_hdr(skb);
+ memcpy(hdr, hdr_p, hdr_len);
+ }
+ if (page_to_free)
+ put_page(page_to_free);
+
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
return skb;
}
@@ -485,12 +524,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
return 0;
}
-static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
-{
- unsigned int qp;
-
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- return &vi->sq[qp];
+/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
+ * the current cpu, so it does not need to be locked.
+ *
+ * Here we use a macro instead of inline functions because we have to deal with
+ * three issues at the same time: 1. the choice of sq; 2. judging and executing
+ * the lock/unlock of txq; 3. keeping sparse happy. It is difficult for two
+ * inline functions to solve these three problems at the same time perfectly.
+ */
+#define virtnet_xdp_get_sq(vi) ({ \
+ struct netdev_queue *txq; \
+ typeof(vi) v = (vi); \
+ unsigned int qp; \
+ \
+ if (v->curr_queue_pairs > nr_cpu_ids) { \
+ qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
+ qp += smp_processor_id(); \
+ txq = netdev_get_tx_queue(v->dev, qp); \
+ __netif_tx_acquire(txq); \
+ } else { \
+ qp = smp_processor_id() % v->curr_queue_pairs; \
+ txq = netdev_get_tx_queue(v->dev, qp); \
+ __netif_tx_lock(txq, raw_smp_processor_id()); \
+ } \
+ v->sq + qp; \
+})
+
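+/* Release the txq lock (or reference) taken by virtnet_xdp_get_sq(). */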
+#define virtnet_xdp_put_sq(vi, q) { \
+ struct netdev_queue *txq; \
+ typeof(vi) v = (vi); \
+ \
+ txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
+ if (v->curr_queue_pairs > nr_cpu_ids) \
+ __netif_tx_release(txq); \
+ else \
+ __netif_tx_unlock(txq); \
}
static int virtnet_xdp_xmit(struct net_device *dev,
@@ -503,10 +571,10 @@ static int virtnet_xdp_xmit(struct net_device *dev,
unsigned int len;
int packets = 0;
int bytes = 0;
- int drops = 0;
+ int nxmit = 0;
int kicks = 0;
- int ret, err;
void *ptr;
+ int ret;
int i;
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
@@ -516,11 +584,10 @@ static int virtnet_xdp_xmit(struct net_device *dev,
if (!xdp_prog)
return -ENXIO;
- sq = virtnet_xdp_sq(vi);
+ sq = virtnet_xdp_get_sq(vi);
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
- drops = n;
goto out;
}
@@ -543,13 +610,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
- err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
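+ /* stop on the first failure; unsent frames are freed by the caller */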
+ if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
+ break;
+ nxmit++;
}
- ret = n - drops;
+ ret = nxmit;
if (flags & XDP_XMIT_FLUSH) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
@@ -560,16 +625,17 @@ out:
sq->stats.bytes += bytes;
sq->stats.packets += packets;
sq->stats.xdp_tx += n;
- sq->stats.xdp_tx_drops += drops;
+ sq->stats.xdp_tx_drops += n - nxmit;
sq->stats.kicks += kicks;
u64_stats_update_end(&sq->stats.syncp);
+ virtnet_xdp_put_sq(vi, sq);
return ret;
}
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
- return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+ return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}
/* We copy the packet for XDP in the following cases:
@@ -713,7 +779,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
- if (unlikely(err < 0)) {
+ if (unlikely(!err)) {
+ xdp_return_frame_rx_napi(xdpf);
+ } else if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
@@ -776,7 +844,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
{
struct page *page = buf;
struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -890,7 +958,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
- metasize);
+ metasize, headroom);
return head_skb;
}
break;
@@ -900,7 +968,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
- if (unlikely(err < 0)) {
+ if (unlikely(!err)) {
+ xdp_return_frame_rx_napi(xdpf);
+ } else if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
if (unlikely(xdp_page != page))
put_page(xdp_page);
@@ -946,7 +1016,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
- metasize);
+ metasize, headroom);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1462,12 +1532,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
xdp_do_flush();
if (xdp_xmit & VIRTIO_XDP_TX) {
- sq = virtnet_xdp_sq(vi);
+ sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
sq->stats.kicks++;
u64_stats_update_end(&sq->stats.syncp);
}
+ virtnet_xdp_put_sq(vi, sq);
}
return received;
@@ -1985,7 +2056,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
}
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
- __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+ __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
cpumask_clear(mask);
}
@@ -2108,25 +2179,21 @@ static int virtnet_set_channels(struct net_device *dev,
static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct virtnet_info *vi = netdev_priv(dev);
- char *p = (char *)data;
unsigned int i, j;
+ u8 *p = data;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < vi->curr_queue_pairs; i++) {
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
- i, virtnet_rq_stats_desc[j].desc);
- p += ETH_GSTRING_LEN;
- }
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
+ ethtool_sprintf(&p, "rx_queue_%u_%s", i,
+ virtnet_rq_stats_desc[j].desc);
}
for (i = 0; i < vi->curr_queue_pairs; i++) {
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
- i, virtnet_sq_stats_desc[j].desc);
- p += ETH_GSTRING_LEN;
- }
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
+ ethtool_sprintf(&p, "tx_queue_%u_%s", i,
+ virtnet_sq_stats_desc[j].desc);
}
break;
}
@@ -2422,10 +2489,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
- NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
- netdev_warn(dev, "request %i queues but max is %i\n",
+ netdev_warn(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
curr_qp + xdp_qp, vi->max_queue_pairs);
- return -ENOMEM;
+ xdp_qp = 0;
}
old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
@@ -2459,11 +2525,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_queue_pairs = xdp_qp;
if (prog) {
+ vi->xdp_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++) {
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
}
+ } else {
+ vi->xdp_enabled = false;
}
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2531,7 +2600,7 @@ static int virtnet_set_features(struct net_device *dev,
int err;
if ((dev->features ^ features) & NETIF_F_LRO) {
- if (vi->xdp_queue_pairs)
+ if (vi->xdp_enabled)
return -EBUSY;
if (features & NETIF_F_LRO)
@@ -2801,9 +2870,13 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
{
int i;
- vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
- if (!vi->ctrl)
- goto err_ctrl;
+ if (vi->has_cvq) {
+ vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+ if (!vi->ctrl)
+ goto err_ctrl;
+ } else {
+ vi->ctrl = NULL;
+ }
vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
if (!vi->sq)
goto err_sq;
@@ -2977,7 +3050,8 @@ static int virtnet_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Set up network device as normal. */
- dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
+ IFF_TX_SKB_NO_LINEAR;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7ec8652f2c26..c0bd9cbc43b1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -218,43 +218,28 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- if (stringset == ETH_SS_STATS) {
- int i, j;
- for (j = 0; j < adapter->num_tx_queues; j++) {
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
- memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
- i++) {
- memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- }
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int i, j;
- for (j = 0; j < adapter->num_rx_queues; j++) {
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
- memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
- i++) {
- memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- }
+ if (stringset != ETH_SS_STATS)
+ return;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
- memcpy(buf, vmxnet3_global_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc);
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc);
+ }
+
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc);
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc);
}
+
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc);
}
netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 53dbc67e8a34..02a14f1b938a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3494,6 +3494,7 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
if (err < 0)
return ERR_PTR(err);
+ udp_allow_gso(sock->sk);
return sock;
}
@@ -3713,6 +3714,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
#if IS_ENABLED(CONFIG_IPV6)
if (use_ipv6) {
struct inet6_dev *idev = __in6_dev_get(lowerdev);
+
if (idev && idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack,
"IPv6 support disabled by administrator");
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 686a25d3b512..5de71e44fc5a 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -573,7 +573,7 @@ static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q);
static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q);
static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
-static spinlock_t fst_work_q_lock;
+static DEFINE_SPINLOCK(fst_work_q_lock);
static u64 fst_work_txq;
static u64 fst_work_intq;
@@ -2648,7 +2648,6 @@ fst_init(void)
for (i = 0; i < FST_MAX_CARDS; i++)
fst_card_array[i] = NULL;
- spin_lock_init(&fst_work_q_lock);
return pci_register_driver(&fst_driver);
}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 5a6a945f6c81..ba8c36c7ea91 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -25,6 +25,8 @@ struct x25_state {
x25_hdlc_proto settings;
bool up;
spinlock_t up_lock; /* Protects "up" */
+ struct sk_buff_head rx_queue;
+ struct tasklet_struct rx_tasklet;
};
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
@@ -34,14 +36,27 @@ static struct x25_state *state(hdlc_device *hdlc)
return hdlc->state;
}
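+/* Tasklet handler: deliver queued rx skbs to the stack from softirq context. */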
+static void x25_rx_queue_kick(struct tasklet_struct *t)
+{
+ struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
+ struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);
+
+ while (skb) {
+ netif_receive_skb_core(skb);
+ skb = skb_dequeue(&x25st->rx_queue);
+ }
+}
+
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
{
+ struct x25_state *x25st = state(dev_to_hdlc(dev));
struct sk_buff *skb;
unsigned char *ptr;
- if ((skb = dev_alloc_skb(1)) == NULL) {
+ skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
+ if (!skb) {
netdev_err(dev, "out of memory\n");
return;
}
@@ -50,7 +65,9 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
*ptr = code;
skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
+
+ skb_queue_tail(&x25st->rx_queue, skb);
+ tasklet_schedule(&x25st->rx_tasklet);
}
@@ -71,6 +88,7 @@ static void x25_disconnected(struct net_device *dev, int reason)
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
+ struct x25_state *x25st = state(dev_to_hdlc(dev));
unsigned char *ptr;
if (skb_cow(skb, 1)) {
@@ -84,7 +102,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
- return netif_rx(skb);
+
+ skb_queue_tail(&x25st->rx_queue, skb);
+ tasklet_schedule(&x25st->rx_tasklet);
+ return NET_RX_SUCCESS;
}
@@ -223,6 +244,7 @@ static void x25_close(struct net_device *dev)
spin_unlock_bh(&x25st->up_lock);
lapb_unregister(dev);
+ tasklet_kill(&x25st->rx_tasklet);
}
@@ -338,6 +360,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
state(hdlc)->up = false;
spin_lock_init(&state(hdlc)->up_lock);
+ skb_queue_head_init(&state(hdlc)->rx_queue);
+ tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);
/* There's no header_ops so hard_header_len should be 0. */
dev->hard_header_len = 0;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index c3372498f4f1..59646865a3a4 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -51,6 +51,10 @@ struct lapbethdev {
struct list_head node;
struct net_device *ethdev; /* link to ethernet device */
struct net_device *axdev; /* lapbeth device (lapb#) */
+ bool up;
+ spinlock_t up_lock; /* Protects "up" */
+ struct sk_buff_head rx_queue;
+ struct napi_struct napi;
};
static LIST_HEAD(lapbeth_devices);
@@ -81,6 +85,26 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
/* ------------------------------------------------------------------------ */
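+/* NAPI poll: deliver up to @budget queued rx skbs to the network stack. */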
+static int lapbeth_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct lapbethdev *lapbeth = container_of(napi, struct lapbethdev,
+ napi);
+ struct sk_buff *skb;
+ int processed = 0;
+
+ for (; processed < budget; ++processed) {
+ skb = skb_dequeue(&lapbeth->rx_queue);
+ if (!skb)
+ break;
+ netif_receive_skb_core(skb);
+ }
+
+ if (processed < budget)
+ napi_complete(napi);
+
+ return processed;
+}
+
/*
* Receive a LAPB frame via an ethernet interface.
*/
@@ -101,8 +125,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
rcu_read_lock();
lapbeth = lapbeth_get_x25_dev(dev);
if (!lapbeth)
- goto drop_unlock;
- if (!netif_running(lapbeth->axdev))
+ goto drop_unlock_rcu;
+ spin_lock_bh(&lapbeth->up_lock);
+ if (!lapbeth->up)
goto drop_unlock;
len = skb->data[0] + skb->data[1] * 256;
@@ -117,11 +142,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
goto drop_unlock;
}
out:
+ spin_unlock_bh(&lapbeth->up_lock);
rcu_read_unlock();
return 0;
drop_unlock:
kfree_skb(skb);
goto out;
+drop_unlock_rcu:
+ rcu_read_unlock();
drop:
kfree_skb(skb);
return 0;
@@ -129,6 +157,7 @@ drop:
static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
if (skb_cow(skb, 1)) {
@@ -142,7 +171,10 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
- return netif_rx(skb);
+
+ skb_queue_tail(&lapbeth->rx_queue, skb);
+ napi_schedule(&lapbeth->napi);
+ return NET_RX_SUCCESS;
}
/*
@@ -151,13 +183,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
- /*
- * Just to be *really* sure not to send anything if the interface
- * is down, the ethernet device may have gone.
- */
- if (!netif_running(dev))
+ spin_lock_bh(&lapbeth->up_lock);
+ if (!lapbeth->up)
goto drop;
/* There should be a pseudo header of 1 byte added by upper layers.
@@ -194,6 +224,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
goto drop;
}
out:
+ spin_unlock_bh(&lapbeth->up_lock);
return NETDEV_TX_OK;
drop:
kfree_skb(skb);
@@ -228,8 +259,9 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
static void lapbeth_connected(struct net_device *dev, int reason)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
- struct sk_buff *skb = dev_alloc_skb(1);
+ struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb) {
pr_err("out of memory\n");
@@ -240,13 +272,16 @@ static void lapbeth_connected(struct net_device *dev, int reason)
*ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
+
+ skb_queue_tail(&lapbeth->rx_queue, skb);
+ napi_schedule(&lapbeth->napi);
}
static void lapbeth_disconnected(struct net_device *dev, int reason)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
- struct sk_buff *skb = dev_alloc_skb(1);
+ struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb) {
pr_err("out of memory\n");
@@ -257,7 +292,9 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
*ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
+
+ skb_queue_tail(&lapbeth->rx_queue, skb);
+ napi_schedule(&lapbeth->napi);
}
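[Editor's note] Both control-frame allocations switch from dev_alloc_skb(1) to __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC). dev_alloc_skb() already implies GFP_ATOMIC; the added __GFP_NOMEMALLOC forbids taking the allocation from the emergency memory reserves, which matters now that these skbs can sit on the driver's rx_queue until NAPI runs instead of being consumed immediately. A sketch of the equivalence (helper name is hypothetical):

#include <linux/skbuff.h>

/* hypothetical helper; dev_alloc_skb(len) behaves like
 * __dev_alloc_skb(len, GFP_ATOMIC)
 */
static struct sk_buff *foo_alloc_ctrl_skb(void)
{
	/* __GFP_NOMEMALLOC: never dip into the emergency reserves for
	 * an skb that may be queued for an unbounded time
	 */
	return __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
}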
/*
@@ -285,23 +322,37 @@ static const struct lapb_register_struct lapbeth_callbacks = {
*/
static int lapbeth_open(struct net_device *dev)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
+ napi_enable(&lapbeth->napi);
+
if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
+ spin_lock_bh(&lapbeth->up_lock);
+ lapbeth->up = true;
+ spin_unlock_bh(&lapbeth->up_lock);
+
return 0;
}
static int lapbeth_close(struct net_device *dev)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
+ spin_lock_bh(&lapbeth->up_lock);
+ lapbeth->up = false;
+ spin_unlock_bh(&lapbeth->up_lock);
+
if ((err = lapb_unregister(dev)) != LAPB_OK)
pr_err("lapb_unregister error: %d\n", err);
+ napi_disable(&lapbeth->napi);
+
return 0;
}
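[Editor's note] The up flag plus up_lock closes the race that netif_running() could not: the running bit flips before ndo_stop executes, so a concurrent transmit could still be inside the lapb_* calls while lapb_unregister() tore the state down. Here close() clears up under the lock first, so any datapath user either bails out early or completes before unregistration proceeds. The pattern in isolation, with hypothetical foo_* names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	bool up;
	spinlock_t up_lock;	/* protects "up" against close() */
};

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct foo_priv *p = netdev_priv(ndev);

	spin_lock_bh(&p->up_lock);
	if (!p->up) {			/* interface mid-close: drop */
		spin_unlock_bh(&p->up_lock);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	/* ... hand skb to the protocol; close() cannot race us here ... */
	spin_unlock_bh(&p->up_lock);
	return NETDEV_TX_OK;
}

static int foo_stop(struct net_device *ndev)
{
	struct foo_priv *p = netdev_priv(ndev);

	spin_lock_bh(&p->up_lock);
	p->up = false;			/* from here on, xmit/rcv bail out */
	spin_unlock_bh(&p->up_lock);
	/* now safe to unregister the protocol: no user is inside it */
	return 0;
}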
@@ -356,6 +407,12 @@ static int lapbeth_new_device(struct net_device *dev)
dev_hold(dev);
lapbeth->ethdev = dev;
+ lapbeth->up = false;
+ spin_lock_init(&lapbeth->up_lock);
+
+ skb_queue_head_init(&lapbeth->rx_queue);
+ netif_napi_add(ndev, &lapbeth->napi, lapbeth_napi_poll, 16);
+
rc = -EIO;
if (register_netdevice(ndev))
goto fail;
@@ -403,8 +460,8 @@ static int lapbeth_device_event(struct notifier_block *this,
if (lapbeth_get_x25_dev(dev) == NULL)
lapbeth_new_device(dev);
break;
- case NETDEV_DOWN:
- /* ethernet device closed -> close LAPB interface */
+ case NETDEV_GOING_DOWN:
+ /* ethernet device closes -> close LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
if (lapbeth)
dev_close(lapbeth->axdev);
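[Editor's note] Switching the notifier case from NETDEV_DOWN to NETDEV_GOING_DOWN is deliberate: GOING_DOWN is delivered while the ethernet device can still pass traffic, so closing the LAPB interface here lets its disconnect exchange reach the wire; by NETDEV_DOWN the underlying device is already stopped. A sketch of a notifier keyed on that ordering (foo_* names hypothetical):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_device_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_GOING_DOWN:
		/* dev still transmits: tear down dependents now, while
		 * their farewell frames can still go out through dev
		 */
		netdev_dbg(dev, "going down, closing dependents\n");
		break;
	case NETDEV_DOWN:
		/* dev has stopped: too late to transmit anything */
		netdev_dbg(dev, "down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_notifier = {
	.notifier_call = foo_device_event,
};
/* registered once with register_netdevice_notifier(&foo_notifier) */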
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 1081d171e477..462cb620bc5d 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -327,45 +327,6 @@ struct z8530_channel
void *private; /* For our owner */
struct net_device *netdevice; /* Network layer device */
- /*
- * Async features
- */
-
- struct tty_struct *tty; /* Attached terminal */
- int line; /* Minor number */
- wait_queue_head_t open_wait; /* Tasks waiting to open */
- wait_queue_head_t close_wait; /* and for close to end */
- unsigned long event; /* Pending events */
- int fdcount; /* # of fd on device */
- int blocked_open; /* # of blocked opens */
- int x_char; /* XON/XOF char */
- unsigned char *xmit_buf; /* Transmit pointer */
- int xmit_head; /* Transmit ring */
- int xmit_tail;
- int xmit_cnt;
- int flags;
- int timeout;
- int xmit_fifo_size; /* Transmit FIFO info */
-
- int close_delay; /* Do we wait for drain on close ? */
- unsigned short closing_wait;
-
- /* We need to know the current clock divisor
- * to read the bps rate the chip has currently
- * loaded.
- */
-
- unsigned char clk_divisor; /* May be 1, 16, 32, or 64 */
- int zs_baud;
-
- int magic;
- int baud_base; /* Baud parameters */
- int custom_divisor;
-
-
- unsigned char tx_active; /* character is being xmitted */
- unsigned char tx_stopped; /* output is suspended */
-
spinlock_t *lock; /* Device lock */
};
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 0a37be6a7d33..fab398046a3f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
ath10k_dbg(ar, ATH10K_DBG_HTC,
"bundle tx status %d eid %d req count %d count %d len %d\n",
- ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
+ ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
return ret;
}
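[Editor's note] The one-line htc.c change is a use-after-free repair: the bundle skb is handed to the transmit path before the debug print runs, and may already be freed by the time bundle_skb->len would be read, so the length is captured into skb_len beforehand. The general rule, sketched:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: read skb fields before the skb changes ownership. */
static int foo_send(struct sk_buff *skb)
{
	unsigned int len = skb->len;	/* capture while we still own it */
	int ret;

	ret = dev_queue_xmit(skb);	/* consumes the skb on all paths */
	pr_debug("tx status %d len %u\n", ret, len);	/* no skb deref */
	return ret;
}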
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index d66593f0950f..ea00fbb15601 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1759,17 +1759,11 @@ err_core_destroy:
return ret;
}
-static int ath10k_snoc_remove(struct platform_device *pdev)
+static int ath10k_snoc_free_resources(struct ath10k *ar)
{
- struct ath10k *ar = platform_get_drvdata(pdev);
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
-
- reinit_completion(&ar->driver_recovery);
-
- if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
- wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n");
set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
@@ -1783,12 +1777,29 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
return 0;
}
+static int ath10k_snoc_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
+
+ reinit_completion(&ar->driver_recovery);
+
+ if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
+
+ ath10k_snoc_free_resources(ar);
+
+ return 0;
+}
+
static void ath10k_snoc_shutdown(struct platform_device *pdev)
{
struct ath10k *ar = platform_get_drvdata(pdev);
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
- ath10k_snoc_remove(pdev);
+ ath10k_snoc_free_resources(ar);
}
static struct platform_driver ath10k_snoc_driver = {
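[Editor's note] The snoc refactor separates policy from mechanism: ath10k_snoc_remove() still waits up to three seconds for in-flight recovery before tearing down, while the new .shutdown path calls ath10k_snoc_free_resources() directly, since blocking on possibly wedged firmware during reboot only stalls the machine. The split in skeleton form (foo_* names hypothetical):

#include <linux/platform_device.h>

struct foo_dev { int dummy; };	/* hypothetical device state */

static void foo_free_resources(struct foo_dev *ar)
{
	/* teardown shared by driver removal and system reboot */
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_dev *ar = platform_get_drvdata(pdev);

	/* unbind path: may wait briefly for recovery to settle */
	foo_free_resources(ar);
	return 0;
}

static void foo_shutdown(struct platform_device *pdev)
{
	/* reboot path: no waiting, just quiesce and free */
	foo_free_resources(platform_get_drvdata(pdev));
}

static struct platform_driver foo_driver = {
	.remove		= foo_remove,
	.shutdown	= foo_shutdown,
	.driver		= { .name = "foo" },
};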
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index d97b33f789e4..7efbe03fbca8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
GFP_ATOMIC
);
break;
+ default:
+ kfree(tb);
+ return;
}
exit:
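[Editor's note] The wmi-tlv hunk plugs a leak: the handler allocates a TLV parse buffer (tb) up front, and event types outside the listed cases previously returned through a path that never freed it. Every exit from such a switch must release the buffer, as sketched here with hypothetical helpers:

#include <linux/err.h>
#include <linux/slab.h>

struct foo_dev;
void *foo_tlv_parse(const void *buf);	/* hypothetical, kmalloc-backed */
int foo_event_id(const void *buf);	/* hypothetical */
void foo_dispatch(struct foo_dev *fd, void *tb);

void foo_handle_event(struct foo_dev *fd, const void *buf)
{
	void *tb = foo_tlv_parse(buf);

	if (IS_ERR(tb))
		return;

	switch (foo_event_id(buf)) {
	case 1:				/* known event type */
		foo_dispatch(fd, tb);
		break;
	default:
		kfree(tb);		/* unknown id: free before returning */
		return;
	}

	kfree(tb);			/* normal exit path */
}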
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index d4ef45cd0685..8c9c781afc3e 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -373,7 +373,7 @@ static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
cfg->tgt_ce = ab->hw_params.target_ce_config;
cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
- ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074;
+ ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}
static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index 987c65010272..de8b632b058c 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -187,6 +187,59 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = {
};
+const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+};
+
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
/* only ce4 needs the shadow workaround */
@@ -455,7 +508,7 @@ static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_
struct hal_srng_params *ring_params)
{
u32 msi_data_start;
- u32 msi_data_count;
+ u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
u32 addr_lo;
u32 addr_hi;
@@ -469,10 +522,11 @@ static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_
return;
ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+ ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
ring_params->msi_addr = addr_lo;
ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
- ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
+ ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
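[Editor's note] Indirecting through ath11k_get_ce_msi_idx() decouples the CE number from the MSI vector slot: existing buses can keep the identity mapping, while a bus whose early vectors are reserved for other interrupt users can remap. A plausible default, hedged as an assumption about the helper's body:

#include <linux/types.h>

/* Sketch of an identity default; real buses may remap ce_id to skip
 * reserved vectors (body is an assumption, not the driver's code).
 */
static inline void foo_get_ce_msi_idx(u32 ce_id, u32 *msi_data_idx)
{
	*msi_data_idx = ce_id;
}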
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index d6eeef919349..713f766cac22 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -173,6 +173,7 @@ struct ath11k_ce {
extern const struct ce_attr ath11k_host_ce_config_ipq8074[];
extern const struct ce_attr ath11k_host_ce_config_qca6390[];
+extern const struct ce_attr ath11k_host_ce_config_qcn9074[];
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
void ath11k_ce_rx_replenish_retry(struct timer_list *t);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 350b7913622c..77ce3347ab86 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -45,6 +45,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ring_mask = &ath11k_hw_ring_mask_ipq8074,
.internal_sleep_clock = false,
.regs = &ipq8074_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
.host_ce_config = ath11k_host_ce_config_ipq8074,
.ce_count = 12,
.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
@@ -68,6 +69,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.cold_boot_calib = true,
.supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -83,6 +85,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ring_mask = &ath11k_hw_ring_mask_ipq8074,
.internal_sleep_clock = false,
.regs = &ipq8074_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
.host_ce_config = ath11k_host_ce_config_ipq8074,
.ce_count = 12,
.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
@@ -106,6 +109,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.cold_boot_calib = true,
.supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
},
{
.name = "qca6390 hw2.0",
@@ -121,6 +125,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &qca6390_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
@@ -143,6 +148,44 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.cold_boot_calib = false,
.supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ },
+ {
+ .name = "qcn9074 hw1.0",
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ .fw = {
+ .dir = "QCN9074/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
+ .hw_ops = &qcn9074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qcn9074,
+ .internal_sleep_clock = false,
+ .regs = &qcn9074_regs,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = 6,
+ .target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
+ .svc_to_ce_map_len = 18,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .tcl_0_only = false,
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .cold_boot_calib = false,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
},
};
@@ -974,7 +1017,7 @@ static int ath11k_init_hw_params(struct ath11k_base *ab)
ab->hw_params = *hw_params;
- ath11k_dbg(ab, ATH11K_DBG_BOOT, "Hardware name %s\n", ab->hw_params.name);
+ ath11k_info(ab, "%s\n", ab->hw_params.name);
return 0;
}
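[Editor's note] The new hal_desc_sz entry, set per chip from sizeof() of the matching descriptor layout, replaces the compile-time HAL_RX_DESC_SIZE throughout dp_rx.c below: QCN9074's RX descriptor is a different length, so every payload offset must come from hw_params at runtime. The idiom, reduced to a self-contained sketch (foo_* names hypothetical):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct foo_hw_params { u32 hal_desc_sz; };	/* stand-in for hw_params */

/* the 802.11 header sits immediately after the per-chip HAL descriptor */
static struct ieee80211_hdr *foo_msdu_hdr(const struct foo_hw_params *hw,
					  struct sk_buff *msdu)
{
	return (struct ieee80211_hdr *)(msdu->data + hw->hal_desc_sz);
}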
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 8d29845774df..55af982deca7 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -34,6 +34,7 @@
#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
#define ATH11K_INVALID_HW_MAC_ID 0xFF
+#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
extern unsigned int ath11k_frame_mode;
@@ -105,6 +106,7 @@ enum ath11k_hw_rev {
ATH11K_HW_IPQ8074,
ATH11K_HW_QCA6390_HW20,
ATH11K_HW_IPQ6018_HW10,
+ ATH11K_HW_QCN9074_HW10,
};
enum ath11k_firmware_mode {
@@ -234,6 +236,7 @@ struct ath11k_vif {
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
@@ -607,6 +610,7 @@ struct ath11k_bus_params {
bool m3_fw_support;
bool fixed_bdf_addr;
bool fixed_mem_region;
+ bool static_window_map;
};
/* IPQ8074 HW channel counters frequency value in hertz */
@@ -876,6 +880,8 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[];
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
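[Editor's note] The new connection_loss_work together with ATH11K_CONNECTION_LOSS_HZ (3 * HZ) suggests debounced beacon-miss handling: arm a delayed work when firmware reports a beacon miss, cancel it if beacons resume, and report the loss to mac80211 only if the timer actually fires. A generic sketch of that shape (foo_* names hypothetical):

#include <linux/workqueue.h>

struct foo_vif {
	struct delayed_work connection_loss_work;
};

static void foo_connection_loss_fn(struct work_struct *work)
{
	struct foo_vif *vp = container_of(to_delayed_work(work),
					  struct foo_vif,
					  connection_loss_work);
	/* still no beacon after the grace period: report the loss */
	(void)vp;
}

static void foo_on_beacon_miss(struct foo_vif *vp)
{
	schedule_delayed_work(&vp->connection_loss_work, 3 * HZ);
}

static void foo_on_beacon_rx(struct foo_vif *vp)
{
	cancel_delayed_work(&vp->connection_loss_work);
}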
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index e13684343ec3..ec93f14e6d2a 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -3851,7 +3851,7 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
htt_stats_buf->num_non_srg_ppdu_tried);
len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunies = %u\n",
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n",
htt_stats_buf->num_srg_opportunities);
len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_tried);
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 850ad38b888f..1d9aa1bb6b6e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -20,95 +20,102 @@
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
-static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
+static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
- return desc->hdr_status;
+ return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
-static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
+static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- if (!(__le32_to_cpu(desc->mpdu_start.info1) &
- RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
+ if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
- return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
- __le32_to_cpu(desc->mpdu_start.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
- __le32_to_cpu(desc->msdu_start.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
- __le32_to_cpu(desc->msdu_start.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
- __le32_to_cpu(desc->mpdu_start.info1));
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
- __le32_to_cpu(desc->mpdu_start.info1));
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
+static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
-static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
+static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
-static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
+static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
- __le32_to_cpu(desc->mpdu_start.info1));
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
-static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
- __le32_to_cpu(desc->attention.info2));
+ __le32_to_cpu(attn->info2));
}
-static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
- __le32_to_cpu(desc->attention.info1));
+ __le32_to_cpu(attn->info1));
}
-static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
- __le32_to_cpu(desc->attention.info1));
+ __le32_to_cpu(attn->info1));
}
-static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
- __le32_to_cpu(desc->attention.info2)) ==
+ __le32_to_cpu(attn->info2)) ==
RX_DESC_DECRYPT_STATUS_CODE_OK);
}
-static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
+static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
- u32 info = __le32_to_cpu(desc->attention.info1);
+ u32 info = __le32_to_cpu(attn->info1);
u32 errmap = 0;
if (info & RX_ATTENTION_INFO1_FCS_ERR)
@@ -135,131 +142,122 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
+static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
- __le32_to_cpu(desc->msdu_start.info1));
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO3_SGI,
- __le32_to_cpu(desc->msdu_start.info3));
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
- __le32_to_cpu(desc->msdu_start.info3));
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
- __le32_to_cpu(desc->msdu_start.info3));
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}
-static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
+static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return __le32_to_cpu(desc->msdu_start.phy_meta_data);
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
- __le32_to_cpu(desc->msdu_start.info3));
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
- __le32_to_cpu(desc->msdu_start.info3));
-
- return hweight8(mimo_ss_bitmap);
+ return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}
-static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MPDU_START_INFO2_TID,
- __le32_to_cpu(desc->mpdu_start.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}
-static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
+static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}
-static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
- __le32_to_cpu(desc->msdu_end.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
-static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
- __le32_to_cpu(desc->msdu_end.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
-static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
- __le32_to_cpu(desc->msdu_end.info2));
+ return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}
-static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
+static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
+ struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
- memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
- sizeof(struct rx_msdu_end));
- memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
- sizeof(struct rx_attention));
- memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
- sizeof(struct rx_mpdu_end));
+ ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}
-static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
+static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
- struct rx_attention *rx_attn;
-
- rx_attn = &rx_desc->attention;
-
return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
- __le32_to_cpu(rx_attn->info1));
+ __le32_to_cpu(attn->info1));
}
-static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
-{
- struct rx_msdu_start *rx_msdu_start;
-
- rx_msdu_start = &rx_desc->msdu_start;
-
- return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
- __le32_to_cpu(rx_msdu_start->info2));
-}
-
-static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
+static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
u8 *rx_pkt_hdr;
- rx_pkt_hdr = &rx_desc->msdu_payload[0];
+ rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
return rx_pkt_hdr;
}
-static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
+static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
u32 tlv_tag;
- tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
- __le32_to_cpu(rx_desc->mpdu_start_tag));
+ tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
return tlv_tag == HAL_RX_MPDU_START;
}
-static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
+static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
{
- return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
+ ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
static void ath11k_dp_service_mon_ring(struct timer_list *t)
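[Editor's note] The accessor rewrite above is the core of multi-chip RX support: every field read that used to hard-code the IPQ8074 descriptor layout via FIELD_GET() now dispatches through ab->hw_params.hw_ops, so supporting QCN9074 means supplying another ops table rather than touching the datapath. Reduced to one field, the shape is (foo_* names hypothetical):

#include <linux/types.h>

struct hal_rx_desc;	/* layout is chip-specific; opaque to the datapath */

struct foo_hw_ops {
	u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
};

/* stub: a real table decodes the IPQ8074 field offsets that the
 * removed FIELD_GET() code read directly
 */
static u16 foo_ipq8074_msdu_len(struct hal_rx_desc *desc)
{
	return 0;
}

static const struct foo_hw_ops foo_ipq8074_ops = {
	.rx_desc_get_msdu_len = foo_ipq8074_msdu_len,
};

/* the datapath dispatches through the table, never sees the layout */
static u16 foo_rx_msdu_len(const struct foo_hw_ops *ops,
			   struct hal_rx_desc *desc)
{
	return ops->rx_desc_get_msdu_len(desc);
}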
@@ -1722,19 +1720,19 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
struct sk_buff *first, struct sk_buff *last,
u8 l3pad_bytes, int msdu_len)
{
+ struct ath11k_base *ab = ar->ab;
struct sk_buff *skb;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
- int space_extra;
- int rem_len;
- int buf_len;
+ int space_extra, rem_len, buf_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
* the length of the msdu in the first buffer.
*/
- buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
+ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
@@ -1744,8 +1742,8 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
}
ldesc = (struct hal_rx_desc *)last->data;
- rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
- rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
/* MSDU spans multiple buffers because the length of the MSDU
* exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
@@ -1757,7 +1755,7 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
/* When an MSDU is spread over multiple buffers, the attention, MSDU_END and
* MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
*/
- ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
+ ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
if (space_extra > 0 &&
@@ -1778,18 +1776,18 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
rxcb = ATH11K_SKB_RXCB(skb);
if (rxcb->is_continuation)
- buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
+ buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
else
buf_len = rem_len;
- if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
+ if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
WARN_ON_ONCE(1);
dev_kfree_skb_any(skb);
return -EINVAL;
}
- skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
- skb_pull(skb, HAL_RX_DESC_SIZE);
+ skb_put(skb, buf_len + hal_rx_desc_sz);
+ skb_pull(skb, hal_rx_desc_sz);
skb_copy_from_linear_data(skb, skb_put(first, buf_len),
buf_len);
dev_kfree_skb_any(skb);
@@ -1820,13 +1818,15 @@ static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
-static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
+static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct rx_attention *rx_attention;
bool ip_csum_fail, l4_csum_fail;
- ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
- l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
+ ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
+ l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
@@ -1957,7 +1957,7 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
qos_ctl = rxcb->tid;
- if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
+ if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
/* TODO Add other QoS ctl fields when required */
@@ -2061,7 +2061,7 @@ static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
bool is_amsdu;
is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
- hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
rfc1042 = hdr;
if (rxcb->is_first_msdu) {
@@ -2134,8 +2134,8 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
u8 *first_hdr;
u8 decap;
- first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
- decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);
+ first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
switch (decap) {
case DP_RX_DECAP_TYPE_NATIVE_WIFI:
@@ -2167,6 +2167,7 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
bool is_decrypted = false;
struct ieee80211_hdr *hdr;
struct ath11k_peer *peer;
+ struct rx_attention *rx_attention;
u32 err_bitmap;
hdr = (struct ieee80211_hdr *)msdu->data;
@@ -2188,9 +2189,10 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
}
spin_unlock_bh(&ar->ab->base_lock);
- err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
- is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
/* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
@@ -2215,7 +2217,7 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
RX_FLAG_PN_VALIDATED;
}
- ath11k_dp_rx_h_csum_offload(msdu);
+ ath11k_dp_rx_h_csum_offload(ar, msdu);
ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);
@@ -2236,11 +2238,11 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
u8 sgi;
bool is_cck;
- pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
- bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
- rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
- nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
- sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
+ pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
+ bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
+ rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
+ nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
+ sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
@@ -2297,7 +2299,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
u8 channel_num;
- u32 center_freq;
+ u32 center_freq, meta_data;
struct ieee80211_channel *channel;
rx_status->freq = 0;
@@ -2308,8 +2310,9 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
- channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
- center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16;
+ meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
+ channel_num = meta_data;
+ center_freq = meta_data >> 16;
if (center_freq >= 5935 && center_freq <= 7105) {
rx_status->band = NL80211_BAND_6GHZ;
@@ -2409,7 +2412,9 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list)
{
+ struct ath11k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct rx_attention *rx_attention;
struct ieee80211_rx_status rx_status = {0};
struct ieee80211_rx_status *status;
struct ath11k_skb_rxcb *rxcb;
@@ -2419,10 +2424,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
u8 *hdr_status;
u16 msdu_len;
int ret;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
- ath11k_warn(ar->ab,
+ ath11k_warn(ab,
"No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
ret = -EIO;
goto free_out;
@@ -2430,38 +2436,39 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
rx_desc = (struct hal_rx_desc *)msdu->data;
lrx_desc = (struct hal_rx_desc *)last_buf->data;
- if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
- ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
+ rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
+ if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
+ ath11k_warn(ab, "msdu_done bit in attention is not set\n");
ret = -EIO;
goto free_out;
}
rxcb = ATH11K_SKB_RXCB(msdu);
rxcb->rx_desc = rx_desc;
- msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
- l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
if (rxcb->is_frag) {
- skb_pull(msdu, HAL_RX_DESC_SIZE);
+ skb_pull(msdu, hal_rx_desc_sz);
} else if (!rxcb->is_continuation) {
- if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
- hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
ret = -EINVAL;
- ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
- ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
sizeof(struct ieee80211_hdr));
- ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
sizeof(struct hal_rx_desc));
goto free_out;
}
- skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
+ skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
} else {
ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
msdu, last_buf,
l3_pad_bytes, msdu_len);
if (ret) {
- ath11k_warn(ar->ab,
+ ath11k_warn(ab,
"failed to coalesce msdu rx buffer%d\n", ret);
goto free_out;
}
@@ -3090,16 +3097,17 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
- u32 hdr_len;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
- if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
+ if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
+ HAL_ENCRYPT_TYPE_TKIP_MIC)
return 0;
- hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
- head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
+ head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
if (!is_multicast_ether_addr(hdr->addr1))
@@ -3125,7 +3133,7 @@ mic_fail:
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
- skb_pull(msdu, HAL_RX_DESC_SIZE);
+ skb_pull(msdu, hal_rx_desc_sz);
ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
@@ -3140,11 +3148,12 @@ static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
if (!flags)
return;
- hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
if (flags & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
@@ -3158,8 +3167,8 @@ static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
- memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
- (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
+ memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
+ (void *)msdu->data + hal_rx_desc_sz, hdr_len);
skb_pull(msdu, crypto_len);
}
}
@@ -3172,11 +3181,12 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
struct hal_rx_desc *rx_desc;
struct sk_buff *skb, *first_frag, *last_frag;
struct ieee80211_hdr *hdr;
+ struct rx_attention *rx_attention;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
- u32 flags;
+ u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
@@ -3184,11 +3194,13 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
skb_queue_walk(&rx_tid->rx_frags, skb) {
flags = 0;
rx_desc = (struct hal_rx_desc *)skb->data;
- hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
- enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
- if (enctype != HAL_ENCRYPT_TYPE_OPEN)
- is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
+ }
if (is_decrypted) {
if (skb != first_frag)
@@ -3204,7 +3216,7 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
if (skb != first_frag)
- skb_pull(skb, HAL_RX_DESC_SIZE +
+ skb_pull(skb, hal_rx_desc_sz +
ieee80211_hdrlen(hdr->frame_control));
msdu_len += skb->len;
}
@@ -3220,7 +3232,7 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
dev_kfree_skb_any(skb);
}
- hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
+ hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
@@ -3246,10 +3258,10 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
struct hal_srng *srng;
dma_addr_t paddr;
u32 desc_bank, msdu_info, mpdu_info;
- u32 dst_idx, cookie;
- u32 *msdu_len_offset;
+ u32 dst_idx, cookie, hal_rx_desc_sz;
int ret, buf_id;
+ hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
link_desc_banks = ab->dp.link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
@@ -3264,16 +3276,14 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
- defrag_skb->len - HAL_RX_DESC_SIZE) |
+ defrag_skb->len - hal_rx_desc_sz) |
FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
msdu0->rx_msdu_info.info0 = msdu_info;
/* change msdu len in hal rx desc */
- msdu_len_offset = (u32 *)&rx_desc->msdu_start;
- *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
- *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;
+ ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
@@ -3346,24 +3356,26 @@ err_unmap_dma:
return ret;
}
-static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
+static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
+ struct sk_buff *a, struct sk_buff *b)
{
int frag1, frag2;
- frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
- frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);
+ frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
+ frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
return frag1 - frag2;
}
-static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
+static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
+ struct sk_buff_head *frag_list,
struct sk_buff *cur_frag)
{
struct sk_buff *skb;
int cmp;
skb_queue_walk(frag_list, skb) {
- cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
+ cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
if (cmp < 0)
continue;
__skb_queue_before(frag_list, skb, cur_frag);
@@ -3372,14 +3384,15 @@ static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
__skb_queue_tail(frag_list, cur_frag);
}
-static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
+static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
- hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
- ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+ ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
pn = ehdr[0];
pn |= (u64)ehdr[1] << 8;
@@ -3403,19 +3416,19 @@ ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_t
first_frag = skb_peek(&rx_tid->rx_frags);
desc = (struct hal_rx_desc *)first_frag->data;
- encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
+ encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
return true;
- last_pn = ath11k_dp_rx_h_get_pn(first_frag);
+ last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
skb_queue_walk(&rx_tid->rx_frags, skb) {
if (skb == first_frag)
continue;
- cur_pn = ath11k_dp_rx_h_get_pn(skb);
+ cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
if (cur_pn != last_pn + 1)
return false;
last_pn = cur_pn;
@@ -3439,14 +3452,14 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
bool more_frags;
rx_desc = (struct hal_rx_desc *)msdu->data;
- peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
- tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
- seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
- frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
- more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);
-
- if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
- !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
+ peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
+ seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
+ more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+
+ if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
+ !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
tid > IEEE80211_NUM_TIDS)
return -EINVAL;
@@ -3484,7 +3497,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
if (frag_no > __fls(rx_tid->rx_frag_bitmap))
__skb_queue_tail(&rx_tid->rx_frags, msdu);
else
- ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);
+ ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
rx_tid->rx_frag_bitmap |= BIT(frag_no);
if (!more_frags)
@@ -3551,6 +3564,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
struct hal_rx_desc *rx_desc;
u8 *hdr_status;
u16 msdu_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
@@ -3586,9 +3600,9 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
}
rx_desc = (struct hal_rx_desc *)msdu->data;
- msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
- if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
- hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
sizeof(struct ieee80211_hdr));
@@ -3598,7 +3612,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
goto exit;
}
- skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
+ skb_put(msdu, hal_rx_desc_sz + msdu_len);
if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
dev_kfree_skb_any(msdu);
@@ -3732,7 +3746,7 @@ static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
- (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
+ (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH11K_SKB_RXCB(skb);
@@ -3753,19 +3767,22 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
{
u16 msdu_len;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ struct rx_attention *rx_attention;
u8 l3pad_bytes;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
- msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
- if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
+ if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
/* First buffer will be freed by the caller, so deduct its length */
- msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
return -EINVAL;
}
- if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
+ if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
ath11k_warn(ar->ab,
"msdu_done bit not set in null_q_des processing\n");
__skb_queue_purge(msdu_list);
@@ -3781,25 +3798,25 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
* This error can show up both in a REO destination or WBM release ring.
*/
- rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
- rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
if (rxcb->is_frag) {
- skb_pull(msdu, HAL_RX_DESC_SIZE);
+ skb_pull(msdu, hal_rx_desc_sz);
} else {
- l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
- if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
return -EINVAL;
- skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
}
ath11k_dp_rx_h_ppdu(ar, desc, status);
ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
- rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);
+ rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
/* Please note that the caller will still have access to the msdu and will
* complete rx with mac80211. No need to worry about cleaning up amsdu_list.
@@ -3846,14 +3863,15 @@ static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
- rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
- rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
- l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
- msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
- skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
ath11k_dp_rx_h_ppdu(ar, desc, status);
@@ -4595,10 +4613,10 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
rx_desc = (struct hal_rx_desc *)msdu->data;
rx_pkt_offset = sizeof(struct hal_rx_desc);
- l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
if (is_first_msdu) {
- if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
drop_mpdu = true;
dev_kfree_skb_any(msdu);
msdu = NULL;
@@ -4607,7 +4625,7 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
}
msdu_ppdu_id =
- ath11k_dp_rxdesc_get_ppduid(rx_desc);
+ ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
ppdu_id,
@@ -4676,12 +4694,13 @@ next_msdu:
return rx_bufs_used;
}
-static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
+static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
{
u32 rx_pkt_offset, l2_hdr_offset;
- rx_pkt_offset = sizeof(struct hal_rx_desc);
- l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
+ rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
+ (struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}
@@ -4691,12 +4710,14 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
struct sk_buff *last_msdu,
struct ieee80211_rx_status *rxs)
{
+ struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf;
- u32 decap_format, wifi_hdr_len;
+ u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
- u8 *dest;
+ u8 *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
+ struct rx_attention *rx_attention;
mpdu_buf = NULL;
@@ -4704,22 +4725,23 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
- if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
+ if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
return NULL;
- decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
+ decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
- ath11k_dp_rx_msdus_set_payload(head_msdu);
+ ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
while (msdu) {
- ath11k_dp_rx_msdus_set_payload(msdu);
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
@@ -4733,7 +4755,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
- hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
@@ -4750,7 +4772,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
while (msdu) {
rx_desc = (struct hal_rx_desc *)msdu->data;
- hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
@@ -4760,7 +4782,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
memcpy(dest + wifi_hdr_len,
(u8 *)&qos_field, sizeof(__le16));
}
- ath11k_dp_rx_msdus_set_payload(msdu);
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
@@ -4768,11 +4790,11 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
if (!dest)
goto err_merge_fail;
- ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
"mpdu_buf %pK mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
- ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
"decap format %d is not supported!\n",
decap_format);
goto err_merge_fail;
@@ -4782,7 +4804,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
err_merge_fail:
if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
- ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
"err_merge_fail mpdu_buf %pK", mpdu_buf);
/* Free the head buffer */
dev_kfree_skb_any(mpdu_buf);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 1a0b9be9ce6a..8bba5234f81f 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -178,7 +178,7 @@ tcl_ring_sel:
}
if (ieee80211_vif_is_mesh(arvif->vif))
- ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);
+ ti.enable_mesh = true;
ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
@@ -792,8 +792,8 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
- cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
- cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff;
+ cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
+ cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
cmd->msi_data = params.msi_data;
cmd->intr_info = FIELD_PREP(
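[Editor's note] lower_32_bits()/upper_32_bits() from linux/kernel.h replace the open-coded mask and shift. Beyond readability, upper_32_bits() is implemented as two 16-bit shifts, so it stays well-defined even when the value is a 32-bit type, where a plain ">> 32" is undefined behaviour. Sketch:

#include <linux/kernel.h>	/* lower_32_bits(), upper_32_bits() */
#include <linux/types.h>

static void foo_split_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);	/* addr & 0xffffffff */
	*hi = upper_32_bits(addr);	/* no UB even if dma_addr_t is 32-bit */
}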
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 9904c0eb7587..08e3c72d9237 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -89,17 +89,6 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
- HAL_CE_DST_RING_BASE_LSB),
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
- },
- .reg_size = {
- (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
- (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
- },
.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_DST */
@@ -108,17 +97,6 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
- HAL_CE_DST_RING_BASE_LSB),
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
- },
- .reg_size = {
- (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
- (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
- },
.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_DST_STATUS */
@@ -127,18 +105,6 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
- HAL_CE_DST_STATUS_RING_BASE_LSB),
- (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
- HAL_CE_DST_STATUS_RING_HP),
- },
- .reg_size = {
- (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
- (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
- },
.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* WBM_IDLE_LINK */
@@ -147,11 +113,6 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- (HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_IDLE_LINK_RING_BASE_LSB),
- (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
- },
.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
},
{ /* SW2WBM_RELEASE */
@@ -160,11 +121,6 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- (HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_RELEASE_RING_BASE_LSB),
- (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
- },
.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
},
{ /* WBM2SW_RELEASE */
@@ -173,16 +129,6 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- (HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM0_RELEASE_RING_BASE_LSB),
- (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
- },
- .reg_size = {
- (HAL_WBM1_RELEASE_RING_BASE_LSB -
- HAL_WBM0_RELEASE_RING_BASE_LSB),
- (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
- },
.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
},
{ /* RXDMA_BUF */
@@ -955,7 +901,7 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
/* Enable the SRNG */
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
- HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
}
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
@@ -1234,6 +1180,46 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
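+	/* The template above now keeps only the hw-invariant ring parameters;
+	 * every CE/WBM register offset that differs between chips is patched
+	 * into the copied config here from the per-chip register table, with
+	 * reg_size holding the per-instance stride (e.g. CE1 base minus CE0
+	 * base).
+	 */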
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 1f1b29cd0aa3..91d1428b8b94 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -39,14 +39,22 @@ struct ath11k_base;
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000
-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x00a01000
-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000
-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
+	((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
+	((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
+	((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
+	((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg)
#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
+#define HAL_WLAON_REG_BASE 0x01f80000
+
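+/* On IPQ8074/QCA6390 the CE blocks sit in the UMAC region at 0x00a00000,
+ * while QCN9074 moves them to 0x01b80000 (HAL_CE_WFSS_CE_REG_BASE above),
+ * so the CE0/CE1 bases are resolved through the per-chip ath11k_hw_regs
+ * table at runtime instead of being compile-time constants.
+ */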
/* SW2TCL(x) R0 ring configuration address */
#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
@@ -197,8 +205,10 @@ struct ath11k_base;
#define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp
/* WBM Idle R0 address */
-#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
-#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
+	((ab)->hw_params.regs->hal_wbm_idle_link_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
+	((ab)->hw_params.regs->hal_wbm_idle_link_ring_misc)
#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
@@ -213,14 +223,17 @@ struct ath11k_base;
#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
/* SW2WBM R0 release address */
-#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8
+#define HAL_WBM_RELEASE_RING_BASE_LSB(ab) \
+	((ab)->hw_params.regs->hal_wbm_release_ring_base_lsb)
/* SW2WBM R2 release address */
#define HAL_WBM_RELEASE_RING_HP 0x00003018
/* WBM2SW R0 release address */
-#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910
-#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
+	((ab)->hw_params.regs->hal_wbm0_release_ring_base_lsb)
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
+	((ab)->hw_params.regs->hal_wbm1_release_ring_base_lsb)
/* WBM2SW R2 release address */
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
@@ -303,8 +316,6 @@ struct ath11k_base;
#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
-#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc))
-
/* Add any other errors here and return them in
* ath11k_hal_rx_desc_get_err().
*/
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 1b713cb13b90..d54ec6aa6281 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -949,16 +949,17 @@ struct hal_reo_flush_cache {
#define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21)
#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23)
-#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
-#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
-#define HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
-#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
-#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
-#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
+#define HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0)
#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6)
#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26)
+#define HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE GENMASK(31, 30)
#define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20)
#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28)
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index 569e790d83a1..c8929de8ce6c 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -75,6 +75,9 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM,
ti->bss_ast_hash);
tcl_cmd->info4 = 0;
+
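+	/* The mesh-enable flag is the one TCL data command field that moved
+	 * between chip generations (info2 bit 20 on IPQ8074, info3 bits 31:30
+	 * on QCN9074), hence the hw_ops indirection instead of a direct
+	 * FIELD_PREP() here.
+	 */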
+ if (ti->enable_mesh)
+ ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
}
void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index c291e59c3ca6..36f4f6f6cbc2 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -34,6 +34,7 @@ struct hal_tx_info {
u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
u8 lmac_id;
u8 dscp_tid_tbl_idx;
+ bool enable_mesh;
};
/* TODO: Check if the actual desc macros can be used instead */
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 6285c52afc44..e9366f786fbb 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -28,6 +28,7 @@ struct ath11k_hif_ops {
u32 *msi_addr_hi);
void (*ce_irq_enable)(struct ath11k_base *ab);
void (*ce_irq_disable)(struct ath11k_base *ab);
+ void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
};
static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
@@ -124,4 +125,13 @@ static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_
ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
}
+
+static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
+ u32 *msi_data_idx)
+{
+ if (ab->hif.ops->get_ce_msi_idx)
+ ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx);
+ else
+ *msi_data_idx = ce_id;
+}
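+
+/* QCA6390 has an MSI vector per CE and can rely on the identity fallback
+ * above; QCN9074 exposes fewer CE vectors than CE pipes, so its bus code
+ * supplies ->get_ce_msi_idx to fold the CE id into the smaller vector
+ * space (presumably skipping pipes that run with interrupts disabled).
+ */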
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 66331da35012..377ae8d5b58f 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -31,6 +31,20 @@ static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
return pdev_idx;
}
+static void ath11k_hw_ipq8074_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info2 |= FIELD_PREP(HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
@@ -155,11 +169,358 @@ static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
return mac_id;
}
+static bool ath11k_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.hdr_status;
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info1));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.ipq8074.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.ipq8074.msdu_end, (u8 *)&ldesc->u.ipq8074.msdu_end,
+ sizeof(struct rx_msdu_end_ipq8074));
+ memcpy((u8 *)&fdesc->u.ipq8074.attention, (u8 *)&ldesc->u.ipq8074.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.ipq8074.mpdu_end, (u8 *)&ldesc->u.ipq8074.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.ipq8074.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.ipq8074.attention;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.ipq8074.msdu_payload[0];
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.hdr_status;
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info1));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO9_TID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9074.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.qcn9074.msdu_end, (u8 *)&ldesc->u.qcn9074.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9074));
+ memcpy((u8 *)&fdesc->u.qcn9074.attention, (u8 *)&ldesc->u.qcn9074.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.qcn9074.mpdu_end, (u8 *)&ldesc->u.qcn9074.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9074.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.qcn9074.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9074.attention;
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9074.msdu_payload[0];
+}
+
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -167,6 +528,33 @@ const struct ath11k_hw_ops ipq6018_ops = {
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -174,6 +562,67 @@ const struct ath11k_hw_ops qca6390_ops = {
.wmi_init_config = ath11k_init_wmi_config_qca6390,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+};
+
+const struct ath11k_hw_ops qcn9074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
};
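/* All rx descriptor parsing is funnelled through these per-chip tables; a
 * typical caller (sketch, not part of this patch) would read
 *
 *	msdu_len = ab->hw_params.hw_ops->rx_desc_get_msdu_len(rx_desc);
 *
 * and pick up the IPQ8074 or QCN9074 field layout at runtime.
 */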
#define ATH11K_TX_RING_MASK_0 0x1
@@ -792,6 +1241,241 @@ const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
},
};
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
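+
+/* Each entry binds an HTC service id to a CE pipe for one direction
+ * (PIPEDIR_OUT = host->target, PIPEDIR_IN = target->host); the all-zero
+ * entry terminates the walk when services are connected at boot.
+ */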
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ 0, 0, 0,
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ },
+};
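+
+/* Each array is indexed by EXT IRQ group (ATH11K_EXT_IRQ_GRP_NUM_MAX
+ * entries); a non-zero mask assigns that ring to the group, so the three
+ * TX rings land on groups 0-2 while the RX, error and status rings are
+ * spread across the later groups.
+ */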
+
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
@@ -841,6 +1525,26 @@ const struct ath11k_hw_regs ipq8074_regs = {
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+ .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000910,
+ .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
};
const struct ath11k_hw_regs qca6390_regs = {
@@ -891,4 +1595,96 @@ const struct ath11k_hw_regs qca6390_regs = {
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x000004ac,
.hal_reo_status_hp = 0x00003068,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+ .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000910,
+ .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+};
+
+const struct ath11k_hw_regs qcn9074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x000004f0,
+ .hal_tcl1_ring_base_msb = 0x000004f4,
+ .hal_tcl1_ring_id = 0x000004f8,
+ .hal_tcl1_ring_misc = 0x00000500,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000510,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000538,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000053c,
+ .hal_tcl1_ring_msi1_data = 0x00000540,
+ .hal_tcl2_ring_base_lsb = 0x00000548,
+ .hal_tcl_ring_base_lsb = 0x000005f8,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000700,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
};
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 8af0034fdb05..c81a6328361d 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -105,6 +105,9 @@ enum ath11k_bus {
#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+struct hal_rx_desc;
+struct hal_tcl_data_cmd;
+
struct ath11k_hw_ring_mask {
u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
@@ -134,6 +137,7 @@ struct ath11k_hw_params {
bool internal_sleep_clock;
const struct ath11k_hw_regs *regs;
+ u32 qmi_service_ins_id;
const struct ce_attr *host_ce_config;
u32 ce_count;
const struct ce_pipe_config *target_ce_config;
@@ -157,6 +161,7 @@ struct ath11k_hw_params {
bool idle_ps;
bool cold_boot_calib;
bool supports_suspend;
+ u32 hal_desc_sz;
};
struct ath11k_hw_ops {
@@ -165,14 +170,45 @@ struct ath11k_hw_ops {
struct target_resource_config *config);
int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
+ void (*tx_mesh_enable)(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd);
+ bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
+ bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+ u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
+ struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
};
extern const struct ath11k_hw_ops ipq8074_ops;
extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
+extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
@@ -261,9 +297,26 @@ struct ath11k_hw_regs {
u32 hal_reo_status_ring_base_lsb;
u32 hal_reo_status_hp;
+
+ u32 hal_seq_wcss_umac_ce0_src_reg;
+ u32 hal_seq_wcss_umac_ce0_dst_reg;
+ u32 hal_seq_wcss_umac_ce1_src_reg;
+ u32 hal_seq_wcss_umac_ce1_dst_reg;
+
+ u32 hal_wbm_idle_link_ring_base_lsb;
+ u32 hal_wbm_idle_link_ring_misc;
+
+ u32 hal_wbm_release_ring_base_lsb;
+
+ u32 hal_wbm0_release_ring_base_lsb;
+ u32 hal_wbm1_release_ring_base_lsb;
+
+ u32 pcie_qserdes_sysclk_en_sel;
+ u32 pcie_pcs_osc_dtct_config_base;
};
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
+extern const struct ath11k_hw_regs qcn9074_regs;
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index faa2e678e63e..4df425dd31a2 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -829,6 +829,75 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
+static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+	/* Firmware doesn't report beacon loss events repeatedly. If the AP
+	 * probe (done by mac80211) succeeds but beacons still don't resume,
+	 * continuing the connection makes no sense, so queue connection loss
+	 * work, which is cancelled when a beacon is received.
+	 */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH11K_CONNECTION_LOSS_HZ);
+}
+
+void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
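+
+/* Beacon-loss handling, end to end: a firmware beacon-miss event arms
+ * connection_loss_work via ath11k_mac_handle_beacon_miss(), a beacon from
+ * the associated BSS cancels it in ath11k_mac_handle_beacon(), and if
+ * neither mac80211's probe nor a beacon rescues the link before the
+ * ATH11K_CONNECTION_LOSS_HZ delay expires, the work above reports
+ * ieee80211_connection_loss() to mac80211.
+ */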
+
static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1265,9 +1334,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
* request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
* length.
*/
- ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT;
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
if (sta->vht_cap.vht_supported)
@@ -1760,7 +1828,7 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
arvif->is_up = false;
- /* TODO: cancel connection_loss_work */
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
}
static u32 ath11k_mac_get_rate_hw_value(int bitrate)
@@ -3807,7 +3875,7 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
IEEE80211_HE_MAC_CAP4_BQR;
he_cap_elem->mac_cap_info[4] &= ~m;
- m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION |
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
@@ -3817,7 +3885,7 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
he_cap_elem->phy_cap_info[2] &= ~m;
- m = IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA |
+ m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
he_cap_elem->phy_cap_info[3] &= ~m;
@@ -3829,13 +3897,13 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
he_cap_elem->phy_cap_info[5] &= ~m;
m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
he_cap_elem->phy_cap_info[6] &= ~m;
- m = IEEE80211_HE_PHY_CAP7_SRP_BASED_SR |
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
he_cap_elem->phy_cap_info[7] &= ~m;
@@ -3919,8 +3987,6 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar,
he_cap_elem->phy_cap_info[5] &=
~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
- he_cap_elem->phy_cap_info[5] &=
- ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
switch (i) {
@@ -4213,7 +4279,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
return -ENOSPC;
}
- if (skb_queue_len(q) == ATH11K_TX_MGMT_NUM_PENDING_MAX) {
+ if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) {
ath11k_warn(ar->ab, "mgmt tx queue is full\n");
return -ENOSPC;
}
@@ -4617,10 +4683,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
-
- /* Should we initialize any worker to handle connection loss indication
- * from firmware in sta mode?
- */
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath11k_mac_vif_sta_connection_loss_work);
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
@@ -4829,6 +4893,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
int ret;
int i;
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
@@ -5096,13 +5162,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ arg.channel.freq2_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
arg.channel.passive = arg.channel.chan_radar;
spin_lock_bh(&ab->base_lock);
arg.regdomain = ar->ab->dfs_region;
spin_unlock_bh(&ab->base_lock);
- /* TODO: Notify if secondary 80Mhz also needs radar detection */
if (he_support) {
ret = ath11k_set_he_mu_sounding_mode(ar, arvif);
if (ret) {
@@ -6082,6 +6150,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
/* TODO: Use real NF instead of default one. */
sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
static const struct ieee80211_ops ath11k_ops = {
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 455577905505..4bc59bdaf244 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -150,4 +150,6 @@ int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
+void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
+void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 09858e516903..27b394d115e2 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -7,10 +7,11 @@
#include "core.h"
#include "debug.h"
#include "mhi.h"
+#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
-static struct mhi_channel_config ath11k_mhi_channels[] = {
+static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
.num = 0,
.name = "LOOPBACK",
@@ -69,7 +70,7 @@ static struct mhi_channel_config ath11k_mhi_channels[] = {
},
};
-static struct mhi_event_config ath11k_mhi_events[] = {
+static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
{
.num_elements = 32,
.irq_moderation_ms = 0,
@@ -92,15 +93,108 @@ static struct mhi_event_config ath11k_mhi_events[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config = {
+static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
.buf_len = 0,
- .num_channels = ARRAY_SIZE(ath11k_mhi_channels),
- .ch_cfg = ath11k_mhi_channels,
- .num_events = ARRAY_SIZE(ath11k_mhi_events),
- .event_cfg = ath11k_mhi_events,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
+ .ch_cfg = ath11k_mhi_channels_qca6390,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
+ .event_cfg = ath11k_mhi_events_qca6390,
+};
+
+static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .data_type = MHI_ER_CTRL,
+ .mode = MHI_DB_BRST_DISABLE,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
+ .max_channels = 30,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074),
+ .ch_cfg = ath11k_mhi_channels_qcn9074,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074),
+ .event_cfg = ath11k_mhi_events_qcn9074,
};
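/* The QCN9074 layout mirrors the QCA6390 one but defines only the
 * LOOPBACK (0/1) and IPCR (20/21) channel pairs, all serviced by event
 * ring 1, with event ring 0 (irq 1) dedicated to MHI control events;
 * hence the smaller max_channels and the longer 10 s timeout.
 */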
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
@@ -221,6 +315,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
+ struct mhi_controller_config *ath11k_mhi_config;
int ret;
mhi_ctrl = mhi_alloc_controller();
@@ -254,7 +349,21 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;
- ret = mhi_register_controller(mhi_ctrl, &ath11k_mhi_config);
+ switch (ab->hw_rev) {
+ case ATH11K_HW_QCN9074_HW10:
+ ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
+ break;
+ case ATH11K_HW_QCA6390_HW20:
+ ath11k_mhi_config = &ath11k_mhi_config_qca6390;
+ break;
+ default:
+		ath11k_err(ab, "failed to assign mhi_config for unknown hw rev %d\n",
+ ab->hw_rev);
+ mhi_free_controller(mhi_ctrl);
+ return -EINVAL;
+ }
+
+ ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
mhi_free_controller(mhi_ctrl);
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index d14416816acc..0f31eb566fb6 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -35,9 +35,11 @@
#define ACCESS_ALWAYS_OFF 0xFE0
#define QCA6390_DEVICE_ID 0x1101
+#define QCN9074_DEVICE_ID 0x1104
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
+ /* TODO: add QCN9074_DEVICE_ID once firmware issues are resolved */
{0}
};
@@ -50,14 +52,25 @@ static const struct ath11k_bus_params ath11k_pci_bus_params = {
.fixed_mem_region = false,
};
-static const struct ath11k_msi_config msi_config = {
- .total_vectors = 32,
- .total_users = 4,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 10, .base_vector = 3 },
- { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
- { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+static const struct ath11k_msi_config ath11k_msi_config[] = {
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ },
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
},
};
@@ -131,9 +144,38 @@ static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offse
}
}
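+/* The static window select register packs three 6-bit window values:
+ * bits 17:12 choose what is mapped at 3 * WINDOW_START (UMAC/DP) and
+ * bits 11:6 what is mapped at 2 * WINDOW_START (CE), leaving bits 5:0
+ * for the dynamically selected first window. This reading is inferred
+ * from ath11k_pci_get_window_start() below.
+ */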
+static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
+{
+ u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
+ u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
+ u32 window;
+
+ window = (umac_window << 12) | (ce_window << 6);
+
+ iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
+}
+
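+/* (offset ^ base) < WINDOW_RANGE_MASK is true when offset and base
+ * agree on all bits above the 19-bit window range, i.e. when offset
+ * lies in the same register window that starts at base.
+ */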
+static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab,
+ u32 offset)
+{
+ u32 window_start;
+
+ /* If offset lies within DP register range, use 3rd window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
+ window_start = 3 * WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
+ window_start = 2 * WINDOW_START;
+ else
+ window_start = WINDOW_START;
+
+ return window_start;
+}
+
void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start;
/* for offsets beyond BAR + 4K - 32, we may
* need to wake up MHI before accessing them.
@@ -145,10 +187,21 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
if (offset < WINDOW_START) {
iowrite32(value, ab->mem + offset);
} else {
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
+ if (ab->bus_params.static_window_map)
+ window_start = ath11k_pci_get_window_start(ab, offset);
+ else
+ window_start = WINDOW_START;
+
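+ /* static windows are programmed once at power up (see
+ * ath11k_pci_power_up()), so only the dynamically selected
+ * window needs the select-and-access sequence serialized
+ */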
+ if (window_start == WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
}
if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
@@ -159,7 +212,7 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 val;
+ u32 val, window_start;
/* for offsets beyond BAR + 4K - 32, we may
* need to wake up MHI before accessing them.
@@ -171,10 +224,21 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
if (offset < WINDOW_START) {
val = ioread32(ab->mem + offset);
} else {
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
+ if (ab->bus_params.static_window_map)
+ window_start = ath11k_pci_get_window_start(ab, offset);
+ else
+ window_start = WINDOW_START;
+
+ if (window_start == WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
}
if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
@@ -271,7 +335,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
int ret;
ret = ath11k_pci_set_link_reg(ab,
- PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
if (ret) {
@@ -280,27 +344,27 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
}
ret = ath11k_pci_set_link_reg(ab,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
+ PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
ret = ath11k_pci_set_link_reg(ab,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
+ PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
ret = ath11k_pci_set_link_reg(ab,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
- PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
+ PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
@@ -406,14 +470,15 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam
u32 *base_vector)
{
struct ath11k_base *ab = ab_pci->ab;
+ const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
int idx;
- for (idx = 0; idx < msi_config.total_users; idx++) {
- if (strcmp(user_name, msi_config.users[idx].name) == 0) {
- *num_vectors = msi_config.users[idx].num_vectors;
- *user_base_data = msi_config.users[idx].base_vector
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *user_base_data = msi_config->users[idx].base_vector
+ ab_pci->msi_ep_base_data;
- *base_vector = msi_config.users[idx].base_vector;
+ *base_vector = msi_config->users[idx].base_vector;
ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
@@ -428,6 +493,23 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam
return -EINVAL;
}
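+/* CE pipes with CE_ATTR_DIS_INTR consume no MSI vector, so a CE's MSI
+ * index is its id minus the number of interrupt-less pipes before it
+ */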
+static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
+ u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+
static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
@@ -521,6 +603,9 @@ static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
tasklet_schedule(&ce_pipe->intr_tq);
@@ -615,6 +700,9 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
ath11k_pci_ext_grp_disable(irq_grp);
napi_schedule(&irq_grp->napi);
@@ -625,8 +713,9 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
{
int i, j, ret, num_vectors = 0;
- u32 user_base_data = 0, base_vector = 0;
+ u32 user_base_data = 0, base_vector = 0, base_idx;
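+ /* DP ext IRQ slots in ab->irq_num start right after the fixed
+ * per-CE slots
+ */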
+ base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
&num_vectors,
&user_base_data,
@@ -656,7 +745,7 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
}
irq_grp->num_irq = num_irq;
- irq_grp->irqs[0] = base_vector + i;
+ irq_grp->irqs[0] = base_idx + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
@@ -667,6 +756,8 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_PCI,
"irq:%d group:%d\n", irq, i);
+
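+ /* IRQ_DISABLE_UNLAZY makes disable_irq() mask at the chip
+ * immediately instead of lazily on the next interrupt;
+ * presumably this avoids stray DP interrupts between the
+ * disable in the handler and the NAPI poll
+ */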
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
IRQF_SHARED,
"DP_EXT_IRQ", irq_grp);
@@ -687,7 +778,7 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
- u32 msi_data_count;
+ u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
unsigned int msi_data;
int irq, i, ret, irq_idx;
@@ -699,14 +790,14 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
return ret;
/* Configure CE irqs */
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- msi_data = (i % msi_data_count) + msi_irq_start;
- irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
- ce_pipe = &ab->ce.ce_pipe[i];
-
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
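+ /* MSI vectors are handed out only to interrupt-enabled CEs,
+ * so index them with msi_data_idx rather than the CE id
+ */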
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
+ ce_pipe = &ab->ce.ce_pipe[i];
+
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
@@ -721,6 +812,8 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
}
ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
ath11k_pci_ce_irq_disable(ab, i);
}
@@ -740,7 +833,7 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
- ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;
+ ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
&cfg->shadow_reg_v2_len);
@@ -760,17 +853,18 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
+ const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
struct msi_desc *msi_desc;
int num_vectors;
int ret;
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
- msi_config.total_vectors,
- msi_config.total_vectors,
+ msi_config->total_vectors,
+ msi_config->total_vectors,
PCI_IRQ_MSI);
- if (num_vectors != msi_config.total_vectors) {
+ if (num_vectors != msi_config->total_vectors) {
ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
- msi_config.total_vectors, num_vectors);
+ msi_config->total_vectors, num_vectors);
if (num_vectors >= 0)
return -EINVAL;
@@ -932,6 +1026,9 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return ret;
}
+ if (ab->bus_params.static_window_map)
+ ath11k_pci_select_static_window(ab_pci);
+
return 0;
}
@@ -1076,6 +1173,7 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.map_service_to_pipe = ath11k_pci_map_service_to_pipe,
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
+ .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx,
};
static int ath11k_pci_probe(struct pci_dev *pdev,
@@ -1130,6 +1228,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
+ ab_pci->msi_config = &ath11k_msi_config[0];
+ break;
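+ /* QCN9074 uses the smaller MSI layout and pre-programmed static
+ * register windows instead of dynamic window selection
+ */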
+ case QCN9074_DEVICE_ID:
+ ab_pci->msi_config = &ath11k_msi_config[1];
+ ab->bus_params.static_window_map = true;
+ ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index fe44d0dfce19..f3e645891d19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -34,16 +34,20 @@
#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2
#define PCIE_INT_CLEAR_ALL 0xffffffff
-#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG 0x01e0c0ac
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(x) \
+ (ab->hw_params.regs->pcie_qserdes_sysclk_en_sel)
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG 0x01e0c628
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL 0x02
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG 0x01e0c62c
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL 0x52
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG 0x01e0c634
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL 0xff
-#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK 0x000000ff
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(x) \
+ (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(x) \
+ (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(x) \
+ (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
+#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
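+/* CONFIG1/2/4 sit at +0x0, +0x4 and +0xc from a per-chip base taken
+ * from hw_params. Note the (x) macro parameter is unused; the macros
+ * expand to an ab->hw_params lookup and rely on 'ab' being in scope
+ * at the call site.
+ */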
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
@@ -73,6 +77,7 @@ struct ath11k_pci {
char amss_path[100];
u32 msi_ep_base_data;
struct mhi_controller *mhi_ctrl;
+ const struct ath11k_msi_config *msi_config;
unsigned long mhi_state;
u32 register_window;
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 7968fe4eda22..b5e34d670715 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1556,6 +1556,8 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
}
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n");
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1566,7 +1568,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
- ath11k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
+ ath11k_warn(ab, "failed to send host capability request: %d\n", ret);
goto out;
}
@@ -1575,7 +1577,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "host capability request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
@@ -1624,24 +1626,26 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
if (ret < 0)
goto out;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi indication register request\n");
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_IND_REGISTER_REQ_V01,
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
- ath11k_warn(ab, "Failed to send indication register request, err = %d\n",
+ ath11k_warn(ab, "failed to send indication register request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
- ath11k_warn(ab, "failed to register fw indication %d\n", ret);
+ ath11k_warn(ab, "failed to register fw indication: %d\n", ret);
goto out;
}
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "firmware indication register request failed: %d %d\n",
resp->resp.result, resp->resp.error);
ret = -EINVAL;
goto out;
@@ -1699,19 +1703,22 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
if (ret < 0)
goto out;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi respond memory request delayed %i\n",
+ delayed);
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_RESPOND_MEM_REQ_V01,
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+ ath11k_warn(ab, "failed to respond qmi memory request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
- ath11k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+ ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret);
goto out;
}
@@ -1722,7 +1729,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
if (delayed && resp.resp.error == 0)
goto out;
- ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "qmi respond memory request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
@@ -1765,7 +1772,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
- if (ab->qmi.mem_seg_count <= 2) {
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi dma allocation failed (%d B type %u), will try later with small size\n",
chunk->size,
@@ -1774,7 +1781,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
ab->qmi.target_mem_delayed = true;
return 0;
}
- ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
+
+ ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n",
chunk->size,
chunk->type);
return -EINVAL;
@@ -1843,24 +1851,26 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
if (ret < 0)
goto out;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi target cap request\n");
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_CAP_REQ_V01,
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+ ath11k_warn(ab, "failed to send qmi cap request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
- ath11k_warn(ab, "qmi failed target cap request %d\n", ret);
+ ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "qmi cap request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
@@ -1923,7 +1933,7 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
- ath11k_warn(ab, "qmi failed to load BDF\n");
+ ath11k_warn(ab, "failed to load board file: %d\n", ret);
return ret;
}
@@ -1971,7 +1981,7 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE);
if (!bdf_addr) {
- ath11k_warn(ab, "qmi ioremap error for BDF\n");
+ ath11k_warn(ab, "failed ioremap for board file\n");
ret = -EIO;
goto out;
}
@@ -2000,6 +2010,9 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
if (ret < 0)
goto out_qmi_bdf;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
+ type);
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
@@ -2014,7 +2027,7 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
goto out_qmi_bdf;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "board file download request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out_qmi_bdf;
@@ -2047,7 +2060,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
memset(&bd, 0, sizeof(bd));
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
- ath11k_warn(ab, "qmi failed to load bdf:\n");
+ ath11k_warn(ab, "failed to fetch board file: %d\n", ret);
goto out;
}
@@ -2090,6 +2103,9 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
if (ret < 0)
goto out_qmi_bdf;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
+ remaining);
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
@@ -2104,7 +2120,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
goto out_qmi_bdf;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "bdf download request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out_qmi_bdf;
@@ -2200,24 +2216,26 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
if (ret < 0)
goto out;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi m3 info req\n");
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_M3_INFO_REQ_V01,
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+ ath11k_warn(ab, "failed to send m3 information request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
- ath11k_warn(ab, "qmi failed M3 information request %d\n", ret);
+ ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "m3 info request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
@@ -2246,12 +2264,14 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
if (ret < 0)
goto out;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wlan mode req mode %d\n", mode);
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_MODE_REQ_V01,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+ ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n",
mode, ret);
goto out;
}
@@ -2262,13 +2282,13 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
ath11k_warn(ab, "WLFW service is dis-connected\n");
return 0;
}
- ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
+ ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n",
mode, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
+ ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n",
mode, resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
@@ -2338,24 +2358,26 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
if (ret < 0)
goto out;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wlan cfg req\n");
+
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_CFG_REQ_V01,
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+ ath11k_warn(ab, "failed to send wlan config request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
- ath11k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
+ ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
+ ath11k_warn(ab, "wlan config request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
@@ -2370,9 +2392,11 @@ void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
{
int ret;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware stop\n");
+
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send wlan mode off\n");
+ ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret);
return;
}
}
@@ -2382,15 +2406,17 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
{
int ret;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware start\n");
+
ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
+ ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret);
return ret;
}
ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
return ret;
}
@@ -2404,7 +2430,7 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
return ret;
}
@@ -2414,7 +2440,7 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
(ab->qmi.cal_done == 1),
ATH11K_COLD_BOOT_FW_RESET_DELAY);
if (timeout <= 0) {
- ath11k_warn(ab, "Coldboot Calibration failed - wait ended\n");
+ ath11k_warn(ab, "coldboot calibration timed out\n");
return 0;
}
@@ -2453,13 +2479,14 @@ static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
ret = ath11k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
+ ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
+ ret);
return ret;
}
ret = ath11k_qmi_host_cap_send(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
+ ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
return ret;
}
@@ -2473,7 +2500,7 @@ static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
ret = ath11k_qmi_respond_fw_mem_request(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
+ ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret);
return ret;
}
@@ -2487,7 +2514,8 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
ret = ath11k_qmi_request_target_cap(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
+ ath11k_warn(ab, "failed to request qmi target capabilities: %d\n",
+ ret);
return ret;
}
@@ -2496,13 +2524,13 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
else
ret = ath11k_qmi_load_bdf_qmi(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret);
+ ath11k_warn(ab, "failed to load board data file: %d\n", ret);
return ret;
}
ret = ath11k_qmi_wlanfw_m3_info_send(ab);
if (ret < 0) {
- ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
+ ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret);
return ret;
}
@@ -2523,7 +2551,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
if (msg->mem_seg_len == 0 ||
msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01)
- ath11k_warn(ab, "Invalid memory segment length: %u\n",
+ ath11k_warn(ab, "invalid memory segment length: %u\n",
msg->mem_seg_len);
ab->qmi.mem_seg_count = msg->mem_seg_len;
@@ -2538,14 +2566,14 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
if (ab->bus_params.fixed_mem_region) {
ret = ath11k_qmi_assign_target_mem_chunk(ab);
if (ret) {
- ath11k_warn(ab, "qmi failed to assign target memory: %d\n",
+ ath11k_warn(ab, "failed to assign qmi target memory: %d\n",
ret);
return;
}
} else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
- ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ath11k_warn(ab, "failed to allocate qmi target memory: %d\n",
ret);
return;
}
@@ -2639,7 +2667,7 @@ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
sizeof(*sq), 0);
if (ret) {
- ath11k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
+ ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret);
return ret;
}
@@ -2725,7 +2753,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
break;
default:
- ath11k_warn(ab, "invalid event type: %d", event->type);
+ ath11k_warn(ab, "invalid qmi event type: %d", event->type);
break;
}
kfree(event);
@@ -2746,7 +2774,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
&ath11k_qmi_ops, ath11k_qmi_msg_handlers);
if (ret < 0) {
- ath11k_warn(ab, "failed to initialize qmi handle\n");
+ ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret);
return ret;
}
@@ -2765,7 +2793,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
ATH11K_QMI_WLFW_SERVICE_VERS_V01,
ab->qmi.service_ins_id);
if (ret < 0) {
- ath11k_warn(ab, "failed to add qmi lookup\n");
+ ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret);
destroy_workqueue(ab->qmi.event_wq);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 7bad374cc23a..3d5930330703 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -21,11 +21,13 @@
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
-#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
+#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
#define ATH11K_QMI_CALDB_SIZE 0x480000
#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
+#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
@@ -141,6 +143,7 @@ struct ath11k_qmi {
#define QMI_IPQ8074_FW_MEM_MODE 0xFF
#define HOST_DDR_REGION_TYPE 0x1
#define BDF_MEM_REGION_TYPE 0x2
+#define M3_DUMP_REGION_TYPE 0x3
#define CALDB_MEM_REGION_TYPE 0x4
struct qmi_wlanfw_host_cap_req_msg_v01 {
@@ -216,8 +219,8 @@ struct qmi_wlanfw_ind_register_resp_msg_v01 {
u64 fw_status;
};
-#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124
-#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888
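+/* the larger caps follow from raising the maximum memory segment count
+ * from 32 to 52 above, presumably sized for QCN9074's firmware memory
+ * requests
+ */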
#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 86494da1069a..0cdb4a1f816e 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -414,7 +414,7 @@ struct rx_attention {
#define RX_MPDU_START_RAW_MPDU BIT(0)
-struct rx_mpdu_start {
+struct rx_mpdu_start_ipq8074 {
__le16 info0;
__le16 phy_ppdu_id;
__le16 ast_index;
@@ -440,6 +440,112 @@ struct rx_mpdu_start {
__le32 raw;
} __packed;
+#define RX_MPDU_START_INFO7_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO7_LMAC_PEER_ID_MSB GENMASK(6, 5)
+#define RX_MPDU_START_INFO7_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO7_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO7_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO8_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO8_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO8_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO8_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO9_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO9_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO9_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO9_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO9_MESH_STA GENMASK(9, 8)
+#define RX_MPDU_START_INFO9_BSSID_HIT BIT(10)
+#define RX_MPDU_START_INFO9_BSSID_NUM GENMASK(14, 11)
+#define RX_MPDU_START_INFO9_TID GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO10_RXPCU_MPDU_FLTR GENMASK(1, 0)
+#define RX_MPDU_START_INFO10_SW_FRAME_GRP_ID GENMASK(8, 2)
+#define RX_MPDU_START_INFO10_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO10_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO10_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO10_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO10_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO11_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO11_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO11_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO11_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO11_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO11_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO11_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO11_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO11_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO11_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO11_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO11_TO_DS BIT(17)
+#define RX_MPDU_START_INFO11_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO11_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO12_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO12_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO12_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO12_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO12_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO12_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO12_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO12_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO12_BAR_FRAME BIT(29)
+#define RX_MPDU_START_INFO12_RAW_MPDU BIT(30)
+
+#define RX_MPDU_START_INFO13_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO13_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO13_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO13_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO13_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO13_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO13_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO13_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO13_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO13_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO13_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO13_EOSP BIT(24)
+#define RX_MPDU_START_INFO13_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO13_ORDER BIT(26)
+#define RX_MPDU_START_INFO13_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO13_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO13_DIRECTED BIT(29)
+#define RX_MPDU_START_INFO13_AMSDU_PRESENT BIT(30)
+
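+/* QCN9074 reorders rx_mpdu_start relative to IPQ8074: the REO queue
+ * and PN words lead the descriptor, and the raw 802.11 header fields
+ * move to the tail
+ */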
+struct rx_mpdu_start_qcn9074 {
+ __le32 info7;
+ __le32 reo_queue_desc_lo;
+ __le32 info8;
+ __le32 pn[4];
+ __le32 info9;
+ __le32 peer_meta_data;
+ __le16 info10;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info11;
+ __le32 info12;
+ __le32 info13;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* rxpcu_mpdu_filter_in_category
@@ -672,7 +778,19 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21)
#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24)
-struct rx_msdu_start {
+struct rx_msdu_start_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+} __packed;
+
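+/* identical to the IPQ8074 layout except for the two trailing VLAN
+ * tag fields
+ */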
+struct rx_msdu_start_qcn9074 {
__le16 info0;
__le16 phy_ppdu_id;
__le32 info1;
@@ -682,6 +800,8 @@ struct rx_msdu_start {
__le32 info3;
__le32 ppdu_start_timestamp;
__le32 phy_meta_data;
+ __le16 vlan_ctag_c1;
+ __le16 vlan_stag_c1;
} __packed;
/* rx_msdu_start
@@ -894,7 +1014,7 @@ struct rx_msdu_start {
#define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1)
#define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6)
-struct rx_msdu_end {
+struct rx_msdu_end_ipq8074 {
__le16 info0;
__le16 phy_ppdu_id;
__le16 ip_hdr_cksum;
@@ -917,6 +1037,58 @@ struct rx_msdu_end {
__le16 sa_sw_peer_id;
} __packed;
+#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO2_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO2_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO2_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO2_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO4_SA_IDX_TIMEOUT BIT(0)
+#define RX_MSDU_END_INFO4_DA_IDX_TIMEOUT BIT(1)
+#define RX_MSDU_END_INFO4_MSDU_LIMIT_ERR BIT(2)
+#define RX_MSDU_END_INFO4_FLOW_IDX_TIMEOUT BIT(3)
+#define RX_MSDU_END_INFO4_FLOW_IDX_INVALID BIT(4)
+#define RX_MSDU_END_INFO4_WIFI_PARSER_ERR BIT(5)
+#define RX_MSDU_END_INFO4_AMSDU_PARSER_ERR BIT(6)
+#define RX_MSDU_END_INFO4_SA_IS_VALID BIT(7)
+#define RX_MSDU_END_INFO4_DA_IS_VALID BIT(8)
+#define RX_MSDU_END_INFO4_DA_IS_MCBC BIT(9)
+#define RX_MSDU_END_INFO4_L3_HDR_PADDING GENMASK(11, 10)
+#define RX_MSDU_END_INFO4_FIRST_MSDU BIT(12)
+#define RX_MSDU_END_INFO4_LAST_MSDU BIT(13)
+
+#define RX_MSDU_END_INFO6_AGGR_COUNT GENMASK(7, 0)
+#define RX_MSDU_END_INFO6_FLOW_AGGR_CONTN BIT(8)
+#define RX_MSDU_END_INFO6_FISA_TIMEOUT BIT(9)
+
+struct rx_msdu_end_qcn9074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 mpdu_length_info;
+ __le32 info1;
+ __le32 rule_indication[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le16 tcp_udp_cksum;
+ __le16 info4;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 info6;
+ __le16 cum_l4_cksum;
+ __le16 cum_ip_length;
+} __packed;
+
/* rx_msdu_end
*
* rxpcu_mpdu_filter_in_category
@@ -1190,16 +1362,16 @@ struct rx_mpdu_end {
#define HAL_RX_DESC_HDR_STATUS_LEN 120
-struct hal_rx_desc {
+struct hal_rx_desc_ipq8074 {
__le32 msdu_end_tag;
- struct rx_msdu_end msdu_end;
+ struct rx_msdu_end_ipq8074 msdu_end;
__le32 rx_attn_tag;
struct rx_attention attention;
__le32 msdu_start_tag;
- struct rx_msdu_start msdu_start;
+ struct rx_msdu_start_ipq8074 msdu_start;
u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
__le32 mpdu_start_tag;
- struct rx_mpdu_start mpdu_start;
+ struct rx_mpdu_start_ipq8074 mpdu_start;
__le32 mpdu_end_tag;
struct rx_mpdu_end mpdu_end;
u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
@@ -1209,6 +1381,32 @@ struct hal_rx_desc {
u8 msdu_payload[0];
} __packed;
+struct hal_rx_desc_qcn9074 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_qcn9074 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_qcn9074 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_qcn9074 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+} __packed;
+
+struct hal_rx_desc {
+ union {
+ struct hal_rx_desc_ipq8074 ipq8074;
+ struct hal_rx_desc_qcn9074 qcn9074;
+ } u;
+} __packed;
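+/* hal_rx_desc is now a union of the per-chip layouts; which view to
+ * use is decided at runtime, presumably via per-hardware descriptor
+ * accessor ops added elsewhere in this series
+ */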
+
#define HAL_RX_RU_ALLOC_TYPE_MAX 6
#define RU_26 1
#define RU_52 2
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index cccfd3bd4d27..5ca2d80679b6 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
return 0;
}
-static int
-ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
- u32 len, const struct wmi_pdev_temperature_event *ev)
-{
- const void **tb;
- int ret;
-
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return ret;
- }
-
- ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
- if (!ev) {
- ath11k_warn(ab, "failed to fetch pdev temp ev");
- kfree(tb);
- return -EPROTO;
- }
-
- kfree(tb);
- return 0;
-}
-
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
{
struct ath11k_fw_stats_vdev *i;
@@ -6196,10 +6171,8 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
}
}
- /* TODO: Pending handle beacon implementation
- *if (ieee80211_is_beacon(hdr->frame_control))
- * ath11k_mac_handle_beacon(ar, skb);
- */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath11k_mac_handle_beacon(ar, skb);
ath11k_dbg(ab, ATH11K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
@@ -6418,10 +6391,7 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
switch (roam_ev.reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- /* TODO: Pending beacon miss and connection_loss_work
- * implementation
- * ath11k_mac_handle_beacon_miss(ar, vdev_id);
- */
+ ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
@@ -6849,23 +6819,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k *ar;
- struct wmi_pdev_temperature_event ev = {0};
+ const void **tb;
+ const struct wmi_pdev_temperature_event *ev;
+ int ret;
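+ /* parse the TLV in place of the removed ath11k_pull_pdev_temp_ev()
+ * helper; tb must be freed on every exit path below
+ */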
- if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
- ath11k_warn(ab, "failed to extract pdev temperature event");
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev temp ev");
+ kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
- "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+ "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
- ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
if (!ar) {
- ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+ kfree(tb);
return;
}
- ath11k_thermal_event_temperature(ar, ev.temp);
+ ath11k_thermal_event_temperature(ar, ev->temp);
+
+ kfree(tb);
}
static void ath11k_fils_discovery_event(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index db0c6fa9c9dc..ff61ae34ecdf 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
if (unlikely(r)) {
ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
reg_offset, r);
- return -EIO;
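+ /* the return type is unsigned, so report failure with the
+ * all-ones pattern that the SREV check in hw.c now tests for,
+ * rather than an errno value
+ */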
+ return -1;
}
return be32_to_cpu(val);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5abc2a5526ec..2ca3b86714a9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
srev = REG_READ(ah, AR_SREV);
- if (srev == -EIO) {
+ if (srev == -1) {
ath_err(ath9k_hw_common(ah),
"Failed to read SREV register");
return false;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 01f9c26f9bf3..e9a36dd7144f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -617,7 +617,6 @@ static int ath9k_of_init(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
enum ath_bus_type bus_type = common->bus_ops->ath_bus_type;
- const char *mac;
char eeprom_name[100];
int ret;
@@ -640,9 +639,7 @@ static int ath9k_of_init(struct ath_softc *sc)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
- mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
- ether_addr_copy(common->macaddr, mac);
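+ /* of_get_mac_address() now copies into the buffer itself and
+ * returns an error if the DT has no usable address, so the
+ * IS_ERR() handling is gone
+ */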
+ of_get_mac_address(np, common->macaddr);
return 0;
}
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 0d38100d6e4f..84a8ce0784b1 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -631,14 +631,9 @@ static inline u16 carl9170_get_seq(struct sk_buff *skb)
return get_seq_h(carl9170_get_hdr(skb));
}
-static inline u16 get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
static inline u16 carl9170_get_tid(struct sk_buff *skb)
{
- return get_tid_h(carl9170_get_hdr(skb));
+ return ieee80211_get_tid(carl9170_get_hdr(skb));
}
static inline struct ieee80211_vif *
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6b8446ff48c8..88444fe6d1c6 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -394,7 +394,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
if (unlikely(!sta))
goto out_rcu;
- tid = get_tid_h(hdr);
+ tid = ieee80211_get_tid(hdr);
sta_info = (void *) sta->drv_priv;
tid_info = rcu_dereference(sta_info->agg[tid]);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 823ec6e78a22..02ad44997e87 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1456,7 +1456,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif,
u8 cid = basic->cid;
struct wil_sta_info *sta;
- if (cid < 0 || cid >= wil->max_assoc_sta) {
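+ /* cid is u8 and can never be negative; check only the upper
+ * bound to avoid a tautological comparison
+ */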
+ if (cid >= wil->max_assoc_sta) {
wil_err(wil, "invalid cid %d\n", cid);
return;
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 150a366e8f62..17bcec5f3ff7 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -4053,7 +4053,7 @@ static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates)
{
struct ieee80211_supported_band *sband =
dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)];
- struct ieee80211_rate *rate;
+ const struct ieee80211_rate *rate;
int i;
u16 basic, direct, offset, basic_offset, rateptr;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 7692a2618c97..f64ebff68308 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -2762,7 +2762,7 @@ static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates
{
struct ieee80211_supported_band *sband =
dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ];
- struct ieee80211_rate *rate;
+ const struct ieee80211_rate *rate;
int i;
u16 basic, direct, offset, basic_offset, rateptr;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index ea78fe527c5d..838b09b23abf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -151,7 +151,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
/* Send down the multicast list first. */
cnt = netdev_mc_count(ndev);
buflen = sizeof(cnt) + (cnt * ETH_ALEN);
- buf = kmalloc(buflen, GFP_ATOMIC);
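+ /* this runs from a workqueue, so the allocation may sleep */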
+ buf = kmalloc(buflen, GFP_KERNEL);
if (!buf)
return;
bufp = buf;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 4146faeed344..44ba6f389fa9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -112,7 +112,6 @@ do { \
extern int brcmf_msg_level;
-struct brcmf_bus;
struct brcmf_pub;
#ifdef DEBUG
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h
index ee273e3bb101..e000ef78928c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h
@@ -28,7 +28,7 @@ struct brcmf_usbdev {
int ntxq, nrxq, rxsize;
u32 bus_mtu;
int devid;
- int chiprev; /* chip revsion number */
+ int chiprev; /* chip revision number */
};
/* IO Request Block (IRB) */
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 60db38c38960..fd37d4d2983b 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
local->last_auth = auth_type;
}
+static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
+{
+ int i, status;
+ /* the RIDs below are too large for the stack, so keep this
+ * function out of line; they could move to kmalloc later
+ */
+ tdsRssiRid rssi_rid;
+ CapabilityRid cap_rid;
+
+ kfree(ai->SSID);
+ ai->SSID = NULL;
+ // general configuration (read/modify/write)
+ status = readConfigRid(ai, lock);
+ if (status != SUCCESS) return ERROR;
+
+ status = readCapabilityRid(ai, &cap_rid, lock);
+ if (status != SUCCESS) return ERROR;
+
+ status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
+ if (status == SUCCESS) {
+ if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
+ memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
+ }
+ else {
+ kfree(ai->rssi);
+ ai->rssi = NULL;
+ if (cap_rid.softCap & cpu_to_le16(8))
+ ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
+ else
+ airo_print_warn(ai->dev->name, "unknown received signal "
+ "level scale");
+ }
+ ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
+ set_auth_type(ai, AUTH_OPEN);
+ ai->config.modulation = MOD_CCK;
+
+ if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
+ (cap_rid.extSoftCap & cpu_to_le16(1)) &&
+ micsetup(ai) == SUCCESS) {
+ ai->config.opmode |= MODE_MIC;
+ set_bit(FLAG_MIC_CAPABLE, &ai->flags);
+ }
+
+ /* Save off the MAC */
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac[i] = ai->config.macAddr[i];
+ }
+
+ /* Check to see if there are any insmod configured
+ rates to add */
+ if (rates[0]) {
+ memset(ai->config.rates, 0, sizeof(ai->config.rates));
+ for (i = 0; i < 8 && rates[i]; i++) {
+ ai->config.rates[i] = rates[i];
+ }
+ }
+ set_bit (FLAG_COMMIT, &ai->flags);
+
+ return SUCCESS;
+}
+
+
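+/* setup_card() now delegates the config read to airo_readconfig() so
+ * the large on-stack RIDs there don't inflate its own frame
+ */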
static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
{
Cmd cmd;
@@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
if (lock)
up(&ai->sem);
if (ai->config.len == 0) {
- int i;
- tdsRssiRid rssi_rid;
- CapabilityRid cap_rid;
-
- kfree(ai->SSID);
- ai->SSID = NULL;
- // general configuration (read/modify/write)
- status = readConfigRid(ai, lock);
- if (status != SUCCESS) return ERROR;
-
- status = readCapabilityRid(ai, &cap_rid, lock);
- if (status != SUCCESS) return ERROR;
-
- status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
- if (status == SUCCESS) {
- if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
- memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
- }
- else {
- kfree(ai->rssi);
- ai->rssi = NULL;
- if (cap_rid.softCap & cpu_to_le16(8))
- ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
- else
- airo_print_warn(ai->dev->name, "unknown received signal "
- "level scale");
- }
- ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
- set_auth_type(ai, AUTH_OPEN);
- ai->config.modulation = MOD_CCK;
-
- if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
- (cap_rid.extSoftCap & cpu_to_le16(1)) &&
- micsetup(ai) == SUCCESS) {
- ai->config.opmode |= MODE_MIC;
- set_bit(FLAG_MIC_CAPABLE, &ai->flags);
- }
-
- /* Save off the MAC */
- for (i = 0; i < ETH_ALEN; i++) {
- mac[i] = ai->config.macAddr[i];
- }
-
- /* Check to see if there are any insmod configured
- rates to add */
- if (rates[0]) {
- memset(ai->config.rates, 0, sizeof(ai->config.rates));
- for (i = 0; i < 8 && rates[i]; i++) {
- ai->config.rates[i] = rates[i];
- }
- }
- set_bit (FLAG_COMMIT, &ai->flags);
+ status = airo_readconfig(ai, mac, lock);
+ if (status != SUCCESS)
+ return ERROR;
}
/* Setup the SSIDs if present */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index a0cf78c418ac..903de34028ef 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
}
if (ext->alg != IW_ENCODE_ALG_NONE) {
- memcpy(sec.keys[idx], ext->key, ext->key_len);
- sec.key_sizes[idx] = ext->key_len;
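+ /* ext->key_len comes from userspace; clamp it so the
+ * memcpy cannot overrun sec.keys[idx], which holds
+ * SCM_KEY_LEN bytes
+ */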
+ int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
+
+ memcpy(sec.keys[idx], ext->key, key_len);
+ sec.key_sizes[idx] = key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
sec.encode_alg[idx] = SEC_ALG_WEP;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 4ca8212d4fa4..6ff2674f8466 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -751,9 +751,7 @@ il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
static void
il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
{
-#ifdef CONFIG_IWLEGACY_DEBUG
struct il_rx_pkt *pkt = rxb_addr(rxb);
-#endif
D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 0651a6a416d1..219fed91cac5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1430,10 +1430,8 @@ static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
-#ifdef CONFIG_IWLEGACY_DEBUG
struct il_rx_pkt *pkt = rxb_addr(rxb);
struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
scan_notif->scanned_channels, scan_notif->tsf_low,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index ea1b1bb7ddcb..40877ef1fbf2 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2937,7 +2937,7 @@ do { \
} while (0)
#else
-#define IL_DBG(level, fmt, args...)
+#define IL_DBG(level, fmt, args...) no_printk(fmt, ##args)
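+/* no_printk() keeps printf-style format checking and marks the
+ * arguments as used even when debugging is compiled out, which is why
+ * the #ifdef guards around the users above could be dropped
+ */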
static inline void
il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 0a0e25a3c681..c2315dea9a23 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 62
+#define IWL_22000_UCODE_API_MAX 63
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -48,6 +48,10 @@
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
+#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-"
+#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-"
+#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
+#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
@@ -91,6 +95,14 @@
IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
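The *_MODULE_FIRMWARE macros above paste a firmware prefix onto a stringified API version to build the file name handed to MODULE_FIRMWARE(). A small standalone sketch of the two-level stringification trick (macro names copied for flavor, values illustrative):

#include <stdio.h>

/* Two levels are needed so the argument is macro-expanded before '#'
 * stringifies it -- the same trick as the kernel's __stringify(). */
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-"
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
	IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode"

#define UCODE_API_MAX 63

int main(void)
{
	/* Adjacent string literals concatenate:
	 * prints "iwlwifi-bz-a0-hr-b0-63.ucode" */
	puts(IWL_BZ_A_HR_B_MODULE_FIRMWARE(UCODE_API_MAX));
	return 0;
}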
@@ -357,13 +369,27 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
.umac_prph_offset = 0x300000
};
+const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
-const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
-const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
-const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
+const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
+const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
@@ -373,6 +399,10 @@ const char iwl_ax201_killer_1650s_name[] =
"Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)";
const char iwl_ax201_killer_1650i_name[] =
"Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)";
+const char iwl_ax210_killer_1675w_name[] =
+ "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)";
+const char iwl_ax210_killer_1675x_name[] =
+ "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)";
const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -578,7 +608,7 @@ const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
+const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_SO_A_JF_B_FW_PRE,
IWL_DEVICE_AX210,
@@ -719,6 +749,34 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
+const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = {
+ .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = {
+ .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = {
+ .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = {
+ .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -740,3 +798,7 @@ MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index c4164bf508e5..df1297358379 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -168,7 +168,7 @@ const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
const char iwl9260_killer_1550_name[] =
- "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)";
+ "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
const char iwl9560_killer_1550i_name[] =
"Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
const char iwl9560_killer_1550s_name[] =
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 82a4f7e8ba54..e31bba836c6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2020 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -181,14 +181,13 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
/*
* We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
- * (i.e. it is an integer smaller than 2, as we currently support only
- * 2 revisions).
+	 * (i.e. it is an integer; each caller has to check by itself
+	 * whether the returned revision is supported).
*/
if (data->type != ACPI_TYPE_PACKAGE ||
data->package.count < 2 ||
- data->package.elements[0].type != ACPI_TYPE_INTEGER ||
- data->package.elements[0].integer.value > 1) {
- IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
+ data->package.elements[0].type != ACPI_TYPE_INTEGER) {
+ IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n");
return ERR_PTR(-EINVAL);
}
@@ -696,3 +695,70 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return 0;
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
+
+static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func)
+{
+ union acpi_object *obj;
+ u32 ret;
+
+ obj = iwl_acpi_get_dsm_object(dev, 0,
+ eval_func, NULL,
+ &iwl_guid);
+
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM func '%d': Got Error in obj = %ld\n",
+ eval_func,
+ PTR_ERR(obj));
+ return 0;
+ }
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM func '%d' did not return a valid object, type=%d\n",
+ eval_func,
+ obj->type);
+ ret = 0;
+ goto out;
+ }
+
+ ret = obj->integer.value;
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method evaluated: func='%d', ret=%d\n",
+ eval_func,
+ ret);
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
+__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ u32 ret;
+ __le32 config_bitmap = 0;
+
+ /*
+ ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
+ */
+ ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2);
+
+ if (ret == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+
+ /*
+ ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
+ */
+ ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD);
+
+ if (ret == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+
+ else if (ret == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+
+ return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 030c50082568..d16e6ec08c9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -53,8 +53,8 @@
#define ACPI_WGDS_TABLE_SIZE 3
-#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \
- IWL_NUM_SUB_BANDS) + 2)
+#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
@@ -77,6 +77,7 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_QUERY = 0,
DSM_FUNC_DISABLE_SRD = 1,
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
};
enum iwl_dsm_values_srd {
@@ -160,6 +161,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array,
int *block_list_size);
+__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -235,5 +238,11 @@ static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
{
return -ENOENT;
}
+
+static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index ceeef8749765..0e38eb1cd75d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -147,6 +147,10 @@ enum iwl_tof_mcsi_enable {
* @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure
* is valid
* @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT: enable/disable NDP ranging support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid
*/
enum iwl_tof_responder_cmd_valid_field {
IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0),
@@ -162,6 +166,9 @@ enum iwl_tof_responder_cmd_valid_field {
IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10),
IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11),
IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12),
+ IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22),
+ IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23),
+ IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24),
};
/**
@@ -176,6 +183,9 @@ enum iwl_tof_responder_cmd_valid_field {
* @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support
* @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail
* @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask
+ * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging
+ * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the
+ * initiator supports it
*/
enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0),
@@ -188,6 +198,8 @@ enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9),
IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10),
IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK,
+ IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24),
+ IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25),
};
/**
@@ -226,7 +238,7 @@ struct iwl_tof_responder_config_cmd_v6 {
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
/**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * struct iwl_tof_responder_config_cmd_v7 - ToF AP mode (for debug)
* @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
* @responder_cfg_flags: &iwl_tof_responder_cfg_flags
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
@@ -245,7 +257,7 @@ struct iwl_tof_responder_config_cmd_v6 {
* @bssid: Current AP BSSID
* @reserved2: reserved
*/
-struct iwl_tof_responder_config_cmd {
+struct iwl_tof_responder_config_cmd_v7 {
__le32 cmd_valid_fields;
__le32 responder_cfg_flags;
u8 format_bw;
@@ -259,7 +271,56 @@ struct iwl_tof_responder_config_cmd {
__le16 specific_calib;
u8 bssid[ETH_ALEN];
__le16 reserved2;
-} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */
+
+#define IWL_RESPONDER_STS_POS 3
+#define IWL_RESPONDER_TOTAL_LTF_POS 6
+
+/**
+ * struct iwl_tof_responder_config_cmd_v8 - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @r2i_ndp_params: parameters for R2I NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max number of total LTFs
+ * (&enum ieee80211_range_params_max_total_ltf)
+ * @i2r_ndp_params: parameters for I2R NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max number of total LTFs
+ * (&enum ieee80211_range_params_max_total_ltf)
+ */
+struct iwl_tof_responder_config_cmd_v8 {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */
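The r2i_ndp_params/i2r_ndp_params bytes pack three sub-fields at the bit positions defined above. A minimal sketch of composing one such byte, assuming the field widths given in the kernel-doc:

#include <stdint.h>

#define IWL_RESPONDER_STS_POS 3
#define IWL_RESPONDER_TOTAL_LTF_POS 6

/* Pack max LTF repetitions (bits 0-2), max spatial streams (bits 3-5)
 * and max total LTFs (bits 6-7) into one NDP-parameters byte. */
static uint8_t pack_ndp_params(uint8_t max_rep, uint8_t max_sts,
			       uint8_t max_total_ltf)
{
	return (max_rep & 0x7) |
	       ((max_sts & 0x7) << IWL_RESPONDER_STS_POS) |
	       ((max_total_ltf & 0x3) << IWL_RESPONDER_TOTAL_LTF_POS);
}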
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
@@ -422,10 +483,12 @@ struct iwl_tof_range_req_ap_entry_v2 {
* driver.
* @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow
* @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow
- * @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement
+ * @IWL_INITIATOR_AP_FLAGS_SECURED: request secure LTF measurement
* @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback
* @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request
* instead of fw internal values.
+ * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR
+ * frames with protected management frames.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -440,6 +503,7 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11),
IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12),
IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
+ IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
};
/**
@@ -657,6 +721,79 @@ struct iwl_tof_range_req_ap_entry_v7 {
u8 tx_pn[IEEE80211_CCMP_PN_LEN];
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
+#define IWL_LOCATION_MAX_STS_POS 3
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *	periodicity in units of 100ms. Ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: reserved
+ * @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: reserved
+ * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation.
+ * One of &enum ieee80211_range_params_max_total_ltf.
+ * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation.
+ * One of &enum ieee80211_range_params_max_total_ltf.
+ */
+struct iwl_tof_range_req_ap_entry_v8 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ u8 r2i_max_total_ltf;
+ u8 i2r_max_total_ltf;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */
+
/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
@@ -878,6 +1015,34 @@ struct iwl_tof_range_req_cmd_v11 {
struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
+/**
+ * struct iwl_tof_range_req_cmd_v12 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2.
+ */
+struct iwl_tof_range_req_cmd_v12 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index fbca9dd872e7..dc8f2777e944 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -414,6 +414,9 @@ enum iwl_lari_config_masks {
LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3),
};
+#define IWL_11AX_UKRAINE_MASK 3
+#define IWL_11AX_UKRAINE_SHIFT 8
+
/**
* struct iwl_lari_config_change_cmd_v1 - change LARI configuration
* @config_bitmap: bit map of the config commands. each bit will trigger a
@@ -435,6 +438,21 @@ struct iwl_lari_config_change_cmd_v2 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */
/**
+ * struct iwl_lari_config_change_cmd_v3 - change LARI configuration
+ * @config_bitmap: bit map of the config commands. each bit will trigger a
+ * different predefined FW config operation
+ * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets
+ * @oem_11ax_allow_bitmap: bitmap of 11ax allowed MCCs.
+ *	For each supported country, the bit field holds a pair: a regulatory
+ *	override bit and an 11ax mode bit.
+ */
+struct iwl_lari_config_change_cmd_v3 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
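IWL_11AX_UKRAINE_MASK and IWL_11AX_UKRAINE_SHIFT above carve a two-bit per-country field out of oem_11ax_allow_bitmap. A standalone sketch of writing and reading such a field (which bit means override versus 11ax mode is illustrative here, not taken from the source):

#include <stdint.h>
#include <stdio.h>

#define IWL_11AX_UKRAINE_MASK 3   /* two bits per country */
#define IWL_11AX_UKRAINE_SHIFT 8

int main(void)
{
	uint32_t bitmap = 0;

	/* Clear the two-bit field, then write an illustrative value 0b10. */
	bitmap &= ~((uint32_t)IWL_11AX_UKRAINE_MASK << IWL_11AX_UKRAINE_SHIFT);
	bitmap |= (2u & IWL_11AX_UKRAINE_MASK) << IWL_11AX_UKRAINE_SHIFT;

	/* Read it back. */
	uint32_t field = (bitmap >> IWL_11AX_UKRAINE_SHIFT) &
			 IWL_11AX_UKRAINE_MASK;
	printf("field=%u bitmap=0x%08x\n", field, bitmap);
	return 0;
}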
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 798417182d54..86445385f072 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -274,7 +274,7 @@ enum iwl_dev_tx_power_cmd_mode {
#define IWL_NUM_CHAIN_TABLES 1
#define IWL_NUM_CHAIN_TABLES_V2 2
#define IWL_NUM_CHAIN_LIMITS 2
-#define IWL_NUM_SUB_BANDS 5
+#define IWL_NUM_SUB_BANDS_V1 5
#define IWL_NUM_SUB_BANDS_V2 11
/**
@@ -300,7 +300,7 @@ struct iwl_dev_tx_power_common {
* @per_chain: per chain restrictions
*/
struct iwl_dev_tx_power_cmd_v3 {
- __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
#define IWL_DEV_MAX_TX_POWER 0x7FFF
@@ -313,7 +313,7 @@ struct iwl_dev_tx_power_cmd_v3 {
* @reserved: reserved (padding)
*/
struct iwl_dev_tx_power_cmd_v4 {
- __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
u8 enable_ack_reduction;
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
@@ -332,7 +332,7 @@ struct iwl_dev_tx_power_cmd_v4 {
* BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
*/
struct iwl_dev_tx_power_cmd_v5 {
- __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
u8 enable_ack_reduction;
u8 per_chain_restriction_changed;
u8 reserved[2];
@@ -454,21 +454,23 @@ struct iwl_geo_tx_power_profiles_resp {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
- * @v1: version 1, table revision = 0
- * @v2: version 2, table revision = 1
+ * @v1: version 1
+ * @v2: version 2
*
- * @enabled: 1 if PPAG is enabled, 0 otherwise
+ * @flags: bit 0 - indicates enablement of PPAG for ETSI
+ * bit 1 - indicates enablement of PPAG for CHINA BIOS
+ * bit 1 can be used only in v3 (identical to v2)
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
- __le32 enabled;
- s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+ __le32 flags;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
s8 reserved[2];
} v1;
struct {
- __le32 enabled;
+ __le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
} v2;
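With 'enabled' widened into 'flags', ETSI and China enablement become individual bits rather than a boolean. A minimal sketch of testing them; the mask names mirror IWL_PPAG_MASK/IWL_PPAG_ETSI_MASK introduced in mvm/fw.c later in this diff, and the China mask name is an assumption:

#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define IWL_PPAG_ETSI_MASK  BIT(0)
#define IWL_PPAG_CHINA_MASK BIT(1) /* assumed name; meaningful only for v2/v3 tables */
#define IWL_PPAG_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)

/* The PPAG command is worth sending only if some enablement bit
 * survives the masking -- the same check the driver makes below. */
static bool ppag_enabled(uint32_t flags)
{
	return (flags & IWL_PPAG_MASK) != 0;
}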
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 2c74db823778..3f13b572915a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -780,36 +780,6 @@ struct iwl_rxq_sync_notification {
} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
/**
- * enum iwl_mvm_rxq_notif_type - Internal message identifier
- *
- * @IWL_MVM_RXQ_EMPTY: empty sync notification
- * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
- * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
- */
-enum iwl_mvm_rxq_notif_type {
- IWL_MVM_RXQ_EMPTY,
- IWL_MVM_RXQ_NOTIF_DEL_BA,
- IWL_MVM_RXQ_NSSN_SYNC,
-};
-
-/**
- * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
- * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
- * FW is agnostic to the payload, so there are no endianity requirements.
- *
- * @type: value from &iwl_mvm_rxq_notif_type
- * @sync: ctrl path is waiting for all notifications to be received
- * @cookie: internal cookie to identify old notifications
- * @data: payload
- */
-struct iwl_mvm_internal_rxq_notif {
- u16 type;
- u16 sync;
- u32 cookie;
- u8 data[];
-} __packed;
-
-/**
* enum iwl_mvm_pm_event - type of station PM event
* @IWL_MVM_PM_EVENT_AWAKE: station woke up
* @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 6b8ca35cec1a..b2605aefc290 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -634,6 +634,12 @@ enum iwl_umac_scan_general_flags2 {
* @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and
* 5.2Ghz bands scan, trigger scan on 6GHz band to discover
* the reported collocated APs
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN: at the end of 2.4GHz and 5GHz
+ *	bands scan, if no APs were discovered, allow scan to continue and scan
+ * 6GHz PSC channels in order to discover country information.
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case
+ * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is
+ * activated over 6GHz PSC channels, filter in beacons and probe responses.
*/
enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
@@ -649,6 +655,8 @@ enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11),
IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 504729663c35..cc4e18ca9566 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -2559,7 +2559,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
fwrt->dump.wks[idx].dump_data = *dump_data;
- IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id);
+ IWL_WARN(fwrt,
+ "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
+ tp_id, (u32)(delay / USEC_PER_MSEC));
schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 35dffcaf5aba..f9c5cf538ad1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -362,6 +362,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
* @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in
* reset flow
+ * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
+ * channels even when these are not enabled.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -408,6 +410,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
+ IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 1dee4714e505..153a3529e77a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -116,6 +116,9 @@ struct fw_img {
#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
#define PAGING_TLV_SECURE_MASK 1
+/* FW MSB Mask for regions/cache_control */
+#define FW_ADDR_CACHE_CONTROL 0xC0000000UL
+
/**
* struct iwl_fw_paging
* @fw_paging_phys: page phy pointer
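FW_ADDR_CACHE_CONTROL marks the two MSBs the firmware may set as cache-control hints on reported addresses; consumers (e.g. the UMAC error-table read in mvm/fw.c below) must mask them off before treating the value as a pointer. A tiny sketch (the reported value is made up):

#include <stdint.h>
#include <stdio.h>

#define FW_ADDR_CACHE_CONTROL 0xC0000000UL /* MSB flags, not part of the address */

int main(void)
{
	uint32_t reported = 0x80402A60; /* made-up value with a cache bit set */
	uint32_t addr = reported & ~FW_ADDR_CACHE_CONTROL;

	printf("error table at 0x%08x\n", addr); /* 0x00402a60 */
	return 0;
}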
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 986913f2fbd5..2ecec00db9da 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -10,6 +10,8 @@
#include "fw/api/soc.h"
#include "fw/api/commands.h"
+#include "fw/api/rx.h"
+#include "fw/api/datapath.h"
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
@@ -95,3 +97,60 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
return ret;
}
IWL_EXPORT_SYMBOL(iwl_set_soc_latency);
+
+int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
+{
+ int i, num_queues, size, ret;
+ struct iwl_rfh_queue_config *cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ /*
+ * The default queue is configured via context info, so if we
+ * have a single queue, there's nothing to do here.
+ */
+ if (fwrt->trans->num_rx_queues == 1)
+ return 0;
+
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000)
+ return 0;
+
+ /* skip the default queue */
+ num_queues = fwrt->trans->num_rx_queues - 1;
+
+ size = struct_size(cmd, data, num_queues);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ hcmd.data[0] = cmd;
+ hcmd.len[0] = size;
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+
+ kfree(cmd);
+
+ if (ret)
+ IWL_ERR(fwrt, "Failed to configure RX queues: %d\n", ret);
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_configure_rxq);
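iwl_configure_rxq() sizes its command with struct_size(), i.e. sizeof(*cmd) plus num_queues trailing flexible-array elements (the kernel helper additionally saturates on overflow). A userspace sketch of the same allocation pattern, with illustrative struct names:

#include <stdint.h>
#include <stdlib.h>

struct queue_cfg {
	uint8_t q_num;
	uint64_t fr_bd_cb;
};

struct queue_cmd {
	uint32_t num_queues;
	struct queue_cfg data[]; /* flexible array member */
};

/* Allocate a command with room for n trailing per-queue entries --
 * what struct_size(cmd, data, n) computes in the kernel. */
static struct queue_cmd *queue_cmd_alloc(uint32_t n)
{
	size_t size = sizeof(struct queue_cmd) + n * sizeof(struct queue_cfg);
	struct queue_cmd *cmd = calloc(1, size);

	if (cmd)
		cmd->num_queues = n;
	return cmd;
}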
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 0dba5444f2db..35af85a5430b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -190,5 +190,6 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt);
+int iwl_configure_rxq(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c4f5da76f1c0..b35ffdfdf14b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -416,6 +416,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_SNJ 0x42
#define IWL_CFG_MAC_TYPE_SOF 0x43
#define IWL_CFG_MAC_TYPE_MA 0x44
+#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -477,6 +478,7 @@ extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -501,8 +503,10 @@ extern const char iwl_ax200_killer_1650w_name[];
extern const char iwl_ax200_killer_1650x_name[];
extern const char iwl_ax201_killer_1650s_name[];
extern const char iwl_ax201_killer_1650i_name[];
-extern const char iwl_ma_name[];
+extern const char iwl_ax210_killer_1675w_name[];
+extern const char iwl_ax210_killer_1675x_name[];
extern const char iwl_ax211_name[];
+extern const char iwl_ax221_name[];
extern const char iwl_ax411_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -594,7 +598,7 @@ extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
+extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
@@ -612,6 +616,10 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 6ccde7e30211..db312abd2e09 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -578,6 +578,9 @@ enum msix_fh_int_causes {
MSIX_FH_INT_CAUSES_FH_ERR = BIT(21),
};
+/* The low 16 bits are for rx data queue indication */
+#define MSIX_FH_INT_CAUSES_DATA_QUEUE 0xffff
+
/*
* Causes for the HW register interrupts
*/
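MSIX_FH_INT_CAUSES_DATA_QUEUE above says the low 16 bits of the FH cause register carry one bit per RX data queue. A small decode sketch (the register value is made up):

#include <stdint.h>
#include <stdio.h>

#define MSIX_FH_INT_CAUSES_DATA_QUEUE 0xffff

int main(void)
{
	uint32_t causes = 0x0005; /* made-up: queues 0 and 2 fired */
	uint32_t queues = causes & MSIX_FH_INT_CAUSES_DATA_QUEUE;

	for (int q = 0; q < 16; q++)
		if (queues & (1u << q))
			printf("rx queue %d has work\n", q);
	return 0;
}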
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 579bc81cc0ae..4cd8c39cc3e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
const struct firmware *fw;
int res;
- if (!iwlwifi_mod_params.enable_ini)
+ if (!iwlwifi_mod_params.enable_ini ||
+ trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
return;
res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index eb168dc535d4..884750bf7840 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -550,8 +550,6 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
return 0;
}
-#define FW_ADDR_CACHE_CONTROL 0xC0000000
-
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c5a1e84dc1ab..fc75d049046d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -550,9 +550,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
.mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU |
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
.mac_cap_info[5] =
IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 |
@@ -583,11 +583,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP7_MAX_NC_1,
.phy_cap_info[8] =
@@ -636,9 +636,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP2_BSR,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
.mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
.mac_cap_info[5] =
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
.phy_cap_info[0] =
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index e6d2e0994317..cf9c64090014 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -176,6 +176,8 @@ iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
struct sk_buff *skb)
{
+ if (WARN_ON_ONCE(!op_mode))
+ return;
op_mode->ops->free_skb(op_mode, skb);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 60e0db4a5e20..9236f9106826 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2020 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
- int txcmd_size, txcmd_align;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
#endif
@@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
return NULL;
trans->trans_cfg = cfg_trans;
- if (!cfg_trans->gen2) {
+
+#ifdef CONFIG_LOCKDEP
+ lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+ &__key, 0);
+#endif
+
+ trans->dev = dev;
+ trans->ops = ops;
+ trans->num_rx_queues = 1;
+
+ WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+
+ if (trans->trans_cfg->use_tfh) {
+ trans->txqs.tfd.addr_size = 64;
+ trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans->txqs.tfd.addr_size = 36;
+ trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
+
+ return trans;
+}
+
+int iwl_trans_init(struct iwl_trans *trans)
+{
+ int txcmd_size, txcmd_align;
+
+ if (!trans->trans_cfg->gen2) {
txcmd_size = sizeof(struct iwl_tx_cmd);
txcmd_align = sizeof(void *);
- } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
txcmd_align = 64;
} else {
@@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
- return ERR_PTR(-EINVAL);
-
-#ifdef CONFIG_LOCKDEP
- lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
- &__key, 0);
-#endif
-
- trans->dev = dev;
- trans->ops = ops;
- trans->num_rx_queues = 1;
+ if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
+ return -EINVAL;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
@@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
* allocate here.
*/
if (trans->trans_cfg->gen2) {
- trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
+ trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
trans->txqs.bc_tbl_size,
256, 0);
if (!trans->txqs.bc_pool)
- return NULL;
+ return -ENOMEM;
}
- if (trans->trans_cfg->use_tfh) {
- trans->txqs.tfd.addr_size = 64;
- trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
- } else {
- trans->txqs.tfd.addr_size = 36;
- trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
+ /* Some things must not change even if the config does */
+ WARN_ON(trans->txqs.tfd.addr_size !=
+ (trans->trans_cfg->use_tfh ? 64 : 36));
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
@@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
txcmd_size, txcmd_align,
SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
- return NULL;
-
- WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+ return -ENOMEM;
trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans->txqs.tso_hdr_page) {
kmem_cache_destroy(trans->dev_cmd_pool);
- return NULL;
+ return -ENOMEM;
}
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans->wait_command_queue);
- return trans;
+ return 0;
}
void iwl_trans_free(struct iwl_trans *trans)
{
int i;
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans->txqs.tso_hdr_page, i);
+ if (trans->txqs.tso_hdr_page) {
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans->txqs.tso_hdr_page, i);
- if (p->page)
- __free_page(p->page);
- }
+ if (p && p->page)
+ __free_page(p->page);
+ }
- free_percpu(trans->txqs.tso_hdr_page);
+ free_percpu(trans->txqs.tso_hdr_page);
+ }
kmem_cache_destroy(trans->dev_cmd_pool);
}
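This hunk splits construction in two: iwl_trans_alloc() only fills in fields, while the new iwl_trans_init() performs the fallible pool/per-cpu allocations, so callers can adjust the configuration between the phases and get a real error code back. A generic sketch of the two-phase shape (not the literal transport code):

#include <stdlib.h>

struct widget {
	char *buf; /* allocated only in the init phase */
};

/* Phase 1: plain field setup; the only failure is the allocation itself. */
static struct widget *widget_alloc(void)
{
	return calloc(1, sizeof(struct widget));
}

/* Phase 2: fallible resource allocation, run after the caller has had
 * a chance to tweak the configuration -- the same shape as the
 * iwl_trans_alloc()/iwl_trans_init() split above. */
static int widget_init(struct widget *w)
{
	w->buf = malloc(4096);
	return w->buf ? 0 : -1;
}

static void widget_free(struct widget *w)
{
	if (w)
		free(w->buf);
	free(w);
}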
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 4a5822c1be13..bf569f856ad8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1267,7 +1267,8 @@ static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
return -ENOTSUPP;
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ /* No need to wait if the firmware is not alive */
+ if (trans->state != IWL_TRANS_FW_ALIVE) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
@@ -1438,6 +1439,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans);
+int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 617b41ee5801..1343f25f1090 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -93,6 +93,15 @@
#define IWL_MVM_ENABLE_EBS 1
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
+#define IWL_MVM_FTM_R2I_MAX_REP 7
+#define IWL_MVM_FTM_I2R_MAX_REP 7
+#define IWL_MVM_FTM_R2I_MAX_STS 1
+#define IWL_MVM_FTM_I2R_MAX_STS 1
+#define IWL_MVM_FTM_R2I_MAX_TOTAL_LTF 3
+#define IWL_MVM_FTM_I2R_MAX_TOTAL_LTF 3
+#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
+#define IWL_MVM_FTM_RESP_NDP_SUPPORT true
+#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
@@ -108,5 +117,7 @@
#define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016
#define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2
#define IWL_MVM_DISABLE_AP_FILS false
+#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
+#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index a7dc85c704a9..2e28cf299ef4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2028,6 +2028,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 34ddef97b099..63d65018d098 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1210,10 +1210,10 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
return -EINVAL;
- rcu_read_lock();
+ mutex_lock(&mvm->mutex);
for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, true);
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false);
if (!vif)
continue;
@@ -1253,18 +1253,16 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
&beacon_cmd.tim_size,
beacon->data, beacon->len);
- mutex_lock(&mvm->mutex);
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
mutex_unlock(&mvm->mutex);
dev_kfree_skb(beacon);
- rcu_read_unlock();
return 0;
out_err:
- rcu_read_unlock();
+ mutex_unlock(&mvm->mutex);
return -EINVAL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index a4fd0bf9ba19..a456b8a0ae58 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -490,6 +490,15 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->bss_conf.assoc &&
!memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+ if (sta->mfp)
+ FTM_PUT_FLAG(PMF);
+
+ rcu_read_unlock();
target->sta_id = mvmvif->ap_sta_id;
} else {
@@ -684,6 +693,19 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
}
+static int
+iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v7 *target)
+{
+ int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
+ if (err)
+ return err;
+
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ return err;
+}
+
static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
@@ -704,11 +726,67 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
- err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
+ err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static void
+iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
+ struct iwl_tof_range_req_ap_entry_v8 *target)
+{
+ /* Only 2 STS are supported on Tx */
+ u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MVM_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
+ target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
+ target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
+ target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
+}
+
+static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v12 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
+ u32 flags;
+
+ err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
if (err)
return err;
- iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ iwl_mvm_ftm_set_ndp_params(mvm, target);
+
+ /*
+ * If secure LTF is turned off, replace the flag with PMF only
+ */
+ flags = le32_to_cpu(target->initiator_ap_flags);
+ if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
+ !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+ flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+ target->initiator_ap_flags = cpu_to_le32(flags);
+ }
}
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
@@ -732,6 +810,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+ case 12:
+ err = iwl_mvm_ftm_start_v12(mvm, vif, req);
+ break;
case 11:
err = iwl_mvm_ftm_start_v11(mvm, vif, req);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 996f45c19f10..5a249ea97eb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -75,6 +75,24 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
return 0;
}
+static void
+iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
+ struct iwl_tof_responder_config_cmd_v8 *cmd)
+{
+ /* Up to 2 R2I STS are allowed on the responder */
+ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
+ IWL_MVM_FTM_R2I_MAX_STS : 1;
+
+ cmd->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (r2i_max_sts << IWL_RESPONDER_STS_POS) |
+ (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS);
+ cmd->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (IWL_MVM_FTM_I2R_MAX_STS << IWL_RESPONDER_STS_POS) |
+ (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS);
+ cmd->cmd_valid_fields |=
+ cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS);
+}
+
static int
iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -82,11 +100,11 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/*
- * The command structure is the same for versions 6 and 7, (only the
+ * The command structure is the same for versions 6, 7 and 8 (only the
* field interpretation is different), so the same struct can be used
* for all cases.
*/
- struct iwl_tof_responder_config_cmd cmd = {
+ struct iwl_tof_responder_config_cmd_v8 cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
@@ -100,7 +118,10 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- if (cmd_ver == 7)
+	if (cmd_ver == 8)
+ iwl_mvm_ftm_responder_set_ndp(mvm, &cmd);
+
+ if (cmd_ver >= 7)
err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
&cmd.ctrl_ch_position);
else
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 5ee64f7f3c85..8aa5f1a2c58c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -29,6 +29,9 @@
#define UCODE_VALID_OK cpu_to_le32(0x1)
+#define IWL_PPAG_MASK 3
+#define IWL_PPAG_ETSI_MASK BIT(0)
+
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -70,56 +73,6 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
-static int iwl_configure_rxq(struct iwl_mvm *mvm)
-{
- int i, num_queues, size, ret;
- struct iwl_rfh_queue_config *cmd;
- struct iwl_host_cmd hcmd = {
- .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- };
-
- /*
- * The default queue is configured via context info, so if we
- * have a single queue, there's nothing to do here.
- */
- if (mvm->trans->num_rx_queues == 1)
- return 0;
-
- /* skip the default queue */
- num_queues = mvm->trans->num_rx_queues - 1;
-
- size = struct_size(cmd, data, num_queues);
-
- cmd = kzalloc(size, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- cmd->num_queues = num_queues;
-
- for (i = 0; i < num_queues; i++) {
- struct iwl_trans_rxq_dma_data data;
-
- cmd->data[i].q_num = i + 1;
- iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
-
- cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
- cmd->data[i].urbd_stts_wrptr =
- cpu_to_le64(data.urbd_stts_wrptr);
- cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
- cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
- }
-
- hcmd.data[0] = cmd;
- hcmd.len[0] = size;
-
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
-
- kfree(cmd);
-
- return ret;
-}
-
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -233,7 +186,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
mvm->trans->dbg.lmac_error_event_table[1] =
le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
- umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
+ umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
+ ~FW_ADDR_CACHE_CONTROL;
if (umac_error_table) {
if (umac_error_table >=
@@ -773,16 +727,16 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
} else if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
len = sizeof(cmd.v5);
- n_subbands = IWL_NUM_SUB_BANDS;
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
per_chain = cmd.v5.per_chain[0][0];
} else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
len = sizeof(cmd.v4);
- n_subbands = IWL_NUM_SUB_BANDS;
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
per_chain = cmd.v4.per_chain[0][0];
} else {
len = sizeof(cmd.v3);
- n_subbands = IWL_NUM_SUB_BANDS;
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
per_chain = cmd.v3.per_chain[0][0];
}
@@ -909,46 +863,50 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
{
- union acpi_object *wifi_pkg, *data, *enabled;
+ union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
s8 *gain;
/*
- * The 'enabled' field is the same in v1 and v2 so we can just
+ * The 'flags' field is the same in v1 and in v2 so we can just
* use v1 to access it.
*/
- mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
+
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table revision 1 */
+ /* try to read ppag table rev 2 or 1 (both have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev != 1) {
+ if (tbl_rev == 1 || tbl_rev == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = mvm->fwrt.ppag_table.v2.gain[0];
+ mvm->fwrt.ppag_ver = tbl_rev;
+ IWL_DEBUG_RADIO(mvm,
+ "Reading PPAG table v2 (tbl_rev=%d)\n",
+ tbl_rev);
+ goto read_table;
+ } else {
ret = -EINVAL;
goto out_free;
}
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = mvm->fwrt.ppag_table.v2.gain[0];
- mvm->fwrt.ppag_ver = 2;
- IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n");
- goto read_table;
}
/* try to read ppag table revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
+ ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
- num_sub_bands = IWL_NUM_SUB_BANDS;
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = mvm->fwrt.ppag_table.v1.gain[0];
- mvm->fwrt.ppag_ver = 1;
+ mvm->fwrt.ppag_ver = 0;
IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
goto read_table;
}
@@ -956,15 +914,17 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
read_table:
- enabled = &wifi_pkg->package.elements[1];
- if (enabled->type != ACPI_TYPE_INTEGER ||
- (enabled->integer.value != 0 && enabled->integer.value != 1)) {
+ flags = &wifi_pkg->package.elements[1];
+
+ if (flags->type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
- mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
- if (!mvm->fwrt.ppag_table.v1.enabled) {
+ mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value &
+ IWL_PPAG_MASK);
+
+ if (!mvm->fwrt.ppag_table.v1.flags) {
ret = 0;
goto out_free;
}
@@ -992,12 +952,13 @@ read_table:
(j != 0 &&
(gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
- mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
}
}
+
ret = 0;
out_free:
kfree(data);
@@ -1015,7 +976,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
"PPAG capability not supported by FW, command not sent.\n");
return 0;
}
- if (!mvm->fwrt.ppag_table.v1.enabled) {
+ if (!mvm->fwrt.ppag_table.v1.flags) {
IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
return 0;
}
@@ -1024,20 +985,28 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
PER_PLATFORM_ANT_GAIN_CMD,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS;
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = mvm->fwrt.ppag_table.v1.gain[0];
cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
- if (mvm->fwrt.ppag_ver == 2) {
+ if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
- "PPAG table is v2 but FW supports v1, sending truncated table\n");
+ "PPAG table rev is %d but FW supports v1, sending truncated table\n",
+ mvm->fwrt.ppag_ver);
+ mvm->fwrt.ppag_table.v1.flags &=
+ cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
- } else if (cmd_ver == 2) {
+ } else if (cmd_ver == 2 || cmd_ver == 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = mvm->fwrt.ppag_table.v2.gain[0];
cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
- if (mvm->fwrt.ppag_ver == 1) {
+ if (mvm->fwrt.ppag_ver == 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v1 but FW supports v2, sending padded table\n");
+ } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
+ mvm->fwrt.ppag_table.v1.flags &=
+ cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else {
IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
@@ -1102,7 +1071,7 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
- mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
return 0;
}
@@ -1144,33 +1113,6 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
-static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
-{
- u8 value;
-
- int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2,
- &iwl_guid, &value);
-
- if (ret < 0)
- IWL_DEBUG_RADIO(mvm,
- "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
- ret);
-
- else if (value >= DSM_VALUE_INDONESIA_MAX)
- IWL_DEBUG_RADIO(mvm,
- "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
- value);
-
- else if (value == DSM_VALUE_INDONESIA_ENABLE) {
- IWL_DEBUG_RADIO(mvm,
- "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
- return DSM_VALUE_INDONESIA_ENABLE;
- }
- /* default behaviour is disabled */
- return DSM_VALUE_INDONESIA_DISABLE;
-}
-
static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
u8 value;
@@ -1195,64 +1137,27 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
return DSM_VALUE_RFI_DISABLE;
}
-static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
-{
- u8 value;
- int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
- DSM_FUNC_DISABLE_SRD,
- &iwl_guid, &value);
-
- if (ret < 0)
- IWL_DEBUG_RADIO(mvm,
- "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
- ret);
-
- else if (value >= DSM_VALUE_SRD_MAX)
- IWL_DEBUG_RADIO(mvm,
- "DSM function DISABLE_SRD return invalid value, value=%d\n",
- value);
-
- else if (value == DSM_VALUE_SRD_PASSIVE) {
- IWL_DEBUG_RADIO(mvm,
- "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
- return DSM_VALUE_SRD_PASSIVE;
-
- } else if (value == DSM_VALUE_SRD_DISABLE) {
- IWL_DEBUG_RADIO(mvm,
- "Evaluated DSM function DISABLE_SRD: disabling SRD\n");
- return DSM_VALUE_SRD_DISABLE;
- }
- /* default behaviour is active */
- return DSM_VALUE_SRD_ACTIVE;
-}
-
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
- u8 ret;
int cmd_ret;
- struct iwl_lari_config_change_cmd_v2 cmd = {};
-
- if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE)
- cmd.config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ struct iwl_lari_config_change_cmd_v3 cmd = {};
- ret = iwl_mvm_eval_dsm_disable_srd(mvm);
- if (ret == DSM_VALUE_SRD_PASSIVE)
- cmd.config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
-
- else if (ret == DSM_VALUE_SRD_DISABLE)
- cmd.config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
/* apply more config masks here */
if (cmd.config_bitmap) {
- size_t cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw,
- REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE, 1) == 2 ?
- sizeof(struct iwl_lari_config_change_cmd_v2) :
- sizeof(struct iwl_lari_config_change_cmd_v1);
+ size_t cmd_size;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE, 1);
+ if (cmd_ver == 3)
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
+ else if (cmd_ver == 2)
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
+ else
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
le32_to_cpu(cmd.config_bitmap));
@@ -1485,14 +1390,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* Init RSS configuration */
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
- ret = iwl_configure_rxq(mvm);
- if (ret) {
- IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
- ret);
- goto error;
- }
- }
+ ret = iwl_configure_rxq(&mvm->fwrt);
+ if (ret)
+ goto error;
if (iwl_mvm_has_new_rx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
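The iwl_configure_rxq() body removed above is not lost: this patch calls it later as iwl_configure_rxq(&mvm->fwrt), i.e. the helper moved into the shared firmware runtime so other op-modes can reuse it. Its allocation is a textbook flexible-array command sized with struct_size() from <linux/overflow.h>. A standalone sketch of that allocation pattern follows, with illustrative field names and the size arithmetic written out by hand (struct_size() computes the same thing with overflow checking):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct queue_cfg {
		uint32_t q_num;
		uint64_t fr_bd_cb;
	};

	struct rfh_queue_cmd {
		uint32_t num_queues;
		struct queue_cfg data[];	/* flexible array member */
	};

	int main(void)
	{
		uint32_t num_queues = 3;	/* every queue except the default */
		/* in the kernel: size = struct_size(cmd, data, num_queues) */
		size_t size = sizeof(struct rfh_queue_cmd) +
			      num_queues * sizeof(struct queue_cfg);
		struct rfh_queue_cmd *cmd = calloc(1, size);
		uint32_t i;

		if (!cmd)
			return 1;

		cmd->num_queues = num_queues;
		for (i = 0; i < num_queues; i++)
			cmd->data[i].q_num = i + 1;	/* skip default queue 0 */

		printf("command spans %zu bytes for %u queues\n", size,
		       cmd->num_queues);
		free(cmd);
		return 0;
	}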
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index baf7404c137d..607d5d564928 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1099,6 +1099,8 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
NULL);
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+
if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
/* Something went wrong - we need to finish some cleanup
* that normally iwl_mvm_mac_restart_complete() below
@@ -4610,6 +4612,16 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
+ /*
+ * We haven't configured the firmware to be associated yet since
+ * we don't know the dtim period. In this case, the firmware can't
+ * track the beacons.
+ */
+ if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
if (chsw->delay > IWL_MAX_CSA_BLOCK_TX)
schedule_delayed_work(&mvmvif->csa_work, 0);
@@ -5134,28 +5146,50 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
- struct iwl_mvm_internal_rxq_notif *notif,
- u32 size)
+ enum iwl_mvm_rxq_notif_type type,
+ bool sync,
+ const void *data, u32 size)
{
- u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+ struct {
+ struct iwl_rxq_sync_cmd cmd;
+ struct iwl_mvm_internal_rxq_notif notif;
+ } __packed cmd = {
+ .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1),
+ .cmd.count =
+ cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
+ size),
+ .notif.type = type,
+ .notif.sync = sync,
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ .data[1] = data,
+ .len[1] = size,
+ .flags = sync ? 0 : CMD_ASYNC,
+ };
int ret;
+ /* size must be a multiple of DWORD */
+ if (WARN_ON(cmd.cmd.count & cpu_to_le32(3)))
+ return;
if (!iwl_mvm_has_new_rx_api(mvm))
return;
- if (notif->sync) {
- notif->cookie = mvm->queue_sync_cookie;
+ if (sync) {
+ cmd.notif.cookie = mvm->queue_sync_cookie;
mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1;
}
- ret = iwl_mvm_notify_rx_queue(mvm, qmask, notif, size, !notif->sync);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
goto out;
}
- if (notif->sync) {
+ if (sync) {
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
READ_ONCE(mvm->queue_sync_state) == 0 ||
@@ -5167,21 +5201,18 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
}
out:
- mvm->queue_sync_state = 0;
- if (notif->sync)
+ if (sync) {
+ mvm->queue_sync_state = 0;
mvm->queue_sync_cookie++;
+ }
}
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_internal_rxq_notif data = {
- .type = IWL_MVM_RXQ_EMPTY,
- .sync = 1,
- };
mutex_lock(&mvm->mutex);
- iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
+ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0);
mutex_unlock(&mvm->mutex);
}
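The reworked iwl_mvm_sync_rx_queues_internal() above assembles the sync command from two host-command fragments, the fixed header in data[0] and the caller's payload in data[1], and bails out unless the combined notification size is DWORD aligned. The alignment test is the usual low-bits trick; a tiny standalone sketch:

	#include <stdio.h>
	#include <stdint.h>

	/* a size is a whole number of DWORDs iff its two low bits are clear */
	static int dword_aligned(uint32_t count)
	{
		return (count & 3) == 0;
	}

	int main(void)
	{
		uint32_t sizes[] = { 8, 12, 14 };
		size_t i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("%u bytes -> %s\n", sizes[i],
			       dword_aligned(sizes[i]) ? "ok" : "would WARN");
		return 0;
	}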
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 0a963d01b825..4d9d4d6892fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -591,7 +591,6 @@ struct iwl_mvm_tcm {
enum iwl_mvm_traffic_load global_load;
bool low_latency[NUM_MAC_INDEX_DRIVER];
bool change[NUM_MAC_INDEX_DRIVER];
- bool global_change;
} result;
};
@@ -1096,6 +1095,9 @@ struct iwl_mvm {
/* sniffer data to include in radiotap */
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
+
+ unsigned long last_6ghz_passive_scan_jiffies;
+ unsigned long last_reset_or_resume_time_jiffies;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1570,9 +1572,6 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
-int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
- const struct iwl_mvm_internal_rxq_notif *notif,
- u32 notif_size, bool async);
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
@@ -2001,8 +2000,9 @@ void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
- struct iwl_mvm_internal_rxq_notif *notif,
- u32 size);
+ enum iwl_mvm_rxq_notif_type type,
+ bool sync,
+ const void *data, u32 size);
void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8772b65c9dab..2d58cb969918 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
bool vht_ena = vht_cap->vht_supported;
u16 flags = 0;
+ /* get STBC flags */
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
- if (he_cap->has_he) {
- if (he_cap->he_cap_elem.phy_cap_info[2] &
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
- flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
-
- if (he_cap->he_cap_elem.phy_cap_info[7] &
- IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
- flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
- } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
- (vht_ena &&
- (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
+ if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 91b6541d579f..b97708cb869d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014, 2018 - 2021 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
@@ -1926,9 +1926,7 @@ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (is_ht(rate))
return index == IWL_RATE_MCS_7_INDEX;
if (is_vht(rate))
- return index == IWL_RATE_MCS_7_INDEX ||
- index == IWL_RATE_MCS_8_INDEX ||
- index == IWL_RATE_MCS_9_INDEX;
+ return index == IWL_RATE_MCS_9_INDEX;
WARN_ON_ONCE(1);
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index af5a6dd81c41..8e26422ca326 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -527,37 +527,6 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
}
-int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
- const struct iwl_mvm_internal_rxq_notif *notif,
- u32 notif_size, bool async)
-{
- u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
- sizeof(struct iwl_mvm_rss_sync_notif)];
- struct iwl_rxq_sync_cmd *cmd = (void *)buf;
- u32 data_size = sizeof(*cmd) + notif_size;
- int ret;
-
- /*
- * size must be a multiple of DWORD
- * Ensure we don't overflow buf
- */
- if (WARN_ON(notif_size & 3 ||
- notif_size > sizeof(struct iwl_mvm_rss_sync_notif)))
- return -EINVAL;
-
- cmd->rxq_mask = cpu_to_le32(rxq_mask);
- cmd->count = cpu_to_le32(notif_size);
- cmd->flags = 0;
- memcpy(cmd->payload, notif, notif_size);
-
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(DATA_PATH_GROUP,
- TRIGGER_RX_QUEUES_NOTIF_CMD),
- async ? CMD_ASYNC : 0, data_size, cmd);
-
- return ret;
-}
-
/*
* Returns true if sn2 - buffer_size < sn1 < sn2.
* To be used only in order to compare reorder buffer head with NSSN.
@@ -573,15 +542,13 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
if (IWL_MVM_USE_NSSN_SYNC) {
- struct iwl_mvm_rss_sync_notif notif = {
- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
- .metadata.sync = 0,
- .nssn_sync.baid = baid,
- .nssn_sync.nssn = nssn,
+ struct iwl_mvm_nssn_sync_data notif = {
+ .baid = baid,
+ .nssn = nssn,
};
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
- sizeof(notif));
+ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false,
+ &notif, sizeof(notif));
}
}
@@ -830,8 +797,7 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
"invalid notification size %d (%d)",
len, (int)(sizeof(*notif) + sizeof(*internal_notif))))
return;
- /* remove only the firmware header, we want all of our payload below */
- len -= sizeof(*notif);
+ len -= sizeof(*notif) + sizeof(*internal_notif);
if (internal_notif->sync &&
mvm->queue_sync_cookie != internal_notif->cookie) {
@@ -841,21 +807,19 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
- WARN_ONCE(len != sizeof(*internal_notif),
- "invalid empty notification size %d (%d)",
- len, (int)sizeof(*internal_notif));
+ WARN_ONCE(len, "invalid empty notification size %d", len);
break;
case IWL_MVM_RXQ_NOTIF_DEL_BA:
- if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif),
+ if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data),
"invalid delba notification size %d (%d)",
- len, (int)sizeof(struct iwl_mvm_rss_sync_notif)))
+ len, (int)sizeof(struct iwl_mvm_delba_data)))
break;
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
case IWL_MVM_RXQ_NSSN_SYNC:
- if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif),
+ if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data),
"invalid nssn sync notification size %d (%d)",
- len, (int)sizeof(struct iwl_mvm_rss_sync_notif)))
+ len, (int)sizeof(struct iwl_mvm_nssn_sync_data)))
break;
iwl_mvm_nssn_sync(mvm, napi, queue,
(void *)internal_notif->data);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index caf87f320094..5a0696c44f6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -43,6 +43,9 @@
/* adaptive dwell number of APs override for social channels */
#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+/* minimum number of 2GHz and 5GHz channels in the regular scan request */
+#define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
+
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
u32 max_out_time;
@@ -94,6 +97,7 @@ struct iwl_mvm_scan_params {
struct cfg80211_scan_6ghz_params *scan_6ghz_params;
u32 n_6ghz_params;
bool scan_6ghz;
+ bool enable_6ghz_passive;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -1873,6 +1877,98 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
return flags;
}
+static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband =
+ &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
+ u32 n_disabled, i;
+
+ params->enable_6ghz_passive = false;
+
+ if (params->scan_6ghz)
+ return;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: Not supported by FW\n");
+ return;
+ }
+
+ /* 6GHz passive scan allowed only on station interface */
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: not station interface\n");
+ return;
+ }
+
+ /*
+ * While associated, a 6GHz passive scan is allowed only within a
+ * defined time interval following a HW reset or the resume flow
+ */
+ if (vif->bss_conf.assoc &&
+ (time_before(mvm->last_reset_or_resume_time_jiffies +
+ (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
+ jiffies))) {
+ IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n");
+ return;
+ }
+
+ /* No need for 6GHz passive scan if not enough time elapsed */
+ if (time_after(mvm->last_6ghz_passive_scan_jiffies +
+ (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: timeout did not expire\n");
+ return;
+ }
+
+ /* not enough channels in the regular scan request */
+ if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: not enough channels\n");
+ return;
+ }
+
+ for (i = 0; i < params->n_ssids; i++) {
+ if (!params->ssids[i].ssid_len)
+ break;
+ }
+
+ /* not a wildcard scan, so cannot enable passive 6GHz scan */
+ if (i == params->n_ssids) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: no wildcard SSID\n");
+ return;
+ }
+
+ if (!sband || !sband->n_channels) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: no 6GHz channels\n");
+ return;
+ }
+
+ for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
+ n_disabled++;
+ }
+
+ /*
+ * Not all the 6GHz channels are disabled, so no need for 6GHz passive
+ * scan
+ */
+ if (n_disabled != sband->n_channels) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: 6GHz channels enabled\n");
+ return;
+ }
+
+ /* all conditions to enable 6GHz passive scan are satisfied */
+ IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n");
+ params->enable_6ghz_passive = true;
+}
+
static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
@@ -1911,6 +2007,9 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
+ if (params->enable_6ghz_passive)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
+
return flags;
}
@@ -2183,6 +2282,30 @@ iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm,
params->n_channels,
channel_cfg_flags,
vif->type);
+
+ if (params->enable_6ghz_passive) {
+ struct ieee80211_supported_band *sband =
+ &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
+ u32 i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *channel =
+ &sband->channels[i];
+
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[cp->count];
+
+ if (!cfg80211_channel_is_psc(channel))
+ continue;
+
+ cfg->flags = 0;
+ cfg->v2.channel_num = channel->hw_value;
+ cfg->v2.band = PHY_BAND_6;
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+ cp->count++;
+ }
+ }
}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -2500,6 +2623,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+ iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
+
uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
IWL_MVM_SCAN_REGULAR);
@@ -2524,6 +2649,9 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ if (params.enable_6ghz_passive)
+ mvm->last_6ghz_passive_scan_jiffies = jiffies;
+
schedule_delayed_work(&mvm->scan_timeout_dwork,
msecs_to_jiffies(SCAN_TIMEOUT));
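The 6GHz passive-scan gating above relies on the kernel's wrap-safe jiffies comparisons. Here is a standalone sketch that reimplements time_after() the way <linux/jiffies.h> defines it (a signed subtraction, so counter wraparound is handled) and applies it to the "timeout did not expire" check; HZ and the timeout below are illustrative values, not the driver's:

	#include <stdio.h>

	typedef unsigned long jiffies_t;

	/* wrap-safe: true iff a is after b, as in <linux/jiffies.h> */
	static int time_after(jiffies_t a, jiffies_t b)
	{
		return (long)(b - a) < 0;
	}

	int main(void)
	{
		jiffies_t hz = 250;		/* illustrative HZ */
		jiffies_t timeout_sec = 5;	/* illustrative scan timeout */
		jiffies_t now = 100000;
		jiffies_t last_scan = 98000;

		/* the "timeout did not expire" check from the hunk above */
		if (time_after(last_scan + timeout_sec * hz, now))
			printf("skip: 6GHz passive scan window not expired\n");
		else
			printf("6GHz passive scan allowed\n");
		return 0;
	}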
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 3a411bbda5fd..f618368eda83 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2441,12 +2441,12 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
- struct iwl_mvm_rss_sync_notif notif = {
- .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
- .metadata.sync = 1,
- .delba.baid = baid,
+ struct iwl_mvm_delba_data notif = {
+ .baid = baid,
};
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+
+ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
+ &notif, sizeof(notif));
};
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 35a18b96aac5..32b4d1935788 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -281,6 +281,36 @@ struct iwl_mvm_key_pn {
} ____cacheline_aligned_in_smp q[];
};
+/**
+ * enum iwl_mvm_rxq_notif_type - Internal message identifier
+ *
+ * @IWL_MVM_RXQ_EMPTY: empty sync notification
+ * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
+ */
+enum iwl_mvm_rxq_notif_type {
+ IWL_MVM_RXQ_EMPTY,
+ IWL_MVM_RXQ_NOTIF_DEL_BA,
+ IWL_MVM_RXQ_NSSN_SYNC,
+};
+
+/**
+ * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+ * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+ * FW is agnostic to the payload, so there are no endianness requirements.
+ *
+ * @type: value from &iwl_mvm_rxq_notif_type
+ * @sync: ctrl path is waiting for all notifications to be received
+ * @cookie: internal cookie to identify old notifications
+ * @data: payload
+ */
+struct iwl_mvm_internal_rxq_notif {
+ u16 type;
+ u16 sync;
+ u32 cookie;
+ u8 data[];
+} __packed;
+
struct iwl_mvm_delba_data {
u32 baid;
} __packed;
@@ -290,14 +320,6 @@ struct iwl_mvm_nssn_sync_data {
u32 nssn;
} __packed;
-struct iwl_mvm_rss_sync_notif {
- struct iwl_mvm_internal_rxq_notif metadata;
- union {
- struct iwl_mvm_delba_data delba;
- struct iwl_mvm_nssn_sync_data nssn_sync;
- };
-} __packed;
-
/**
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
* @last_seq: last sequence per tid for duplicate packet detection
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 0b012f8c9eb2..83342a6a6d5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -151,6 +151,16 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
if (errmsg)
IWL_ERR(mvm, "%s\n", errmsg);
+ if (mvmvif->csa_bcn_pending) {
+ struct iwl_mvm_sta *mvmsta;
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+ if (!WARN_ON(!mvmsta))
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ rcu_read_unlock();
+ }
+
iwl_mvm_connection_loss(mvm, vif, errmsg);
return true;
}
@@ -285,6 +295,17 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
break;
case NL80211_IFTYPE_STATION:
/*
+ * If we are switching channel, don't disconnect
+ * if the time event is already done. Beacons can
+ * be delayed a bit after the switch.
+ */
+ if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+ IWL_DEBUG_TE(mvm,
+ "No beacon heard and the CS time event is over, don't disconnect\n");
+ break;
+ }
+
+ /*
* By now, we should have finished association
* and know the dtim period.
*/
@@ -713,8 +734,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
sizeof(time_cmd), &time_cmd);
- if (WARN_ON(ret))
- return;
+ if (ret)
+ IWL_ERR(mvm, "Couldn't remove the time event\n");
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 2a7339b12b13..398390c59344 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -146,8 +146,8 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (mvm->tz_device.tzone) {
struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
- thermal_notify_framework(tz_dev->tzone,
- tz_dev->fw_trips_index[ths_crossed]);
+ thermal_zone_device_update(tz_dev->tzone,
+ THERMAL_TRIP_VIOLATED);
}
#endif /* CONFIG_THERMAL */
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index b6b481ff1518..c566be99a4c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1030,15 +1030,9 @@ iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
return IWL_MVM_TRAFFIC_LOW;
}
-struct iwl_mvm_tcm_iter_data {
- struct iwl_mvm *mvm;
- bool any_sent;
-};
-
static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
- struct iwl_mvm_tcm_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm *mvm = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
@@ -1060,22 +1054,15 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
} else {
iwl_mvm_update_quotas(mvm, false, NULL);
}
-
- data->any_sent = true;
}
static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
- struct iwl_mvm_tcm_iter_data data = {
- .mvm = mvm,
- .any_sent = false,
- };
-
mutex_lock(&mvm->mutex);
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_tcm_iter, &data);
+ iwl_mvm_tcm_iter, mvm);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
@@ -1257,7 +1244,6 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
}
load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
- mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
mvm->tcm.result.global_load = load;
for (i = 0; i < NUM_NL80211_BANDS; i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 558a0b2ef0fc..d94bd8d732e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -17,10 +17,20 @@
#include "iwl-prph.h"
#include "internal.h"
+#define TRANS_CFG_MARKER BIT(0)
+#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \
+ struct _struct)
+extern int _invalid_type;
+#define _TRANS_CFG_MARKER(cfg) \
+ (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \
+ TRANS_CFG_MARKER, \
+ __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
+#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
+
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
- .driver_data = (kernel_ulong_t)&(cfg)
+ .driver_data = _ASSIGN_CFG(cfg)
/* Hardware specific file defines the PCI IDs table for that hardware module */
static const struct pci_device_id iwl_hw_card_ids[] = {
@@ -490,6 +500,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
+/* Bz devices */
+ {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -607,6 +619,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+ IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name),
+ IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name),
IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL),
@@ -1014,12 +1028,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma_a0_mr_a0, iwl_ma_name),
+ iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_snj_a0_mr_a0, iwl_ma_name),
+ iwl_cfg_snj_a0_mr_a0, iwl_ax221_name),
/* So with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1067,6 +1081,35 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+/* Bz */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_bz_a0_hr_b0, iwl_ax201_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_bz_a0_gf_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
+ iwl_cfg_bz_a0_gf4_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
+
+/* So with GF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+
#endif /* CONFIG_IWLMVM */
};
@@ -1075,19 +1118,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg_trans_params *trans =
- (struct iwl_cfg_trans_params *)(ent->driver_data);
+ const struct iwl_cfg_trans_params *trans;
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int i, ret;
+ const struct iwl_cfg *cfg;
+
+ trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
+
/*
* This is needed for backwards compatibility with the old
* tables, so we don't need to change all the config structs
* at the same time. The cfg is used to compare with the old
* full cfg structs.
*/
- const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
/* make sure trans is the first element in iwl_cfg */
BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
@@ -1165,7 +1211,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
@@ -1202,11 +1248,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
/*
- * If we didn't set the cfg yet, assume the trans is actually
- * a full cfg from the old tables.
+ * If we didn't set the cfg yet, the PCI ID table entry should have
+ * been a full config - if yes, use it, otherwise fail.
*/
- if (!iwl_trans->cfg)
+ if (!iwl_trans->cfg) {
+ if (ent->driver_data & TRANS_CFG_MARKER) {
+ pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
+ pdev->device, pdev->subsystem_device,
+ iwl_trans->hw_rev, iwl_trans->hw_rf_id);
+ ret = -EINVAL;
+ goto out_free_trans;
+ }
iwl_trans->cfg = cfg;
+ }
/* if we don't have a name yet, copy name from the old cfg */
if (!iwl_trans->name)
@@ -1222,6 +1276,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
}
+ ret = iwl_trans_init(iwl_trans);
+ if (ret)
+ goto out_free_trans;
+
pci_set_drvdata(pdev, iwl_trans);
iwl_trans->drv = iwl_drv_start(iwl_trans);
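The driver_data change above packs a type tag into bit 0 of the pointer: entries built from a struct iwl_cfg_trans_params carry TRANS_CFG_MARKER, full struct iwl_cfg entries do not, and __builtin_types_compatible_p()/__builtin_choose_expr() turn any other type into a compile-time error (the _invalid_type link failure). Probe masks the bit off before dereferencing, which is safe because struct alignment keeps bit 0 of the address free. A standalone GCC sketch of the same trick, with simplified types and without the link-error branch:

	#include <stdio.h>
	#include <stdint.h>

	struct trans_params { int gen; };
	/* trans_params first, mirroring the BUILD_BUG_ON in the probe path */
	struct full_cfg { struct trans_params trans; const char *name; };

	#define CFG_MARKER 1UL	/* bit 0 is free: structs are aligned > 1 */
	#define IS_A(x, T) __builtin_types_compatible_p(typeof(x), struct T)
	#define TAG(x)						\
		(__builtin_choose_expr(IS_A(x, trans_params),	\
				       CFG_MARKER, 0UL) + (uintptr_t)&(x))

	static const struct trans_params my_trans = { .gen = 2 };
	static const struct full_cfg my_cfg = { { .gen = 1 }, "legacy" };

	int main(void)
	{
		uintptr_t entries[] = { TAG(my_trans), TAG(my_cfg) };
		size_t i;

		for (i = 0; i < 2; i++) {
			const struct trans_params *t =
				(const void *)(entries[i] & ~CFG_MARKER);

			printf("entry %zu: %s config, gen=%d\n", i,
			       (entries[i] & CFG_MARKER) ? "trans-only" : "full",
			       t->gen);
		}
		return 0;
	}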
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d9688c7bed07..76a512cd2e5c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -447,6 +447,11 @@ struct iwl_trans
const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+#define _iwl_trans_pcie_grab_nic_access(trans) \
+ __cond_lock(nic_access_nobh, \
+ likely(__iwl_trans_pcie_grab_nic_access(trans)))
+
/*****************************************************
* RX
******************************************************/
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 2bec97133119..fb8491412be4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1023,6 +1023,9 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+ IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
+ rxq->id, ret, budget);
+
if (ret < budget) {
spin_lock(&trans_pcie->irq_lock);
if (test_bit(STATUS_INT_ENABLED, &trans->status))
@@ -1046,33 +1049,19 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+ IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
+ budget);
if (ret < budget) {
- spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_clear_irq(trans, rxq->id);
- spin_unlock(&trans_pcie->irq_lock);
+ int irq_line = rxq->id;
- napi_complete_done(&rxq->napi, ret);
- }
+ /* FIRST_RSS is shared with line 0 */
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
+ rxq->id == 1)
+ irq_line = 0;
- return ret;
-}
-
-static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget)
-{
- struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
- struct iwl_trans_pcie *trans_pcie;
- struct iwl_trans *trans;
- int ret;
-
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
- trans = trans_pcie->trans;
-
- ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
-
- if (ret < budget) {
spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_clear_irq(trans, 0);
+ iwl_pcie_clear_irq(trans, irq_line);
spin_unlock(&trans_pcie->irq_lock);
napi_complete_done(&rxq->napi, ret);
@@ -1134,18 +1123,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
if (!rxq->napi.poll) {
int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
- if (trans_pcie->msix_enabled) {
+ if (trans_pcie->msix_enabled)
poll = iwl_pcie_napi_poll_msix;
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX &&
- i == 0)
- poll = iwl_pcie_napi_poll_msix_shared;
-
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
- i == 1)
- poll = iwl_pcie_napi_poll_msix_shared;
- }
-
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
poll, NAPI_POLL_WEIGHT);
napi_enable(&rxq->napi);
@@ -1659,10 +1639,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
- if (WARN_ONCE(!rxq, "Got MSI-X interrupt before we have Rx queues"))
+ if (WARN_ONCE(!rxq,
+ "[%d] Got MSI-X interrupt before we have Rx queues",
+ entry->entry))
return IRQ_NONE;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
+ IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
local_bh_disable();
if (napi_schedule_prep(&rxq->napi))
@@ -2194,9 +2177,16 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
u32 inta_fh, inta_hw;
bool polling = false;
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
+
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock_bh(&trans_pcie->irq_lock);
@@ -2205,7 +2195,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/*
* Clear causes registers to avoid being handling the same cause.
*/
- iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
spin_unlock_bh(&trans_pcie->irq_lock);
@@ -2219,8 +2209,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (iwl_have_debug_level(IWL_DL_ISR)) {
IWL_DEBUG_ISR(trans,
- "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
- inta_fh, trans_pcie->fh_mask,
+ "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ entry->entry, inta_fh, trans_pcie->fh_mask,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
if (inta_fh & ~trans_pcie->fh_mask)
IWL_DEBUG_ISR(trans,
@@ -2275,8 +2265,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* After checking FH register check HW register */
if (iwl_have_debug_level(IWL_DL_ISR)) {
IWL_DEBUG_ISR(trans,
- "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
- inta_hw, trans_pcie->hw_mask,
+ "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ entry->entry, inta_hw, trans_pcie->hw_mask,
iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
if (inta_hw & ~trans_pcie->hw_mask)
IWL_DEBUG_ISR(trans,
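The rx.c rework above deletes the dedicated shared-vector NAPI poll routine; instead the common MSI-X poll computes which interrupt line to clear, since with the IWL_SHARED_IRQ_FIRST_RSS layout RX queue 1 raises its causes on line 0. A sketch of just that mapping (the flag value here is illustrative):

	#include <stdio.h>

	#define SHARED_IRQ_FIRST_RSS 0x2	/* illustrative flag value */

	/* queue 1 shares line 0 when the first RSS queue rides vector 0 */
	static int irq_line_for_rxq(int rxq_id, unsigned int shared_vec_mask)
	{
		if ((shared_vec_mask & SHARED_IRQ_FIRST_RSS) && rxq_id == 1)
			return 0;
		return rxq_id;
	}

	int main(void)
	{
		int q;

		for (q = 0; q < 3; q++)
			printf("rxq %d -> clear irq line %d\n", q,
			       irq_line_for_rxq(q, SHARED_IRQ_FIRST_RSS));
		return 0;
	}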
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 94ffc1ae484d..1bcd36e9e008 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@@ -108,8 +108,8 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
trans_pcie->fw_reset_done, FW_RESET_TIMEOUT);
if (!ret)
- IWL_ERR(trans,
- "firmware didn't ACK the reset - continue anyway\n");
+ IWL_INFO(trans,
+ "firmware didn't ACK the reset - continue anyway\n");
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
- iwl_txq_gen2_tx_stop(trans);
+ iwl_txq_gen2_tx_free(trans);
iwl_pcie_rx_stop(trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 1bf4c37fe960..239bc177a3e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1604,6 +1604,11 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
} else {
trans_pcie->trans->num_rx_queues = num_irqs - 1;
}
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
+ trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
+
WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
trans_pcie->alloc_vecs = num_irqs;
@@ -1973,12 +1978,16 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+/*
+ * This version doesn't disable BHs but rather assumes they're
+ * already disabled.
+ */
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock_bh(&trans_pcie->reg_lock);
+ spin_lock(&trans_pcie->reg_lock);
if (trans_pcie->cmd_hold_nic_awake)
goto out;
@@ -2063,7 +2072,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
}
err:
- spin_unlock_bh(&trans_pcie->reg_lock);
+ spin_unlock(&trans_pcie->reg_lock);
return false;
}
@@ -2076,6 +2085,20 @@ out:
return true;
}
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+{
+ bool ret;
+
+ local_bh_disable();
+ ret = __iwl_trans_pcie_grab_nic_access(trans);
+ if (ret) {
+ /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
+ return ret;
+ }
+ local_bh_enable();
+ return false;
+}
+
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
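The trans.c split above leaves __iwl_trans_pcie_grab_nic_access() assuming BHs are already off: it takes reg_lock and keeps it held only when access is granted, and the public wrapper likewise keeps BHs disabled only on success so that release_nic_access() can undo both. A userspace analog of that asymmetric acquire/release shape, with a mutex standing in for the BH-disabled reg_lock critical section:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool device_awake = true;	/* illustrative hardware state */

	/* on success the lock stays held; on failure it is dropped here */
	static bool grab_access(void)
	{
		pthread_mutex_lock(&reg_lock);
		if (device_awake)
			return true;
		pthread_mutex_unlock(&reg_lock);
		return false;
	}

	static void release_access(void)
	{
		pthread_mutex_unlock(&reg_lock);
	}

	int main(void)
	{
		if (grab_access()) {
			printf("register access granted\n");
			release_access();
		} else {
			printf("device asleep, no access\n");
		}
		return 0;
	}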
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 4456abb9a074..34bde8c87324 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_tfh_tfd *tfd;
+ unsigned long flags;
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
@@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_irqsave(&txq->lock, flags);
idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock(&trans_pcie->reg_lock);
out:
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 7ae32491b5da..4f6c187eed69 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -181,16 +181,20 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- lockdep_assert_held(&trans_pcie->reg_lock);
-
if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
return;
- if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+
+ spin_lock(&trans_pcie->reg_lock);
+
+ if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
+ spin_unlock(&trans_pcie->reg_lock);
return;
+ }
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock(&trans_pcie->reg_lock);
}
/*
@@ -198,7 +202,6 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
*/
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
if (!txq) {
@@ -222,12 +225,9 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_txq_free_tfd(trans, txq);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
- if (txq->read_ptr == txq->write_ptr) {
- spin_lock(&trans_pcie->reg_lock);
- if (txq_id == trans->txqs.cmd.q_id)
- iwl_pcie_clear_cmd_in_flight(trans);
- spin_unlock(&trans_pcie->reg_lock);
- }
+ if (txq->read_ptr == txq->write_ptr &&
+ txq_id == trans->txqs.cmd.q_id)
+ iwl_pcie_clear_cmd_in_flight(trans);
}
while (!skb_queue_empty(&txq->overflow_q)) {
@@ -629,38 +629,30 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
const struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- lockdep_assert_held(&trans_pcie->reg_lock);
/* Make sure the NIC is still alive in the bus */
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
+ if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+ return 0;
+
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
* returned. This needs to be done only on NICs that have
- * apmg_wake_up_wa set.
+ * apmg_wake_up_wa set (see above).
*/
- if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
- !trans_pcie->cmd_hold_nic_awake) {
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
- 15000);
- if (ret < 0) {
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
- return -EIO;
- }
- trans_pcie->cmd_hold_nic_awake = true;
- }
+ if (!_iwl_trans_pcie_grab_nic_access(trans))
+ return -EIO;
+
+ /*
+ * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
+ * There, we also returned immediately if cmd_hold_nic_awake is
+ * already true, so it's OK to unconditionally set it to true.
+ */
+ trans_pcie->cmd_hold_nic_awake = true;
+ spin_unlock(&trans_pcie->reg_lock);
return 0;
}
@@ -674,7 +666,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
*/
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
int nfreed = 0;
u16 r;
@@ -705,12 +696,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}
- if (txq->read_ptr == txq->write_ptr) {
- /* BHs are also disabled due to txq->lock */
- spin_lock(&trans_pcie->reg_lock);
+ if (txq->read_ptr == txq->write_ptr)
iwl_pcie_clear_cmd_in_flight(trans);
- spin_unlock(&trans_pcie->reg_lock);
- }
iwl_txq_progress(txq);
}
@@ -914,7 +901,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
@@ -1161,19 +1147,16 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
- spin_lock(&trans_pcie->reg_lock);
ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
if (ret < 0) {
idx = ret;
- goto unlock_reg;
+ goto out;
}
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- unlock_reg:
- spin_unlock(&trans_pcie->reg_lock);
out:
spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
@@ -1367,7 +1350,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
/* this is the data left for this subframe */
unsigned int data_left =
min_t(unsigned int, mss, total_len);
- struct sk_buff *csum_skb = NULL;
unsigned int hdr_tb_len;
dma_addr_t hdr_tb_phys;
u8 *subf_hdrs_start = hdr_page->pos;
@@ -1398,10 +1380,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
hdr_tb_len = hdr_page->pos - start_hdr;
hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
hdr_tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
- dev_kfree_skb(csum_skb);
+ if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
return -EINVAL;
- }
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
@@ -1420,10 +1400,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys = dma_map_single(trans->dev, tso.data,
size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- dev_kfree_skb(csum_skb);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- }
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 833f43d1ca7a..451b06069350 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -14,30 +14,6 @@
#include <linux/dmapool.h>
/*
- * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
- */
-void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
-{
- int txq_id;
-
- /*
- * This function can be called before the op_mode disabled the
- * queues. This happens when we have an rfkill interrupt.
- * Since we stop Tx altogether - mark the queues as stopped.
- */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
- if (!trans->txqs.txq[txq_id])
- continue;
- iwl_txq_gen2_unmap(trans, txq_id);
- }
-}
-
-/*
* iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
@@ -399,7 +375,6 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
while (total_len) {
/* this is the data left for this subframe */
unsigned int data_left = min_t(unsigned int, mss, total_len);
- struct sk_buff *csum_skb = NULL;
unsigned int tb_len;
dma_addr_t tb_phys;
u8 *subf_hdrs_start = hdr_page->pos;
@@ -430,10 +405,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
tb_len = hdr_page->pos - start_hdr;
tb_phys = dma_map_single(trans->dev, start_hdr,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- dev_kfree_skb(csum_skb);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
- }
/*
* No need for _with_wa, this is from the TSO page and
* we leave some space at the end of it so can't hit
@@ -458,10 +431,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
tb_phys, tso.data,
tb_len, NULL);
- if (ret) {
- dev_kfree_skb(csum_skb);
+ if (ret)
goto out_err;
- }
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -1189,6 +1160,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
goto error_free_resp;
}
+ if (WARN_ONCE(trans->txqs.txq[qid],
+ "queue %d already allocated\n", qid)) {
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
txq->id = qid;
trans->txqs.txq[qid] = txq;
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
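
The WARN_ONCE() added above turns a silent double allocation of a queue slot into a one-shot diagnostic plus a clean error return. The macro evaluates to the truth value of its condition, so it can sit directly in the if. A minimal kernel-context sketch of the pattern, assuming a hypothetical slots[] table in place of trans->txqs.txq[]:

	#include <linux/kernel.h>	/* WARN_ONCE() */

	static int claim_slot(void **slots, int qid, void *q)
	{
		/* Warns (with a backtrace) only the first time it trips. */
		if (WARN_ONCE(slots[qid], "queue %d already allocated\n", qid))
			return -EIO;

		slots[qid] = q;
		return 0;
	}
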
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
index af1dbdf5617a..20efc62acf13 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
@@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
bool cmd_queue);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 97c270845fd1..51c847d98755 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -227,6 +227,7 @@ static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf,
static const struct proc_ops prism2_aux_dump_proc_ops = {
.proc_read = prism2_aux_dump_proc_no_read,
+ .proc_lseek = default_llseek,
};
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
index 96a03d10a080..18bd0d9876c2 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
@@ -312,11 +312,3 @@ static void __exit orinoco_nortel_exit(void)
module_init(orinoco_nortel_init);
module_exit(orinoco_nortel_exit);
-
-/*
- * Local variables:
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c
index f3c86b07b1b9..7e3a6dd60c15 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c
@@ -255,11 +255,3 @@ static void __exit orinoco_pci_exit(void)
module_init(orinoco_pci_init);
module_exit(orinoco_pci_exit);
-
-/*
- * Local variables:
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c
index 16dada94c774..73e6ae124013 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c
@@ -360,11 +360,3 @@ static void __exit orinoco_plx_exit(void)
module_init(orinoco_plx_init);
module_exit(orinoco_plx_exit);
-
-/*
- * Local variables:
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
index 9a9d335611ac..939d5a1dce97 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
@@ -235,11 +235,3 @@ static void __exit orinoco_tmd_exit(void)
module_init(orinoco_tmd_init);
module_exit(orinoco_tmd_exit);
-
-/*
- * Local variables:
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fa7d4c20dc13..51ce767eaf88 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -596,7 +596,7 @@ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = {
{ .vendor_id = OUI_QCA, .subcmd = 1 },
};
-static spinlock_t hwsim_radio_lock;
+static DEFINE_SPINLOCK(hwsim_radio_lock);
static LIST_HEAD(hwsim_radios);
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
@@ -763,7 +763,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
/* MAC80211_HWSIM virtio queues */
static struct virtqueue *hwsim_vqs[HWSIM_NUM_VQS];
static bool hwsim_virtio_enabled;
-static spinlock_t hwsim_virtio_lock;
+static DEFINE_SPINLOCK(hwsim_virtio_lock);
static void hwsim_virtio_rx_work(struct work_struct *work);
static DECLARE_WORK(hwsim_virtio_rx, hwsim_virtio_rx_work);
@@ -2795,8 +2795,8 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
- .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@@ -2839,8 +2839,8 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
- .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@@ -2885,8 +2885,8 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
- .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -2933,8 +2933,8 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
- .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -4410,8 +4410,6 @@ static struct virtio_driver virtio_hwsim = {
static int hwsim_register_virtio_driver(void)
{
- spin_lock_init(&hwsim_virtio_lock);
-
return register_virtio_driver(&virtio_hwsim);
}
@@ -4440,8 +4438,6 @@ static int __init init_mac80211_hwsim(void)
if (channels < 1)
return -EINVAL;
- spin_lock_init(&hwsim_radio_lock);
-
err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
if (err)
return err;
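
Both hwsim locks above move from a runtime spin_lock_init() in the init paths to a compile-time DEFINE_SPINLOCK(). For a static, file-scope lock this is the preferred form: the lock is valid the moment the module is loaded, so there is no window in which code could take an uninitialized lock, and the init-function boilerplate disappears. The two equivalent forms, for reference:

	#include <linux/spinlock.h>

	/* Preferred for statics: initialized at build time. */
	static DEFINE_SPINLOCK(foo_lock);

	/* Runtime init is only needed for locks embedded in
	 * dynamically allocated objects. */
	struct foo {
		spinlock_t lock;
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);
	}
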
diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h
index 5d1e30e0c5db..c1e0388ef01d 100644
--- a/drivers/net/wireless/marvell/libertas/decl.h
+++ b/drivers/net/wireless/marvell/libertas/decl.h
@@ -23,7 +23,6 @@ struct lbs_private;
typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret,
const struct firmware *helper, const struct firmware *mainfw);
-struct lbs_private;
struct sk_buff;
struct net_device;
struct cmd_ds_command;
diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h
index d49717b20c09..44c4cd0230a8 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.h
+++ b/drivers/net/wireless/marvell/libertas/mesh.h
@@ -60,13 +60,13 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
#else
-#define lbs_init_mesh(priv)
-#define lbs_deinit_mesh(priv)
-#define lbs_start_mesh(priv)
-#define lbs_add_mesh(priv)
-#define lbs_remove_mesh(priv)
+#define lbs_init_mesh(priv) do { } while (0)
+#define lbs_deinit_mesh(priv) do { } while (0)
+#define lbs_start_mesh(priv) do { } while (0)
+#define lbs_add_mesh(priv) do { } while (0)
+#define lbs_remove_mesh(priv) do { } while (0)
#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
-#define lbs_mesh_set_txpd(priv, dev, txpd)
+#define lbs_mesh_set_txpd(priv, dev, txpd) do { } while (0)
#define lbs_mesh_set_channel(priv, channel) (0)
#define lbs_mesh_activated(priv) (false)
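
The do { } while (0) stubs above fix a classic macro hazard: a macro that expands to nothing leaves a bare ";" at the call site, which trips -Wempty-body style warnings and does not behave like a single statement under if/else. Wrapping the empty body in do { } while (0) makes the stub parse exactly like a function-call statement, including requiring the trailing semicolon. A self-contained userspace demo of the safe form, with a hypothetical mesh_start() stub:

	#include <stdio.h>

	/* Safe stub: one complete statement wherever a call could appear. */
	#define mesh_start(p) do { } while (0)

	int main(void)
	{
		int mesh_enabled = 0;

		if (mesh_enabled)
			mesh_start(NULL);	/* parses as a single statement */
		else
			printf("mesh disabled\n");

		return 0;
	}
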
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index 67bbb6a8f113..5d726545d987 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -453,7 +453,6 @@ struct cmd_ds_802_11_beacon_set {
u8 beacon[MRVL_MAX_BCN_SIZE];
};
-struct lbtf_private;
struct cmd_ctrl_node;
/** Function Prototype Declaration */
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a2ed268ce0da..0961f4a5e415 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2300,8 +2300,7 @@ done:
is_scanning_required = 1;
} else {
mwifiex_dbg(priv->adapter, MSG,
- "info: trying to associate to '%.*s' bssid %pM\n",
- req_ssid.ssid_len, (char *)req_ssid.ssid,
+ "info: trying to associate to bssid %pM\n",
bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
@@ -2378,8 +2377,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
mwifiex_dbg(adapter, INFO,
- "info: Trying to associate to %.*s and bssid %pM\n",
- (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
+ "info: Trying to associate to bssid %pM\n", sme->bssid);
if (!mwifiex_stop_bg_scan(priv))
cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
@@ -2512,9 +2510,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
goto done;
}
- mwifiex_dbg(priv->adapter, MSG,
- "info: trying to join to %.*s and bssid %pM\n",
- params->ssid_len, (char *)params->ssid, params->bssid);
+ mwifiex_dbg(priv->adapter, MSG, "info: trying to join to bssid %pM\n",
+ params->bssid);
mwifiex_set_ibss_params(priv, params);
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index c2a685f63e95..0b877f3f6b97 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1211,7 +1211,6 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
struct mwifiex_bssdescriptor *bss_entry)
{
- int ret = 0;
u8 element_id;
struct ieee_types_fh_param_set *fh_param_set;
struct ieee_types_ds_param_set *ds_param_set;
@@ -1464,7 +1463,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bytes_left -= total_ie_len;
} /* while (bytes_left > 2) */
- return ret;
+ return 0;
}
/*
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index c9f8c056aa51..84b32a5f01ee 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
if (txq->skb == NULL) {
dma_free_coherent(&priv->pdev->dev, size, txq->txd,
txq->txd_dma);
+ txq->txd = NULL;
return -ENOMEM;
}
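
Setting txq->txd = NULL immediately after dma_free_coherent() above matters because the -ENOMEM is propagated to callers whose teardown path may later walk the same structure; a stale pointer there invites a double free. Poisoning the pointer makes any "free if non-NULL" cleanup idempotent. A minimal sketch of the pattern, with hypothetical names (free_desc() stands in for dma_free_coherent()):

	struct ring {
		void *desc;
	};

	static void ring_free(struct ring *r)
	{
		if (!r->desc)
			return;		/* a second teardown becomes a no-op */

		free_desc(r->desc);
		r->desc = NULL;		/* leave no dangling pointer behind */
	}
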
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index df25c00d9e06..72622220051b 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -76,9 +76,9 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
nframes--;
status = (struct mt76_rx_status *)skb->cb;
- if (!time_after(jiffies,
- status->reorder_time +
- mt76_aggr_tid_to_timeo(tid->num)))
+ if (!time_after32(jiffies,
+ status->reorder_time +
+ mt76_aggr_tid_to_timeo(tid->num)))
continue;
mt76_rx_aggr_release_frames(tid, frames, status->seqno);
@@ -122,6 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
+ u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
u16 seqno;
if (!ieee80211_is_ctl(bar->frame_control))
@@ -130,9 +131,9 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
if (!ieee80211_is_back_req(bar->frame_control))
return;
- status->tid = le16_to_cpu(bar->control) >> 12;
+ status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
- tid = rcu_dereference(wcid->aggr[status->tid]);
+ tid = rcu_dereference(wcid->aggr[tidno]);
if (!tid)
return;
@@ -147,12 +148,12 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct ieee80211_sta *sta;
struct mt76_rx_tid *tid;
bool sn_less;
u16 seqno, head, size, idx;
+ u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
u8 ackp;
__skb_queue_tail(frames, skb);
@@ -161,18 +162,18 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (!sta)
return;
- if (!status->aggr) {
+ if (!status->aggr && !(status->flag & RX_FLAG_8023)) {
mt76_rx_aggr_check_ctl(skb, frames);
return;
}
/* not part of a BA session */
- ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
+ ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
return;
- tid = rcu_dereference(wcid->aggr[status->tid]);
+ tid = rcu_dereference(wcid->aggr[tidno]);
if (!tid)
return;
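
The time_after() to time_after32() switch above goes with reorder_time shrinking from unsigned long to u32 in the mt76_rx_status hunk further down: once the stamp is truncated to 32 bits, the comparison must also be done in 32-bit space, which time_after32() does by truncating both sides (including jiffies). Both helpers rely on the same wraparound-safe signed-difference trick, sketched here as standalone C:

	#include <stdint.h>
	#include <stdbool.h>

	/* Wraparound-safe "a is later than b" for 32-bit tick counters:
	 * the subtraction wraps modulo 2^32 and is then read as signed,
	 * mirroring the kernel's time_after32(). */
	static bool after32(uint32_t a, uint32_t b)
	{
		return (int32_t)(b - a) < 0;
	}

For example, after32(5, 0xfffffff0) is true: tick 5 is correctly seen as later than a stamp taken just before the counter wrapped.
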
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index d4a6b8108971..fa48cc3a7a8f 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -25,6 +25,32 @@ mt76_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
"0x%08llx\n");
+static int
+mt76_napi_threaded_set(void *data, u64 val)
+{
+ struct mt76_dev *dev = data;
+
+ if (!mt76_is_mmio(dev))
+ return -EOPNOTSUPP;
+
+ if (dev->napi_dev.threaded != val)
+ return dev_set_threaded(&dev->napi_dev, val);
+
+ return 0;
+}
+
+static int
+mt76_napi_threaded_get(void *data, u64 *val)
+{
+ struct mt76_dev *dev = data;
+
+ *val = dev->napi_dev.threaded;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_napi_threaded, mt76_napi_threaded_get,
+ mt76_napi_threaded_set, "%llu\n");
+
int mt76_queues_read(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
@@ -102,6 +128,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, dev,
&fops_regval);
+ debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev,
+ &fops_napi_threaded);
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 2f27c43ad76d..72b1cc0ecfda 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -79,13 +79,38 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
local_bh_enable();
}
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ q->head = readl(&q->regs->dma_idx);
+ q->tail = q->head;
+}
+
+static void
+mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int i;
+
+ if (!q)
+ return;
+
+ /* clear descriptors */
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ writel(0, &q->regs->cpu_idx);
+ writel(0, &q->regs->dma_idx);
+ mt76_dma_sync_idx(dev, q);
+}
+
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
u32 ring_base)
{
int size;
- int i;
spin_lock_init(&q->lock);
spin_lock_init(&q->cleanup_lock);
@@ -105,14 +130,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->entry)
return -ENOMEM;
- /* clear descriptors */
- for (i = 0; i < q->ndesc; i++)
- q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
-
- writel(q->desc_dma, &q->regs->desc_base);
- writel(0, &q->regs->cpu_idx);
- writel(0, &q->regs->dma_idx);
- writel(q->ndesc, &q->regs->ring_size);
+ mt76_dma_queue_reset(dev, q);
return 0;
}
@@ -202,15 +220,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
}
static void
-mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
-{
- writel(q->desc_dma, &q->regs->desc_base);
- writel(q->ndesc, &q->regs->ring_size);
- q->head = readl(&q->regs->dma_idx);
- q->tail = q->head;
-}
-
-static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
@@ -309,7 +318,7 @@ static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue_buf buf;
+ struct mt76_queue_buf buf = {};
dma_addr_t addr;
if (q->queued + 1 >= q->ndesc - 1)
@@ -593,8 +602,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
return done;
}
-static int
-mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
struct mt76_dev *dev;
int qid, done = 0, cur;
@@ -602,7 +610,6 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
dev = container_of(napi->dev, struct mt76_dev, napi_dev);
qid = napi - dev->napi;
- local_bh_disable();
rcu_read_lock();
do {
@@ -612,24 +619,28 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
rcu_read_unlock();
- local_bh_enable();
if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
return done;
}
+EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
static int
-mt76_dma_init(struct mt76_dev *dev)
+mt76_dma_init(struct mt76_dev *dev,
+ int (*poll)(struct napi_struct *napi, int budget))
{
int i;
init_dummy_netdev(&dev->napi_dev);
+ init_dummy_netdev(&dev->tx_napi_dev);
+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
+ wiphy_name(dev->hw->wiphy));
+ dev->napi_dev.threaded = 1;
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
- 64);
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
napi_enable(&dev->napi[i]);
}
@@ -640,9 +651,11 @@ mt76_dma_init(struct mt76_dev *dev)
static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
+ .reset_q = mt76_dma_queue_reset,
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_cleanup = mt76_dma_rx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
};
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index e7c27697ef04..fdf786f975ea 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -45,6 +45,7 @@ enum mt76_mcu_evt_type {
EVT_EVENT_DFS_DETECT_RSP,
};
+int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 665b54c5c8ae..3b47e85e95e7 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -9,8 +9,7 @@
#include <linux/etherdevice.h>
#include "mt76.h"
-static int
-mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
{
#if defined(CONFIG_OF) && defined(CONFIG_MTD)
struct device_node *np = dev->dev->of_node;
@@ -18,7 +17,6 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
const __be32 *list;
const char *part;
phandle phandle;
- int offset = 0;
int size;
size_t retlen;
int ret;
@@ -54,7 +52,7 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
}
offset = be32_to_cpup(list);
- ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+ ret = mtd_read(mtd, offset, len, &retlen, eep);
put_mtd_device(mtd);
if (ret)
goto out_put_node;
@@ -65,7 +63,7 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
}
if (of_property_read_bool(dev->dev->of_node, "big-endian")) {
- u8 *data = (u8 *)dev->eeprom.data;
+ u8 *data = (u8 *)eep;
int i;
/* convert eeprom data in Little Endian */
@@ -86,21 +84,15 @@ out_put_node:
return -ENOENT;
#endif
}
+EXPORT_SYMBOL_GPL(mt76_get_of_eeprom);
void
mt76_eeprom_override(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
-
-#ifdef CONFIG_OF
struct device_node *np = dev->dev->of_node;
- const u8 *mac = NULL;
- if (np)
- mac = of_get_mac_address(np);
- if (!IS_ERR_OR_NULL(mac))
- ether_addr_copy(phy->macaddr, mac);
-#endif
+ of_get_mac_address(np, phy->macaddr);
if (!is_valid_ether_addr(phy->macaddr)) {
eth_random_addr(phy->macaddr);
@@ -111,6 +103,226 @@ mt76_eeprom_override(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+static bool mt76_string_prop_find(struct property *prop, const char *str)
+{
+ const char *cp = NULL;
+
+ if (!prop || !str || !str[0])
+ return false;
+
+ while ((cp = of_prop_next_string(prop, cp)) != NULL)
+ if (!strcasecmp(cp, str))
+ return true;
+
+ return false;
+}
+
+static struct device_node *
+mt76_find_power_limits_node(struct mt76_dev *dev)
+{
+ struct device_node *np = dev->dev->of_node;
+ const char *const region_names[] = {
+ [NL80211_DFS_ETSI] = "etsi",
+ [NL80211_DFS_FCC] = "fcc",
+ [NL80211_DFS_JP] = "jp",
+ };
+ struct device_node *cur, *fallback = NULL;
+ const char *region_name = NULL;
+
+ if (dev->region < ARRAY_SIZE(region_names))
+ region_name = region_names[dev->region];
+
+ np = of_get_child_by_name(np, "power-limits");
+ if (!np)
+ return NULL;
+
+ for_each_child_of_node(np, cur) {
+ struct property *country = of_find_property(cur, "country", NULL);
+ struct property *regd = of_find_property(cur, "regdomain", NULL);
+
+ if (!country && !regd) {
+ fallback = cur;
+ continue;
+ }
+
+ if (mt76_string_prop_find(country, dev->alpha2) ||
+ mt76_string_prop_find(regd, region_name))
+ return cur;
+ }
+
+ return fallback;
+}
+
+static const __be32 *
+mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min)
+{
+ struct property *prop = of_find_property(np, name, NULL);
+
+ if (!prop || !prop->value || prop->length < min * 4)
+ return NULL;
+
+ *len = prop->length;
+
+ return prop->value;
+}
+
+static struct device_node *
+mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan)
+{
+ struct device_node *cur;
+ const __be32 *val;
+ size_t len;
+
+ for_each_child_of_node(np, cur) {
+ val = mt76_get_of_array(cur, "channels", &len, 2);
+ if (!val)
+ continue;
+
+ while (len >= 2 * sizeof(*val)) {
+ if (chan->hw_value >= be32_to_cpu(val[0]) &&
+ chan->hw_value <= be32_to_cpu(val[1]))
+ return cur;
+
+ val += 2;
+ len -= 2 * sizeof(*val);
+ }
+ }
+
+ return NULL;
+}
+
+static s8
+mt76_get_txs_delta(struct device_node *np, u8 nss)
+{
+ const __be32 *val;
+ size_t len;
+
+ val = mt76_get_of_array(np, "txs-delta", &len, nss);
+ if (!val)
+ return 0;
+
+ return be32_to_cpu(val[nss - 1]);
+}
+
+static void
+mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data,
+ s8 target_power, s8 nss_delta, s8 *max_power)
+{
+ int i;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < pwr_len; i++) {
+ pwr[i] = min_t(s8, target_power,
+ be32_to_cpu(data[i]) + nss_delta);
+ *max_power = max(*max_power, pwr[i]);
+ }
+}
+
+static void
+mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
+ const __be32 *data, size_t len, s8 target_power,
+ s8 nss_delta, s8 *max_power)
+{
+ int i, cur;
+
+ if (!data)
+ return;
+
+ len /= 4;
+ cur = be32_to_cpu(data[0]);
+ for (i = 0; i < pwr_num; i++) {
+ if (len < pwr_len + 1)
+ break;
+
+ mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1,
+ target_power, nss_delta, max_power);
+ if (--cur > 0)
+ continue;
+
+ data += pwr_len + 1;
+ len -= pwr_len + 1;
+ if (!len)
+ break;
+
+ cur = be32_to_cpu(data[0]);
+ }
+}
+
+s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
+ struct ieee80211_channel *chan,
+ struct mt76_power_limits *dest,
+ s8 target_power)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct device_node *np;
+ const __be32 *val;
+ char name[16];
+ u32 mcs_rates = dev->drv->mcs_rates;
+ u32 ru_rates = ARRAY_SIZE(dest->ru[0]);
+ char band;
+ size_t len;
+ s8 max_power = 0;
+ s8 txs_delta;
+
+ if (!mcs_rates)
+ mcs_rates = 10;
+
+ memset(dest, target_power, sizeof(*dest));
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return target_power;
+
+ np = mt76_find_power_limits_node(dev);
+ if (!np)
+ return target_power;
+
+ switch (chan->band) {
+ case NL80211_BAND_2GHZ:
+ band = '2';
+ break;
+ case NL80211_BAND_5GHZ:
+ band = '5';
+ break;
+ default:
+ return target_power;
+ }
+
+ snprintf(name, sizeof(name), "txpower-%cg", band);
+ np = of_get_child_by_name(np, name);
+ if (!np)
+ return target_power;
+
+ np = mt76_find_channel_node(np, chan);
+ if (!np)
+ return target_power;
+
+ txs_delta = mt76_get_txs_delta(np, hweight8(phy->antenna_mask));
+
+ val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
+ mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val,
+ target_power, txs_delta, &max_power);
+
+ val = mt76_get_of_array(np, "rates-ofdm",
+ &len, ARRAY_SIZE(dest->ofdm));
+ mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val,
+ target_power, txs_delta, &max_power);
+
+ val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1);
+ mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]),
+ ARRAY_SIZE(dest->mcs), val, len,
+ target_power, txs_delta, &max_power);
+
+ val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1);
+ mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]),
+ ARRAY_SIZE(dest->ru), val, len,
+ target_power, txs_delta, &max_power);
+
+ return max_power;
+}
+EXPORT_SYMBOL_GPL(mt76_get_rate_power_limits);
+
int
mt76_eeprom_init(struct mt76_dev *dev, int len)
{
@@ -119,6 +331,6 @@ mt76_eeprom_init(struct mt76_dev *dev, int len)
if (!dev->eeprom.data)
return -ENOMEM;
- return !mt76_get_of_eeprom(dev, len);
+ return !mt76_get_of_eeprom(dev, dev->eeprom.data, 0, len);
}
EXPORT_SYMBOL_GPL(mt76_eeprom_init);
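
mt76_eeprom_override() above switches to the two-argument of_get_mac_address(np, addr), which copies the DT MAC directly into the caller's buffer and returns 0 or a negative errno, replacing the older pointer-returning variant and its IS_ERR_OR_NULL() dance. Since the buffer is only written on success, the existing is_valid_ether_addr() fallback keeps working unchanged. The consumer side, as a kernel-context fragment matching the hunk above:

	u8 macaddr[ETH_ALEN] = {};

	of_get_mac_address(np, macaddr);	/* 0 or -errno; safely ignorable */
	if (!is_valid_ether_addr(macaddr))
		eth_random_addr(macaddr);	/* random locally-administered MAC */
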
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 696d00d1976c..977acab0360a 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -428,6 +428,9 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
mutex_init(&dev->mcu.mutex);
dev->tx_worker.fn = mt76_tx_worker;
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
INIT_LIST_HEAD(&dev->txwi_cache);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
@@ -508,6 +511,39 @@ void mt76_free_device(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_free_device);
+static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
+{
+ struct sk_buff *skb = phy->rx_amsdu[q].head;
+ struct mt76_dev *dev = phy->dev;
+
+ phy->rx_amsdu[q].head = NULL;
+ phy->rx_amsdu[q].tail = NULL;
+ __skb_queue_tail(&dev->rx_skb[q], skb);
+}
+
+static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+
+ if (phy->rx_amsdu[q].head &&
+ (!status->amsdu || status->first_amsdu ||
+ status->seqno != phy->rx_amsdu[q].seqno))
+ mt76_rx_release_amsdu(phy, q);
+
+ if (!phy->rx_amsdu[q].head) {
+ phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
+ phy->rx_amsdu[q].seqno = status->seqno;
+ phy->rx_amsdu[q].head = skb;
+ } else {
+ *phy->rx_amsdu[q].tail = skb;
+ phy->rx_amsdu[q].tail = &skb->next;
+ }
+
+ if (!status->amsdu || status->last_amsdu)
+ mt76_rx_release_amsdu(phy, q);
+}
+
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -525,7 +561,8 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
phy->test.rx_stats.fcs_error[q]++;
}
#endif
- __skb_queue_tail(&dev->rx_skb[q], skb);
+
+ mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
@@ -720,6 +757,8 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->signal = mstat.signal;
status->chains = mstat.chains;
status->ampdu_reference = mstat.ampdu_ref;
+ status->device_timestamp = mstat.timestamp;
+ status->mactime = mstat.timestamp;
BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
BUILD_BUG_ON(sizeof(status->chain_signal) !=
@@ -737,6 +776,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_wcid *wcid = status->wcid;
struct ieee80211_hdr *hdr;
+ u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
int ret;
if (!(status->flag & RX_FLAG_DECRYPTED))
@@ -757,12 +797,12 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
}
BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
- ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
+ ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
sizeof(status->iv));
if (ret <= 0)
return -EINVAL; /* replay */
- memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));
+ memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));
if (status->flag & RX_FLAG_IV_STRIPPED)
status->flag |= RX_FLAG_PN_VALIDATED;
@@ -785,6 +825,7 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
};
struct ieee80211_sta *sta;
u32 airtime;
+ u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
spin_lock(&dev->cc_lock);
@@ -795,7 +836,7 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
return;
sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
- ieee80211_sta_register_airtime(sta, status->tid, 0, airtime);
+ ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
static void
@@ -823,7 +864,6 @@ mt76_airtime_flush_ampdu(struct mt76_dev *dev)
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_wcid *wcid = status->wcid;
@@ -831,6 +871,11 @@ mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
return;
if (!wcid || !wcid->sta) {
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+
+ if (status->flag & RX_FLAG_8023)
+ return;
+
if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
return;
@@ -864,10 +909,12 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
+ u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
bool ps;
hw = mt76_phy_hw(dev, status->ext_phy);
- if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
+ if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
+ !(status->flag & RX_FLAG_8023)) {
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
if (sta)
wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
@@ -885,6 +932,9 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
wcid->inactive_count = 0;
+ if (status->flag & RX_FLAG_8023)
+ return;
+
if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
return;
@@ -902,7 +952,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)))
- ieee80211_sta_uapsd_trigger(sta, status->tid);
+ ieee80211_sta_uapsd_trigger(sta, tidno);
if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
return;
@@ -926,13 +976,26 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
if (mt76_check_ccmp_pn(skb)) {
dev_kfree_skb(skb);
continue;
}
+ skb_shinfo(skb)->frag_list = NULL;
mt76_rx_convert(dev, skb, &hw, &sta);
ieee80211_rx_list(hw, sta, skb, &list);
+
+ /* subsequent amsdu frames */
+ while (nskb) {
+ skb = nskb;
+ nskb = nskb->next;
+ skb->next = NULL;
+
+ mt76_rx_convert(dev, skb, &hw, &sta);
+ ieee80211_rx_list(hw, sta, skb, &list);
+ }
}
spin_unlock(&dev->rx_lock);
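
mt76_rx_release_burst() above batches hardware-decapsulated A-MSDU subframes by chaining them on skb_shinfo(head)->frag_list, keyed by sequence number and the first/last flags, and mt76_rx_complete() unchains them before handing each frame to mac80211. The underlying list trick is tail-pointer chaining, which gives O(1) appends without walking the chain. A standalone sketch with hypothetical names:

	struct pkt {
		struct pkt *next;
		/* payload ... */
	};

	struct burst {
		struct pkt *head;
		struct pkt **tail;	/* slot where the next append lands */
	};

	static void burst_append(struct burst *b, struct pkt *p)
	{
		if (!b->head) {
			b->head = p;
			b->tail = &p->next;	/* first frame seeds the tail */
		} else {
			*b->tail = p;		/* O(1) append, no list walk */
			b->tail = &p->next;
		}
	}
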
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8bf45497cfca..36ede65919f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -17,12 +17,14 @@
#include "util.h"
#include "testmode.h"
-#define MT_MCU_RING_SIZE 32
-#define MT_RX_BUF_SIZE 2048
-#define MT_SKB_HEAD_LEN 128
+#define MT_MCU_RING_SIZE 32
+#define MT_RX_BUF_SIZE 2048
+#define MT_SKB_HEAD_LEN 128
-#define MT_MAX_NON_AQL_PKT 16
-#define MT_TXQ_FREE_THR 32
+#define MT_MAX_NON_AQL_PKT 16
+#define MT_TXQ_FREE_THR 32
+
+#define MT76_TOKEN_FREE_THR 64
struct mt76_dev;
struct mt76_phy;
@@ -169,7 +171,8 @@ struct mt76_mcu_ops {
};
struct mt76_queue_ops {
- int (*init)(struct mt76_dev *dev);
+ int (*init)(struct mt76_dev *dev,
+ int (*poll)(struct napi_struct *napi, int budget));
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@@ -190,13 +193,18 @@ struct mt76_queue_ops {
void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
bool flush);
+ void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
+
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+
+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_wcid_flags {
MT_WCID_FLAG_CHECK_PS,
MT_WCID_FLAG_PS,
MT_WCID_FLAG_4ADDR,
+ MT_WCID_FLAG_HDR_TRANS,
};
#define MT76_N_WCIDS 288
@@ -222,6 +230,7 @@ struct mt76_wcid {
u16 idx;
u8 hw_key_idx;
+ u8 hw_key_idx2;
u8 sta:1;
u8 ext_phy:1;
@@ -324,6 +333,8 @@ struct mt76_driver_ops {
u32 drv_flags;
u32 survey_flags;
u16 txwi_size;
+ u16 token_size;
+ u8 mcs_rates;
void (*update_survey)(struct mt76_dev *dev);
@@ -491,15 +502,16 @@ struct mt76_rx_status {
u16 wcid_idx;
};
- unsigned long reorder_time;
+ u32 reorder_time;
u32 ampdu_ref;
+ u32 timestamp;
u8 iv[6];
u8 ext_phy:1;
u8 aggr:1;
- u8 tid;
+ u8 qos_ctl;
u16 seqno;
u16 freq;
@@ -507,6 +519,7 @@ struct mt76_rx_status {
u8 enc_flags;
u8 encoding:2, bw:3, he_ru:3;
u8 he_gi:2, he_dcm:1;
+ u8 amsdu:1, first_amsdu:1, last_amsdu:1;
u8 rate_idx;
u8 nss;
u8 band;
@@ -529,7 +542,7 @@ struct mt76_testmode_data {
struct sk_buff *tx_skb;
u32 tx_count;
- u16 tx_msdu_len;
+ u16 tx_mpdu_len;
u8 tx_rate_mode;
u8 tx_rate_idx;
@@ -600,6 +613,12 @@ struct mt76_phy {
struct delayed_work mac_work;
u8 mac_work_count;
+
+ struct {
+ struct sk_buff *head;
+ struct sk_buff **tail;
+ u16 seqno;
+ } rx_amsdu[__MT_RXQ_MAX];
};
struct mt76_dev {
@@ -628,6 +647,7 @@ struct mt76_dev {
struct mt76_mcu mcu;
struct net_device napi_dev;
+ struct net_device tx_napi_dev;
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
@@ -641,6 +661,10 @@ struct mt76_dev {
struct mt76_worker tx_worker;
struct napi_struct tx_napi;
+ spinlock_t token_lock;
+ struct idr token;
+ int token_count;
+
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@@ -695,6 +719,13 @@ struct mt76_dev {
};
};
+struct mt76_power_limits {
+ s8 cck[4];
+ s8 ofdm[8];
+ s8 mcs[4][10];
+ s8 ru[7][12];
+};
+
enum mt76_phy_type {
MT_PHY_TYPE_CCK,
MT_PHY_TYPE_OFDM,
@@ -778,13 +809,15 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
-#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
-#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
@@ -811,6 +844,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
+int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
@@ -988,6 +1022,7 @@ void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
+void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -1056,6 +1091,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
+int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
@@ -1176,4 +1212,45 @@ mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
+s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
+ struct ieee80211_channel *chan,
+ struct mt76_power_limits *dest,
+ s8 target_power);
+
+struct mt76_txwi_cache *
+mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
+int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
+void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+
+static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+{
+ spin_lock_bh(&dev->token_lock);
+ __mt76_set_tx_blocked(dev, blocked);
+ spin_unlock_bh(&dev->token_lock);
+}
+
+static inline int
+mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+{
+ int token;
+
+ spin_lock_bh(&dev->token_lock);
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
+ GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+
+ return token;
+}
+
+static inline struct mt76_txwi_cache *
+mt76_token_put(struct mt76_dev *dev, int token)
+{
+ struct mt76_txwi_cache *txwi;
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+
+ return txwi;
+}
#endif
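
The new token helpers in mt76.h wrap an IDR: mt76_token_get() hands out a small integer id for a TX descriptor under token_lock, using idr_alloc() with GFP_ATOMIC since this runs in the TX hot path, and mt76_token_put() looks the descriptor back up and releases the id on completion (the upper bound comes from dev->drv->token_size). The same shape as a self-contained kernel-style sketch:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(tokens);
	static DEFINE_SPINLOCK(token_lock);

	static int token_get(void *obj, int max)
	{
		int id;

		spin_lock_bh(&token_lock);
		/* ids in [0, max); GFP_ATOMIC: may run in softirq context */
		id = idr_alloc(&tokens, obj, 0, max, GFP_ATOMIC);
		spin_unlock_bh(&token_lock);

		return id;	/* negative errno on failure */
	}

	static void *token_put(int id)
	{
		void *obj;

		spin_lock_bh(&token_lock);
		obj = idr_remove(&tokens, id);	/* NULL if never allocated */
		spin_unlock_bh(&token_lock);

		return obj;
	}
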
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 0086f18cb79a..415ea17b9be6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -219,11 +219,11 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
- ret = mt76_init_queues(dev);
+ ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7603_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index f0b879c3eba8..e1b2cfa56074 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -548,6 +548,9 @@ int mt7603_register_device(struct mt7603_dev *dev)
hw->max_report_rates = 7;
hw->max_rate_tries = 11;
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
+
hw->sta_data_size = sizeof(struct mt7603_sta);
hw->vif_data_size = sizeof(struct mt7603_vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index cc4e7bc48294..fbceb07c5f37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -532,20 +532,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
- if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
- MT_RXD2_NORMAL_NON_AMPDU))) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->ampdu_ref)
- dev->ampdu_ref++;
- }
- dev->rx_ampdu_ts = rxd[12];
-
- status->ampdu_ref = dev->ampdu_ref;
- }
-
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -579,6 +565,23 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
return -EINVAL;
}
if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ status->timestamp = le32_to_cpu(rxd[0]);
+ status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (dev->rx_ampdu_ts != status->timestamp) {
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
+ }
+ dev->rx_ampdu_ts = status->timestamp;
+
+ status->ampdu_ref = dev->ampdu_ref;
+ }
+
rxd += 2;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -651,7 +654,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(hdr->frame_control);
- status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
return 0;
@@ -1442,6 +1445,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt76_queue_rx_reset(dev, i);
}
+ mt76_tx_status_check(&dev->mt76, NULL, true);
+
mt7603_dma_sched_reset(dev);
mt7603_mac_dma_start(dev);
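
The A-MPDU reference counter moved above carries a small idiom worth noting: ampdu_ref uses 0 as "no reference", so when the u32 counter wraps, "if (!++ref) ref++;" skips the zero value. A standalone demonstration:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors "if (!++dev->ampdu_ref) dev->ampdu_ref++;" above:
	 * 0 means "invalid", so the increment must never land on it. */
	static uint32_t next_ampdu_ref(uint32_t ref)
	{
		if (!++ref)
			ref++;
		return ref;
	}

	int main(void)
	{
		printf("%u\n", next_ampdu_ref(0xffffffff));	/* prints 1, never 0 */
		return 0;
	}
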
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 96b6c8916730..6abfe6b19afa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -21,9 +21,8 @@ mt7603_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct mt7603_mcu_rxd *rxd;
if (!skb) {
- dev_err(mdev->dev,
- "MCU message %d (seq %d) timed out\n",
- cmd, seq);
+ dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n",
+ abs(cmd), seq);
dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index b787c56fd8d6..1df5b9fed2bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -120,7 +120,7 @@ struct mt7603_dev {
unsigned long last_cca_adj;
u32 ampdu_ref;
- __le32 rx_ampdu_ts;
+ u32 rx_ampdu_ts;
u8 rssi_offset[3];
u8 slottime;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
index 06fa28f645f2..aa6cb668b012 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -7,7 +7,7 @@
#include "mt7603.h"
static const struct pci_device_id mt76pci_device_table[] = {
- { PCI_DEVICE(0x14c3, 0x7603) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7603) },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 7ae48b4fa564..676bb22726d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -69,6 +69,7 @@ static int
mt7615_pm_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ struct mt76_connac_pm *pm = &dev->pm;
int ret = 0;
if (!mt7615_wait_for_mcu_init(dev))
@@ -77,6 +78,9 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
return -EOPNOTSUPP;
+ if (val == pm->enable)
+ return 0;
+
mt7615_mutex_acquire(dev);
if (dev->phy.n_beacon_vif) {
@@ -84,7 +88,11 @@ mt7615_pm_set(void *data, u64 val)
goto out;
}
- dev->pm.enable = val;
+ if (!pm->enable) {
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.last_doze_event = jiffies;
+ }
+ pm->enable = val;
out:
mt7615_mutex_release(dev);
@@ -104,6 +112,26 @@ mt7615_pm_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n");
static int
+mt7615_pm_stats(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ struct mt76_connac_pm *pm = &dev->pm;
+ unsigned long awake_time = pm->stats.awake_time;
+ unsigned long doze_time = pm->stats.doze_time;
+
+ if (!test_bit(MT76_STATE_PM, &dev->mphy.state))
+ awake_time += jiffies - pm->stats.last_wake_event;
+ else
+ doze_time += jiffies - pm->stats.last_doze_event;
+
+ seq_printf(s, "awake time: %14u\ndoze time: %15u\n",
+ jiffies_to_msecs(awake_time),
+ jiffies_to_msecs(doze_time));
+
+ return 0;
+}
+
+static int
mt7615_pm_idle_timeout_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
@@ -515,20 +543,27 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
debugfs_create_file("idle-timeout", 0600, dir, dev,
&fops_pm_idle_timeout);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
+ mt7615_pm_stats);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
- debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
- /* test pattern knobs */
- debugfs_create_u8("pattern_len", 0600, dir,
- &dev->radar_pattern.n_pulses);
- debugfs_create_u32("pulse_period", 0600, dir,
- &dev->radar_pattern.period);
- debugfs_create_u16("pulse_width", 0600, dir,
- &dev->radar_pattern.width);
- debugfs_create_u16("pulse_power", 0600, dir,
- &dev->radar_pattern.power);
- debugfs_create_file("radar_trigger", 0200, dir, dev,
- &fops_radar_pattern);
+
+ if (is_mt7615(&dev->mt76)) {
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir,
+ &dev->hw_pattern);
+ /* test pattern knobs */
+ debugfs_create_u8("pattern_len", 0600, dir,
+ &dev->radar_pattern.n_pulses);
+ debugfs_create_u32("pulse_period", 0600, dir,
+ &dev->radar_pattern.period);
+ debugfs_create_u16("pulse_width", 0600, dir,
+ &dev->radar_pattern.width);
+ debugfs_create_u16("pulse_power", 0600, dir,
+ &dev->radar_pattern.power);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_pattern);
+ }
+
debugfs_create_file("reset_test", 0200, dir, dev,
&fops_reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 25e3069cf2b1..8004ae5c16a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -71,15 +71,39 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
struct mt7615_dev *dev;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ napi_complete(napi);
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return 0;
+ }
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
-
- if (napi_complete_done(napi, 0))
+ if (napi_complete(napi))
mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
+ mt76_connac_pm_unref(&dev->pm);
+
return 0;
}
+static int mt7615_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct mt7615_dev *dev;
+ int done;
+
+ dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev);
+
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ napi_complete(napi);
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return 0;
+ }
+ done = mt76_dma_rx_poll(napi, budget);
+ mt76_connac_pm_unref(&dev->pm);
+
+ return done;
+}
+
int mt7615_wait_pdma_busy(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
@@ -176,10 +200,30 @@ static void mt7663_dma_sched_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
}
+void mt7615_dma_start(struct mt7615_dev *dev)
+{
+ /* start dma engine */
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ if (is_mt7622(&dev->mt76))
+ mt7622_dma_sched_init(dev);
+
+ if (is_mt7663(&dev->mt76)) {
+ mt7663_dma_sched_init(dev);
+
+ mt76_wr(dev, MT_MCU2HOST_INT_ENABLE, MT7663_MCU_CMD_ERROR_MASK);
+ }
+
+}
+
int mt7615_dma_init(struct mt7615_dev *dev)
{
int rx_ring_size = MT7615_RX_RING_SIZE;
int rx_buf_size = MT_RX_BUF_SIZE;
+ u32 mask;
int ret;
/* Increase buffer size to receive large VHT MPDUs */
@@ -241,11 +285,11 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
- ret = mt76_init_queues(dev);
+ ret = mt76_init_queues(dev, mt7615_poll_rx);
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7615_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
@@ -253,20 +297,17 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
- /* start dma engine */
- mt76_set(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_EN |
- MT_WPDMA_GLO_CFG_RX_DMA_EN);
-
/* enable interrupts for TX/RX rings */
- mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) |
- MT_INT_MCU_CMD);
-
- if (is_mt7622(&dev->mt76))
- mt7622_dma_sched_init(dev);
+ mask = MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev);
if (is_mt7663(&dev->mt76))
- mt7663_dma_sched_init(dev);
+ mask |= MT7663_INT_MCU_CMD;
+ else
+ mask |= MT_INT_MCU_CMD;
+
+ mt7615_irq_enable(dev, mask);
+
+ mt7615_dma_start(dev);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 2eab23898c77..6dbaaf95ee38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
switch (val) {
case 0x7615:
case 0x7622:
+ case 0x7663:
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 571390fa4de7..86341d1f82f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -116,10 +116,10 @@ mt7615_mac_init(struct mt7615_dev *dev)
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
- /* disable hdr translation and hw AMSDU */
mt76_wr(dev, MT_DMA_DCR0,
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) |
- MT_DMA_DCR0_RX_VEC_DROP);
+ MT_DMA_DCR0_RX_VEC_DROP | MT_DMA_DCR0_DAMSDU_EN |
+ MT_DMA_DCR0_RX_HDR_TRANS_EN);
/* disable TDLS filtering */
mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN);
mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN);
@@ -129,6 +129,7 @@ mt7615_mac_init(struct mt7615_dev *dev)
} else {
mt7615_init_mac_chain(dev, 1);
}
+ mt7615_mcu_set_rx_hdr_trans_blacklist(dev);
}
static void
@@ -251,6 +252,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
+ struct mt76_power_limits limits;
u8 rate_val;
delta_idx = mt7615_eeprom_get_power_delta_index(dev, band);
@@ -279,7 +281,11 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
target_power = max(target_power, eep[index]);
}
- target_power = DIV_ROUND_UP(target_power + delta, 2);
+ target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
+ &limits,
+ target_power);
+ target_power += delta;
+ target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
@@ -310,12 +316,18 @@ mt7615_regd_notifier(struct wiphy *wiphy,
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
+ mt7615_init_txpower(dev, &mphy->sband_2g.sband);
+ mt7615_init_txpower(dev, &mphy->sband_5g.sband);
+
mt7615_mutex_acquire(dev);
if (chandef->chan->flags & IEEE80211_CHAN_RADAR)
mt7615_dfs_init_radar_detector(phy);
- if (mt7615_firmware_offload(phy->dev))
+
+ if (mt7615_firmware_offload(phy->dev)) {
mt76_connac_mcu_set_channel_domain(mphy);
+ mt76_connac_mcu_set_rate_txpower(mphy);
+ }
mt7615_mutex_release(dev);
}
@@ -330,6 +342,10 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
hw->max_rates = 3;
hw->max_report_rates = 7;
hw->max_rate_tries = 11;
+ hw->netdev_features = NETIF_F_RXCSUM;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
phy->slottime = 9;
@@ -360,11 +376,17 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
if (is_mt7615(&phy->dev->mt76))
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
else
hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+
+ phy->mt76->sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
}
static void
@@ -480,10 +502,13 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
+ dev->mt76.tx_worker.fn = mt7615_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work);
INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work);
- init_completion(&dev->pm.wake_cmpl);
+ spin_lock_init(&dev->pm.wake.lock);
+ mutex_init(&dev->pm.mutex);
+ init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
@@ -496,16 +521,13 @@ void mt7615_init_device(struct mt7615_dev *dev)
init_waitqueue_head(&dev->reset_wait);
init_waitqueue_head(&dev->phy.roc_wait);
- INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
INIT_WORK(&dev->phy.roc_work, mt7615_roc_work);
timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0);
mt7615_init_wiphy(hw);
dev->pm.idle_timeout = MT7615_PM_TIMEOUT;
- dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ dev->pm.stats.last_wake_event = jiffies;
+ dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 59fdd0fc2ad4..f81a17d56008 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -234,11 +234,13 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
- __le32 rxd12 = rxd[12];
- bool unicast, remove_pad, insert_ccmp_hdr = false;
+ u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
int phy_idx;
int i, idx;
- u8 chfreq;
+ u8 chfreq, amsdu_info, qos_ctl = 0;
+ u16 seq_ctrl = 0;
+ __le16 fc = 0;
memset(status, 0, sizeof(*status));
@@ -254,8 +256,12 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
else
phy_idx = -1;
+ if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+ return -EINVAL;
+
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+ hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
@@ -268,6 +274,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
+ if ((rxd0 & csum_mask) == csum_mask)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -288,6 +297,13 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
rxd += 4;
if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+ u32 v0 = le32_to_cpu(rxd[0]);
+ u32 v2 = le32_to_cpu(rxd[2]);
+
+ fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0));
+ qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2);
+ seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2);
+
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -312,6 +328,23 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ status->timestamp = le32_to_cpu(rxd[0]);
+ status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != status->timestamp) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = status->timestamp;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
rxd += 2;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -355,20 +388,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
- MT_RXD2_NORMAL_NON_AMPDU))) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (phy->rx_ampdu_ts != rxd12) {
- if (!++phy->ampdu_ref)
- phy->ampdu_ref++;
- }
- phy->rx_ampdu_ts = rxd12;
-
- status->ampdu_ref = phy->ampdu_ref;
- }
-
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
u32 rxdg0 = le32_to_cpu(rxd[0]);
u32 rxdg1 = le32_to_cpu(rxd[1]);
@@ -446,20 +465,42 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
- if (insert_ccmp_hdr) {
+ amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1);
+ status->amsdu = !!amsdu_info;
+ if (status->amsdu) {
+ status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME;
+ status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME;
+ if (!hdr_trans) {
+ memmove(skb->data + 2, skb->data,
+ ieee80211_get_hdrlen_from_skb(skb));
+ skb_pull(skb, 2);
+ }
+ }
+
+ if (insert_ccmp_hdr && !hdr_trans) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
mt76_insert_ccmp_hdr(skb, key_id);
}
- hdr = (struct ieee80211_hdr *)skb->data;
- if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ if (!hdr_trans) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ if (ieee80211_is_data_qos(fc)) {
+ seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+ qos_ctl = *ieee80211_get_qos_ctl(hdr);
+ }
+ } else {
+ status->flag |= RX_FLAG_8023;
+ }
+
+ if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
status->aggr = unicast &&
- !ieee80211_is_qos_nullfunc(hdr->frame_control);
- status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ !ieee80211_is_qos_nullfunc(fc);
+ status->qos_ctl = qos_ctl;
+ status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
return 0;
}
@@ -690,7 +731,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
int i;
- for (i = 1; i < txp->nbuf; i++)
+ for (i = 0; i < txp->nbuf; i++)
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
@@ -966,6 +1007,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct mt7615_dev *dev = phy->dev;
struct mt7615_rate_desc rd;
u32 w5, w27, addr;
+ u16 idx = sta->vif->mt76.omac_idx;
if (!mt76_is_mmio(&dev->mt76)) {
mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
@@ -1017,7 +1059,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
mt76_wr(dev, addr + 27 * 4, w27);
- mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+ addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
+
+ mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
sta->rate_set_tsf |= rd.rateset;
@@ -1033,7 +1078,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
- enum mt7615_cipher_type cipher,
+ enum mt7615_cipher_type cipher, u16 cipher_mask,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
@@ -1050,22 +1095,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
memcpy(data + 16, key->key + 24, 8);
memcpy(data + 24, key->key + 16, 8);
} else {
- if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
- memmove(data + 16, data, 16);
- if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+ if (cipher_mask == BIT(cipher))
memcpy(data, key->key, key->keylen);
- else if (cipher == MT_CIPHER_BIP_CMAC_128)
+ else if (cipher != MT_CIPHER_BIP_CMAC_128)
+ memcpy(data, key->key, 16);
+ if (cipher == MT_CIPHER_BIP_CMAC_128)
memcpy(data + 16, key->key, 16);
}
} else {
- if (wcid->cipher & ~BIT(cipher)) {
- if (cipher != MT_CIPHER_BIP_CMAC_128)
- memmove(data, data + 16, 16);
+ if (cipher == MT_CIPHER_BIP_CMAC_128)
memset(data + 16, 0, 16);
- } else {
+ else if (cipher_mask)
+ memset(data, 0, 16);
+ if (!cipher_mask)
memset(data, 0, sizeof(data));
- }
}
+
mt76_wr_copy(dev, addr, data, sizeof(data));
return 0;
@@ -1073,7 +1118,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
+ enum mt7615_cipher_type cipher, u16 cipher_mask,
int keyidx, enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1083,20 +1128,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
w0 = mt76_rr(dev, addr);
w1 = mt76_rr(dev, addr + 4);
- if (cmd == SET_KEY) {
- w0 |= MT_WTBL_W0_RX_KEY_VALID |
- FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
- cipher == MT_CIPHER_BIP_CMAC_128);
- if (cipher != MT_CIPHER_BIP_CMAC_128 ||
- !wcid->cipher)
- w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
- } else {
- if (!(wcid->cipher & ~BIT(cipher)))
- w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
- MT_WTBL_W0_KEY_IDX);
- if (cipher == MT_CIPHER_BIP_CMAC_128)
- w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+
+ if (cipher_mask)
+ w0 |= MT_WTBL_W0_RX_KEY_VALID;
+ else
+ w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
+ if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
+ w0 |= MT_WTBL_W0_RX_IK_VALID;
+ else
+ w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+
+ if (cmd == SET_KEY &&
+ (cipher != MT_CIPHER_BIP_CMAC_128 ||
+ cipher_mask == BIT(cipher))) {
+ w0 &= ~MT_WTBL_W0_KEY_IDX;
+ w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
}
+
mt76_wr(dev, MT_WTBL_RICR0, w0);
mt76_wr(dev, MT_WTBL_RICR1, w1);
@@ -1109,24 +1157,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
+ enum mt7615_cipher_type cipher, u16 cipher_mask,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
- if (cmd == SET_KEY) {
- if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
- mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
- FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
- } else {
- if (cipher != MT_CIPHER_BIP_CMAC_128 &&
- wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
- mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
- FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
- MT_CIPHER_BIP_CMAC_128));
- else if (!(wcid->cipher & ~BIT(cipher)))
- mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+ if (!cipher_mask) {
+ mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+ return;
}
+
+ if (cmd != SET_KEY)
+ return;
+
+ if (cipher == MT_CIPHER_BIP_CMAC_128 &&
+ cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
+ return;
+
+ mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+ FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
}
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
@@ -1135,25 +1184,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
enum set_key_cmd cmd)
{
enum mt7615_cipher_type cipher;
+ u16 cipher_mask = wcid->cipher;
int err;
cipher = mt7615_mac_get_cipher(key->cipher);
if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
- mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
- err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
+ if (cmd == SET_KEY)
+ cipher_mask |= BIT(cipher);
+ else
+ cipher_mask &= ~BIT(cipher);
+
+ mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
+ cmd);
if (err < 0)
return err;
- err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
+ err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
+ key->keyidx, cmd);
if (err < 0)
return err;
- if (cmd == SET_KEY)
- wcid->cipher |= BIT(cipher);
- else
- wcid->cipher &= ~BIT(cipher);
+ wcid->cipher = cipher_mask;
return 0;
}
@@ -1411,11 +1465,7 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
u8 wcid;
trace_mac_tx_free(dev, token);
-
- spin_lock_bh(&dev->token_lock);
- txwi = idr_remove(&dev->token, token);
- spin_unlock_bh(&dev->token_lock);
-
+ txwi = mt76_token_put(mdev, token);
if (!txwi)
return;
@@ -1460,14 +1510,10 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
- if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
- return;
-
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
- mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
mt76_worker_schedule(&dev->mt76.tx_worker);
}
@@ -1821,10 +1867,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
int i, aggr;
u32 val, val2;
- memset(mib, 0, sizeof(*mib));
-
- mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
MT_MIB_AMPDU_MPDU_COUNT);
@@ -1837,24 +1881,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
-
- val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
- if (val2 > mib->ack_fail_cnt)
- mib->ack_fail_cnt = val2;
-
- val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- if (val2 > mib->ba_miss_cnt)
- mib->ba_miss_cnt = val2;
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
+ val);
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- if (val2 > mib->rts_retries_cnt) {
- mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt = val2;
- }
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
+ val);
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
-
dev->mt76.aggr_stats[aggr++] += val & 0xffff;
dev->mt76.aggr_stats[aggr++] += val >> 16;
}
@@ -1869,13 +1905,19 @@ void mt7615_pm_wake_work(struct work_struct *work)
pm.wake_work);
mphy = dev->phy.mt76;
- if (!mt7615_mcu_set_drv_ctrl(dev))
+ if (!mt7615_mcu_set_drv_ctrl(dev)) {
+ int i;
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
- else
- dev_err(mphy->dev->dev, "failed to wake device\n");
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7615_WATCHDOG_TIME);
+ }
ieee80211_wake_queues(mphy->hw);
- complete_all(&dev->pm.wake_cmpl);
+ wake_up(&dev->pm.wait);
}
void mt7615_pm_power_save_work(struct work_struct *work)
@@ -1887,6 +1929,10 @@ void mt7615_pm_power_save_work(struct work_struct *work)
pm.ps_work.work);
delta = dev->pm.idle_timeout;
+ if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@@ -1924,179 +1970,27 @@ void mt7615_mac_work(struct work_struct *work)
MT7615_WATCHDOG_TIME);
}
-static bool
-mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
-{
- bool ret;
-
- ret = wait_event_timeout(dev->reset_wait,
- (READ_ONCE(dev->reset_state) & state),
- MT7615_RESET_TIMEOUT);
- WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
- return ret;
-}
-
-static void
-mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ieee80211_hw *hw = priv;
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP:
- mt7615_mcu_add_beacon(dev, hw, vif,
- vif->bss_conf.enable_beacon);
- break;
- default:
- break;
- }
-}
-
-static void
-mt7615_update_beacons(struct mt7615_dev *dev)
-{
- ieee80211_iterate_active_interfaces(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_update_vif_beacon, dev->mt76.hw);
-
- if (!dev->mt76.phy2)
- return;
-
- ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_update_vif_beacon, dev->mt76.phy2->hw);
-}
-
-void mt7615_dma_reset(struct mt7615_dev *dev)
-{
- int i;
-
- mt76_clear(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
- MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
- usleep_range(1000, 2000);
-
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
- for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
-
- mt76_for_each_q_rx(&dev->mt76, i) {
- mt76_queue_rx_reset(dev, i);
- }
-
- mt76_set(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
- MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
-}
-EXPORT_SYMBOL_GPL(mt7615_dma_reset);
-
void mt7615_tx_token_put(struct mt7615_dev *dev)
{
struct mt76_txwi_cache *txwi;
int id;
- spin_lock_bh(&dev->token_lock);
- idr_for_each_entry(&dev->token, txwi, id) {
+ spin_lock_bh(&dev->mt76.token_lock);
+ idr_for_each_entry(&dev->mt76.token, txwi, id) {
mt7615_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb)
- dev_kfree_skb_any(txwi->skb);
+ if (txwi->skb) {
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+ ieee80211_free_txskb(hw, txwi->skb);
+ }
mt76_put_txwi(&dev->mt76, txwi);
}
- spin_unlock_bh(&dev->token_lock);
- idr_destroy(&dev->token);
+ spin_unlock_bh(&dev->mt76.token_lock);
+ idr_destroy(&dev->mt76.token);
}
EXPORT_SYMBOL_GPL(mt7615_tx_token_put);
-void mt7615_mac_reset_work(struct work_struct *work)
-{
- struct mt7615_phy *phy2;
- struct mt76_phy *ext_phy;
- struct mt7615_dev *dev;
-
- dev = container_of(work, struct mt7615_dev, reset_work);
- ext_phy = dev->mt76.phy2;
- phy2 = ext_phy ? ext_phy->priv : NULL;
-
- if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
- return;
-
- ieee80211_stop_queues(mt76_hw(dev));
- if (ext_phy)
- ieee80211_stop_queues(ext_phy->hw);
-
- set_bit(MT76_RESET, &dev->mphy.state);
- set_bit(MT76_MCU_RESET, &dev->mphy.state);
- wake_up(&dev->mt76.mcu.wait);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- del_timer_sync(&dev->phy.roc_timer);
- cancel_work_sync(&dev->phy.roc_work);
- if (phy2) {
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- del_timer_sync(&phy2->roc_timer);
- cancel_work_sync(&phy2->roc_work);
- }
-
- /* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (ext_phy)
- mt76_txq_schedule_all(ext_phy);
-
- mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[0]);
- napi_disable(&dev->mt76.napi[1]);
- napi_disable(&dev->mt76.tx_napi);
-
- mt7615_mutex_acquire(dev);
-
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
-
- mt7615_tx_token_put(dev);
- idr_init(&dev->token);
-
- if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
- mt7615_dma_reset(dev);
-
- mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
-
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
- mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
- }
-
- clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_worker_enable(&dev->mt76.tx_worker);
- napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
- napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
- napi_enable(&dev->mt76.napi[1]);
- napi_schedule(&dev->mt76.napi[1]);
-
- ieee80211_wake_queues(mt76_hw(dev));
- if (ext_phy)
- ieee80211_wake_queues(ext_phy->hw);
-
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
- mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
-
- mt7615_update_beacons(dev);
-
- mt7615_mutex_release(dev);
-
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7615_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(ext_phy->hw,
- &phy2->mt76->mac_work,
- MT7615_WATCHDOG_TIME);
-
-}
-
static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
@@ -2304,8 +2198,10 @@ void mt7615_coredump_work(struct work_struct *work)
break;
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
- if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
- break;
+ if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ dev_kfree_skb(skb);
+ continue;
+ }
memcpy(data, skb->data, skb->len);
data += skb->len;
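
/* One subtlety in the A-MPDU handling moved earlier in
 * mt7615_mac_fill_rx(): the ampdu_ref counter must never read back as
 * 0, which mac80211 treats as "not part of an A-MPDU". The skip-zero
 * increment, shown as a standalone sketch (names are illustrative): */

#include <stdio.h>

static unsigned int ampdu_ref;

static unsigned int next_ampdu_ref(void)
{
	if (!++ampdu_ref)	/* wrapped from UINT_MAX to 0 */
		ampdu_ref++;	/* skip the reserved 0 value */
	return ampdu_ref;
}

int main(void)
{
	ampdu_ref = ~0U;	/* force the wraparound case */
	printf("%u\n", next_ampdu_ref());	/* prints 1, never 0 */
	return 0;
}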
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 169f4e17b5b4..6bf9da040196 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -33,6 +33,9 @@ enum rx_pkt_type {
#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
+#define MT_RXD1_FIRST_AMSDU_FRAME GENMASK(1, 0)
+#define MT_RXD1_MID_AMSDU_FRAME BIT(1)
+#define MT_RXD1_LAST_AMSDU_FRAME BIT(0)
#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
@@ -78,6 +81,11 @@ enum rx_pkt_type {
#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD4_FRAME_CONTROL GENMASK(15, 0)
+
+#define MT_RXD6_SEQ_CTRL GENMASK(15, 0)
+#define MT_RXD6_QOS_CTL GENMASK(31, 16)
+
#define MT_RXV1_ACID_DET_H BIT(31)
#define MT_RXV1_ACID_DET_L BIT(30)
#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
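
/* The new MT_RXD4/MT_RXD6 masks above are consumed with FIELD_GET(),
 * which masks and shifts in one step. A self-contained illustration
 * with minimal re-implementations of the kernel macros (32-bit only,
 * for demonstration purposes): */

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

#define MT_RXD6_SEQ_CTRL GENMASK(15, 0)
#define MT_RXD6_QOS_CTL  GENMASK(31, 16)

int main(void)
{
	uint32_t v2 = 0xabcd1234;	/* example RXD word */

	printf("seq_ctrl=0x%04x qos_ctl=0x%04x\n",
	       (unsigned)FIELD_GET(MT_RXD6_SEQ_CTRL, v2),
	       (unsigned)FIELD_GET(MT_RXD6_QOS_CTL, v2));
	return 0;
}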
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 25faf486d279..39733b351ac4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -29,6 +29,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
bool running;
+ int ret;
if (!mt7615_wait_for_mcu_init(dev))
return -EIO;
@@ -38,21 +39,42 @@ static int mt7615_start(struct ieee80211_hw *hw)
running = mt7615_dev_running(dev);
if (!running) {
- mt7615_mcu_set_pm(dev, 0, 0);
- mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
+ ret = mt7615_mcu_set_pm(dev, 0, 0);
+ if (ret)
+ goto out;
+
+ ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
+ if (ret)
+ goto out;
+
mt7615_mac_enable_nf(dev, 0);
}
if (phy != &dev->phy) {
- mt7615_mcu_set_pm(dev, 1, 0);
- mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false);
+ ret = mt7615_mcu_set_pm(dev, 1, 0);
+ if (ret)
+ goto out;
+
+ ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false);
+ if (ret)
+ goto out;
+
mt7615_mac_enable_nf(dev, 1);
}
- if (mt7615_firmware_offload(dev))
- mt76_connac_mcu_set_channel_domain(phy->mt76);
+ if (mt7615_firmware_offload(dev)) {
+ ret = mt76_connac_mcu_set_channel_domain(phy->mt76);
+ if (ret)
+ goto out;
- mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ ret = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ if (ret)
+ goto out;
+ }
+
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ if (ret)
+ goto out;
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
@@ -62,9 +84,10 @@ static int mt7615_start(struct ieee80211_hw *hw)
if (!running)
mt7615_mac_reset_counters(dev);
+out:
mt7615_mutex_release(dev);
- return 0;
+ return ret;
}
static void mt7615_stop(struct ieee80211_hw *hw)
@@ -197,7 +220,9 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
- mt7615_mcu_set_dbdc(dev);
+ ret = mt7615_mcu_set_dbdc(dev);
+ if (ret)
+ goto out;
idx = MT7615_WTBL_RESERVED - mvif->mt76.idx;
@@ -217,8 +242,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
ret = mt7615_mcu_add_dev_info(phy, vif, true);
if (ret)
goto out;
-
- mt7615_mac_set_beacon_filter(phy, vif, true);
out:
mt7615_mutex_release(dev);
@@ -234,17 +257,17 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
struct mt7615_phy *phy = mt7615_hw_phy(hw);
int idx = msta->wcid.idx;
- /* TODO: disable beacon for the bss */
-
mt7615_mutex_acquire(dev);
+ mt7615_mcu_add_bss_info(phy, vif, NULL, false);
+ mt7615_mcu_sta_add(phy, vif, NULL, false);
+
mt76_testmode_reset(phy->mt76, true);
if (vif == phy->monitor_vif)
phy->monitor_vif = NULL;
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
- mt7615_mac_set_beacon_filter(phy, vif, false);
mt7615_mcu_add_dev_info(phy, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
@@ -296,8 +319,13 @@ int mt7615_set_channel(struct mt7615_phy *phy)
mt76_set_channel(phy->mt76);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
- mt7615_mcu_apply_rx_dcoc(phy);
- mt7615_mcu_apply_tx_dpd(phy);
+ ret = mt7615_mcu_apply_rx_dcoc(phy);
+ if (ret)
+ goto out;
+
+ ret = mt7615_mcu_apply_tx_dpd(phy);
+ if (ret)
+ goto out;
}
ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
@@ -306,8 +334,13 @@ int mt7615_set_channel(struct mt7615_phy *phy)
mt7615_mac_set_timing(phy);
ret = mt7615_dfs_init_radar_detector(phy);
+ if (ret)
+ goto out;
+
mt7615_mac_cca_stats_reset(phy);
- mt7615_mcu_set_sku_en(phy, true);
+ ret = mt7615_mcu_set_sku_en(phy, true);
+ if (ret)
+ goto out;
mt7615_mac_reset_counters(dev);
phy->noise = 0;
@@ -318,8 +351,7 @@ out:
mt7615_mutex_release(dev);
- mt76_txq_schedule_all(phy->mt76);
-
+ mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
@@ -337,7 +369,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
struct mt76_wcid *wcid = &msta->wcid;
- int idx = key->keyidx, err;
+ int idx = key->keyidx, err = 0;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -352,6 +385,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
+ wcid_keyidx = &wcid->hw_key_idx2;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -369,12 +403,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7615_mutex_acquire(dev);
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- } else if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- }
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ else
+ goto out;
+
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
@@ -383,6 +418,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
else
err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+out:
mt7615_mutex_release(dev);
return err;
@@ -526,11 +562,11 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon);
- mt7615_mcu_sta_add(phy, vif, NULL, info->enable_beacon);
+ if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
+ mt7615_mcu_add_bss_info(phy, vif, NULL, true);
+ mt7615_mcu_sta_add(phy, vif, NULL, true);
- if (vif->p2p && info->enable_beacon)
+ if (vif->p2p)
mt7615_mcu_set_p2p_oppps(hw, vif);
}
@@ -541,8 +577,16 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt76_connac_mcu_set_vif_ps(&dev->mt76, vif);
- if (changed & BSS_CHANGED_ARP_FILTER)
- mt7615_mcu_update_arp_filter(hw, vif, info);
+ if ((changed & BSS_CHANGED_ARP_FILTER) &&
+ mt7615_firmware_offload(dev)) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76,
+ info);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
mt7615_mutex_release(dev);
}
@@ -583,15 +627,21 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (err)
return err;
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt7615_mcu_add_bss_info(phy, vif, sta, true);
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ err = mt7615_mcu_add_bss_info(phy, vif, sta, true);
+ if (err)
+ return err;
+ }
+
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7615_mcu_sta_add(&dev->phy, vif, sta, true);
+ err = mt7615_mcu_sta_add(&dev->phy, vif, sta, true);
+ if (err)
+ return err;
mt76_connac_power_save_sched(phy->mt76, &dev->pm);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_add);
@@ -643,28 +693,25 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
break;
}
msta->n_rates = i;
- if (!test_bit(MT76_STATE_PM, &phy->mt76->state))
+ if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
+ mt76_connac_pm_unref(&dev->pm);
+ }
spin_unlock_bh(&dev->mt76.lock);
}
-static void
-mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+void mt7615_tx_worker(struct mt76_worker *w)
{
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
- struct mt7615_phy *phy = mt7615_hw_phy(hw);
- struct mt76_phy *mphy = phy->mt76;
-
- if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
- return;
+ struct mt7615_dev *dev = container_of(w, struct mt7615_dev,
+ mt76.tx_worker);
- if (test_bit(MT76_STATE_PM, &mphy->state)) {
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return;
}
- dev->pm.last_activity = jiffies;
- mt76_worker_schedule(&dev->mt76.tx_worker);
+ mt76_tx_worker_run(&dev->mt76);
+ mt76_connac_pm_unref(&dev->pm);
}
static void mt7615_tx(struct ieee80211_hw *hw,
@@ -692,9 +739,9 @@ static void mt7615_tx(struct ieee80211_hw *hw,
wcid = &msta->wcid;
}
- if (!test_bit(MT76_STATE_PM, &mphy->state)) {
- dev->pm.last_activity = jiffies;
+ if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
+ mt76_connac_pm_unref(&dev->pm);
return;
}
@@ -711,13 +758,13 @@ static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
- int band = phy != &dev->phy;
+ int err, band = phy != &dev->phy;
mt7615_mutex_acquire(dev);
- mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band);
+ err = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band);
mt7615_mutex_release(dev);
- return 0;
+ return err;
}
static int
@@ -745,16 +792,16 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
params->buf_size);
- mt7615_mcu_add_rx_ba(dev, params, true);
+ ret = mt7615_mcu_add_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt7615_mcu_add_rx_ba(dev, params, false);
+ ret = mt7615_mcu_add_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7615_mcu_add_tx_ba(dev, params, true);
+ ret = mt7615_mcu_add_tx_ba(dev, params, true);
ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
ieee80211_send_bar(vif, sta->addr, tid,
IEEE80211_SN_TO_SEQ(ssn));
@@ -762,7 +809,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- mt7615_mcu_add_tx_ba(dev, params, false);
+ ret = mt7615_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
@@ -771,7 +818,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7615_mcu_add_tx_ba(dev, params, false);
+ ret = mt7615_mcu_add_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -803,26 +850,38 @@ mt7615_get_stats(struct ieee80211_hw *hw,
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mib_stats *mib = &phy->mib;
+ mt7615_mutex_acquire(phy->dev);
+
stats->dot11RTSSuccessCount = mib->rts_cnt;
stats->dot11RTSFailureCount = mib->rts_retries_cnt;
stats->dot11FCSErrorCount = mib->fcs_err_cnt;
stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ memset(mib, 0, sizeof(*mib));
+
+ mt7615_mutex_release(phy->dev);
+
return 0;
}
static u64
mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
union {
u64 t64;
u32 t32[2];
} tsf;
+ u16 idx = mvif->mt76.omac_idx;
+ u32 reg;
+
+ idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+ reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
mt7615_mutex_acquire(dev);
- mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
@@ -835,18 +894,24 @@ static void
mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 timestamp)
{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
+ u16 idx = mvif->mt76.omac_idx;
+ u32 reg;
+
+ idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+ reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
mt7615_mutex_acquire(dev);
mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
/* TSF software overwrite */
- mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
+ mt76_set(dev, reg, MT_LPON_TCR_WRITE);
mt7615_mutex_release(dev);
}
@@ -1069,6 +1134,7 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int err;
if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
return 0;
@@ -1077,10 +1143,26 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
cancel_work_sync(&phy->roc_work);
mt7615_mutex_acquire(phy->dev);
- mt7615_mcu_set_roc(phy, vif, NULL, 0);
+ err = mt7615_mcu_set_roc(phy, vif, NULL, 0);
mt7615_mutex_release(phy->dev);
- return 0;
+ return err;
+}
+
+static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+ mt7615_mcu_sta_update_hdr_trans(dev, vif, sta);
}
#ifdef CONFIG_PM
@@ -1183,9 +1265,10 @@ const struct ieee80211_ops mt7615_ops = {
.sta_remove = mt7615_sta_remove,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7615_set_key,
+ .sta_set_decap_offload = mt7615_sta_set_decap_offload,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
- .wake_tx_queue = mt7615_wake_tx_queue,
+ .wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
.sw_scan_start = mt76_sw_scan,
.sw_scan_complete = mt76_sw_scan_complete,
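
/* Several main.c paths above switch from testing MT76_STATE_PM to the
 * mt76_connac_pm_ref()/mt76_connac_pm_unref() pair: take a wake
 * reference while touching the hardware, and fall back to queueing the
 * wake worker when the chip is asleep. The control flow, reduced to a
 * hypothetical standalone sketch (not the real mt76_connac types): */

#include <stdbool.h>
#include <stdio.h>

struct pm_state {
	bool asleep;
	int wake_refs;
};

static bool pm_ref(struct pm_state *pm)
{
	if (pm->asleep)
		return false;	/* caller must schedule a wake instead */
	pm->wake_refs++;
	return true;
}

static void pm_unref(struct pm_state *pm)
{
	pm->wake_refs--;	/* last unref re-arms the idle timer */
}

int main(void)
{
	struct pm_state pm = { .asleep = false };

	if (pm_ref(&pm)) {
		puts("hardware access allowed");
		pm_unref(&pm);
	} else {
		puts("device asleep: queue wake work, defer the access");
	}
	return 0;
}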
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 631596fc2f36..aa42af9ebfd6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -175,8 +175,8 @@ int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
int ret = 0;
if (!skb) {
- dev_err(mdev->dev, "Message %ld (seq %d) timeout\n",
- cmd & MCU_CMD_MASK, seq);
+ dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
+ cmd, seq);
return -ETIMEDOUT;
}
@@ -274,7 +274,7 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
sizeof(req), false);
}
-static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
{
if (!is_mt7622(&dev->mt76))
return;
@@ -283,20 +283,30 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
MT_INFRACFG_MISC_AP2CONN_WAKE,
!en * MT_INFRACFG_MISC_AP2CONN_WAKE);
}
+EXPORT_SYMBOL_GPL(mt7622_trigger_hif_int);
static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
struct mt76_dev *mdev = &dev->mt76;
u32 addr;
int err;
- addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
+ if (is_mt7663(mdev)) {
+ /* Clear firmware own via N9 eint */
+ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
+ mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+
+ addr = MT_CONN_HIF_ON_LPCTL;
+ } else {
+ addr = MT_CFG_LPCR_HOST;
+ }
+
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
mt7622_trigger_hif_int(dev, true);
- addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
mt7622_trigger_hif_int(dev, false);
@@ -308,15 +318,22 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
clear_bit(MT76_STATE_PM, &mphy->state);
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
+
return 0;
}
static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
- int i;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int i, err = 0;
+
+ mutex_lock(&pm->mutex);
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ if (!test_bit(MT76_STATE_PM, &mphy->state))
goto out;
for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
@@ -328,24 +345,31 @@ static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
if (i == MT7615_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
- set_bit(MT76_STATE_PM, &mphy->state);
- return -EIO;
+ err = -EIO;
+ goto out;
}
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
out:
- dev->pm.last_activity = jiffies;
+ mutex_unlock(&pm->mutex);
- return 0;
+ return err;
}
static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
int err = 0;
u32 addr;
- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
- return 0;
+ mutex_lock(&pm->mutex);
+
+ if (mt76_connac_skip_fw_pmctrl(mphy, pm))
+ goto out;
mt7622_trigger_hif_int(dev, true);
@@ -362,6 +386,12 @@ static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
mt7622_trigger_hif_int(dev, false);
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
+out:
+ mutex_unlock(&pm->mutex);
+
return err;
}
@@ -373,6 +403,23 @@ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_mcu_csa_notify *c;
+
+ c = (struct mt7615_mcu_csa_notify *)skb->data;
+
+ if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
+ mphy = dev->mt76.phy2;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_csa_finish, mphy->hw);
+}
+
+static void
mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -380,7 +427,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
r = (struct mt7615_mcu_rdd_report *)skb->data;
- if (r->idx && dev->mt76.phy2)
+ if (r->band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
ieee80211_radar_detected(mphy->hw);
@@ -406,7 +453,8 @@ mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
break;
}
- wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
+ (int)(skb->len - sizeof(*rxd)), data);
}
static void
@@ -419,9 +467,7 @@ mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
mt7615_mcu_rx_radar_detected(dev, skb);
break;
case MCU_EXT_EVENT_CSA_NOTIFY:
- ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_mcu_csa_finish, dev);
+ mt7615_mcu_rx_csa_notify(dev, skb);
break;
case MCU_EXT_EVENT_FW_LOG_2_HOST:
mt7615_mcu_rx_log_message(dev, skb);
@@ -685,6 +731,9 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
};
struct sk_buff *skb;
+ if (!enable)
+ goto out;
+
skb = ieee80211_beacon_get_template(hw, vif, &offs);
if (!skb)
return -EINVAL;
@@ -714,6 +763,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
}
dev_kfree_skb(skb);
+out:
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, &req,
sizeof(req), true);
}
@@ -973,7 +1023,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable);
if (enable && sta)
- mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif);
+ mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0);
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_RESET_AND_SET, NULL,
@@ -987,6 +1037,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
NULL, wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL,
+ wtbl_hdr);
}
cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
@@ -1040,6 +1092,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_SET, sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
sta_wtbl, wtbl_hdr);
@@ -1068,10 +1123,15 @@ __mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable, int cmd)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt76_wcid *wcid;
+ struct mt76_sta_cmd_info info = {
+ .sta = sta,
+ .vif = vif,
+ .enable = enable,
+ .cmd = cmd,
+ };
- wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
- return mt76_connac_mcu_add_sta_cmd(phy, vif, sta, wcid, enable, cmd);
+ info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
+ return mt76_connac_mcu_add_sta_cmd(phy, &info);
}
static int
@@ -1094,6 +1154,25 @@ static const struct mt7615_mcu_ops sta_update_ops = {
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
+int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ true);
+}
+
static int
mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
@@ -1120,8 +1199,8 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
__le16 tim_ie_pos;
__le16 csa_ie_pos;
__le16 bcc_ie_pos;
- /* 0: enable beacon offload
- * 1: disable beacon offload
+ /* 0: disable beacon offload
+ * 1: enable beacon offload
* 2: update probe response offload
*/
u8 enable;
@@ -1144,6 +1223,9 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
};
struct sk_buff *skb;
+ if (!enable)
+ goto out;
+
skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
if (!skb)
return -EINVAL;
@@ -1168,6 +1250,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
}
dev_kfree_skb(skb);
+out:
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
&req, sizeof(req), true);
}
@@ -1279,25 +1362,26 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
const struct firmware *fw = NULL;
int len, ret, sem;
+ ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
switch (sem) {
case PATCH_IS_DL:
- return 0;
+ goto release_fw;
case PATCH_NOT_DL_SEM_SUCCESS:
break;
default:
dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
- return -EAGAIN;
- }
-
- ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
- if (ret)
- goto out;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
+ ret = -EAGAIN;
+ goto release_fw;
}
hdr = (const struct mt7615_patch_hdr *)(fw->data);
@@ -1326,8 +1410,6 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
dev_err(dev->mt76.dev, "Failed to start patch\n");
out:
- release_firmware(fw);
-
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
switch (sem) {
case PATCH_REL_SEM_SUCCESS:
@@ -1338,6 +1420,9 @@ out:
break;
}
+release_fw:
+ release_firmware(fw);
+
return ret;
}
@@ -1427,8 +1512,7 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
sizeof(dev->mt76.hw->wiphy->fw_version),
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
- if (!is_mt7615(&dev->mt76) &&
- !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
+ if (!is_mt7615(&dev->mt76)) {
dev->fw_ver = MT7615_FIRMWARE_V2;
dev->mcu_ops = &sta_update_ops;
} else {
@@ -2084,16 +2168,80 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
{
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
+ struct mt76_power_limits limits;
+ s8 *limits_array = (s8 *)&limits;
int n_chains = hweight8(mphy->antenna_mask);
int tx_power;
int i;
+ static const u8 sku_mapping[] = {
+#define SKU_FIELD(_type, _field) \
+ [MT_SKU_##_type] = offsetof(struct mt76_power_limits, _field)
+ SKU_FIELD(CCK_1_2, cck[0]),
+ SKU_FIELD(CCK_55_11, cck[2]),
+ SKU_FIELD(OFDM_6_9, ofdm[0]),
+ SKU_FIELD(OFDM_12_18, ofdm[2]),
+ SKU_FIELD(OFDM_24_36, ofdm[4]),
+ SKU_FIELD(OFDM_48, ofdm[6]),
+ SKU_FIELD(OFDM_54, ofdm[7]),
+ SKU_FIELD(HT20_0_8, mcs[0][0]),
+ SKU_FIELD(HT20_32, ofdm[0]),
+ SKU_FIELD(HT20_1_2_9_10, mcs[0][1]),
+ SKU_FIELD(HT20_3_4_11_12, mcs[0][3]),
+ SKU_FIELD(HT20_5_13, mcs[0][5]),
+ SKU_FIELD(HT20_6_14, mcs[0][6]),
+ SKU_FIELD(HT20_7_15, mcs[0][7]),
+ SKU_FIELD(HT40_0_8, mcs[1][0]),
+ SKU_FIELD(HT40_32, ofdm[0]),
+ SKU_FIELD(HT40_1_2_9_10, mcs[1][1]),
+ SKU_FIELD(HT40_3_4_11_12, mcs[1][3]),
+ SKU_FIELD(HT40_5_13, mcs[1][5]),
+ SKU_FIELD(HT40_6_14, mcs[1][6]),
+ SKU_FIELD(HT40_7_15, mcs[1][7]),
+ SKU_FIELD(VHT20_0, mcs[0][0]),
+ SKU_FIELD(VHT20_1_2, mcs[0][1]),
+ SKU_FIELD(VHT20_3_4, mcs[0][3]),
+ SKU_FIELD(VHT20_5_6, mcs[0][5]),
+ SKU_FIELD(VHT20_7, mcs[0][7]),
+ SKU_FIELD(VHT20_8, mcs[0][8]),
+ SKU_FIELD(VHT20_9, mcs[0][9]),
+ SKU_FIELD(VHT40_0, mcs[1][0]),
+ SKU_FIELD(VHT40_1_2, mcs[1][1]),
+ SKU_FIELD(VHT40_3_4, mcs[1][3]),
+ SKU_FIELD(VHT40_5_6, mcs[1][5]),
+ SKU_FIELD(VHT40_7, mcs[1][7]),
+ SKU_FIELD(VHT40_8, mcs[1][8]),
+ SKU_FIELD(VHT40_9, mcs[1][9]),
+ SKU_FIELD(VHT80_0, mcs[2][0]),
+ SKU_FIELD(VHT80_1_2, mcs[2][1]),
+ SKU_FIELD(VHT80_3_4, mcs[2][3]),
+ SKU_FIELD(VHT80_5_6, mcs[2][5]),
+ SKU_FIELD(VHT80_7, mcs[2][7]),
+ SKU_FIELD(VHT80_8, mcs[2][8]),
+ SKU_FIELD(VHT80_9, mcs[2][9]),
+ SKU_FIELD(VHT160_0, mcs[3][0]),
+ SKU_FIELD(VHT160_1_2, mcs[3][1]),
+ SKU_FIELD(VHT160_3_4, mcs[3][3]),
+ SKU_FIELD(VHT160_5_6, mcs[3][5]),
+ SKU_FIELD(VHT160_7, mcs[3][7]),
+ SKU_FIELD(VHT160_8, mcs[3][8]),
+ SKU_FIELD(VHT160_9, mcs[3][9]),
+#undef SKU_FIELD
+ };
tx_power = hw->conf.power_level * 2 -
mt76_tx_power_nss_delta(n_chains);
+
+ tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
+ &limits, tx_power);
mphy->txpower_cur = tx_power;
+ if (is_mt7663(mphy->dev)) {
+ memset(sku, tx_power, MT_SKU_4SS_DELTA + 1);
+ return;
+ }
+
for (i = 0; i < MT_SKU_1SS_DELTA; i++)
- sku[i] = tx_power;
+ sku[i] = limits_array[sku_mapping[i]];
for (i = 0; i < 4; i++) {
int delta = 0;
@@ -2155,7 +2303,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
@@ -2497,6 +2645,26 @@ out:
return ret;
}
+int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev)
+{
+ struct {
+ u8 operation;
+ u8 count;
+ u8 _rsv[2];
+ u8 index;
+ u8 enable;
+ __le16 etype;
+ } req = {
+ .operation = 1,
+ .count = 1,
+ .enable = 1,
+ .etype = cpu_to_le16(ETH_P_PAE),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS,
+ &req, sizeof(req), false);
+}
+
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
@@ -2557,53 +2725,6 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
sizeof(req), false);
}
-int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
- struct sk_buff *skb;
- int i, len = min_t(int, info->arp_addr_cnt,
- IEEE80211_BSS_ARP_ADDR_LIST_LEN);
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct mt76_connac_arpns_tlv arp;
- } req_hdr = {
- .hdr = {
- .bss_idx = mvif->mt76.idx,
- },
- .arp = {
- .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
- .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
- .ips_num = len,
- .mode = 2, /* update */
- .option = 1,
- },
- };
-
- if (!mt7615_firmware_offload(dev))
- return 0;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(req_hdr) + len * sizeof(__be32));
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, &req_hdr, sizeof(req_hdr));
- for (i = 0; i < len; i++) {
- u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
-
- memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
- }
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
- true);
-}
-
int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
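
/* The sku_mapping[] table introduced in mt7615_mcu_set_txpower_sku()
 * maps firmware SKU indices to struct mt76_power_limits fields through
 * offsetof(), letting one loop copy a structured limit table into the
 * flat per-rate array the firmware expects. A standalone analogue with
 * an invented two-entry layout (not the real structures): */

#include <stddef.h>
#include <stdio.h>

struct power_limits {
	signed char cck[4];
	signed char ofdm[8];
};

enum { SKU_CCK_1_2, SKU_OFDM_6_9, SKU_MAX };

static const unsigned char sku_mapping[SKU_MAX] = {
	[SKU_CCK_1_2]  = offsetof(struct power_limits, cck[0]),
	[SKU_OFDM_6_9] = offsetof(struct power_limits, ofdm[0]),
};

int main(void)
{
	struct power_limits limits = { .cck = {10}, .ofdm = {12} };
	const signed char *arr = (const signed char *)&limits;
	signed char sku[SKU_MAX];
	int i;

	for (i = 0; i < SKU_MAX; i++)
		sku[i] = arr[sku_mapping[i]];
	printf("cck=%d ofdm=%d\n", sku[SKU_CCK_1_2], sku[SKU_OFDM_6_9]);
	return 0;
}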
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 3874f45da9eb..98c383e400a1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -176,10 +176,18 @@ struct mt7615_mcu_rxd {
u8 s2d_index;
};
+struct mt7615_mcu_csa_notify {
+ struct mt7615_mcu_rxd rxd;
+
+ u8 omac_idx;
+ u8 csa_count;
+ u8 rsv[2];
+} __packed;
+
struct mt7615_mcu_rdd_report {
struct mt7615_mcu_rxd rxd;
- u8 idx;
+ u8 band_idx;
u8 long_detected;
u8 constant_prf_detected;
u8 staggered_prf_detected;
@@ -362,30 +370,6 @@ enum {
BSS_INFO_MAX_NUM
};
-#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_generic) + \
- sizeof(struct wtbl_rx) + \
- sizeof(struct wtbl_ht) + \
- sizeof(struct wtbl_vht) + \
- sizeof(struct wtbl_tx_ps) + \
- sizeof(struct wtbl_hdr_trans) +\
- sizeof(struct wtbl_ba) + \
- sizeof(struct wtbl_bf) + \
- sizeof(struct wtbl_smps) + \
- sizeof(struct wtbl_pn) + \
- sizeof(struct wtbl_spe))
-
-#define MT7615_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
- sizeof(struct sta_rec_basic) + \
- sizeof(struct sta_rec_ht) + \
- sizeof(struct sta_rec_vht) + \
- sizeof(struct sta_rec_uapsd) + \
- sizeof(struct tlv) + \
- MT7615_WTBL_UPDATE_MAX_SIZE)
-
-#define MT7615_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_ba))
-
enum {
CH_SWITCH_NORMAL = 0,
CH_SWITCH_SCAN = 3,
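
/* mt7615_mcu_rx_csa_notify() reads the new mt7615_mcu_csa_notify
 * layout straight off the event payload; the struct is __packed so the
 * wire bytes map onto it directly. A standalone sketch of that decode
 * step (the 4-byte example payload is invented): */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct csa_notify {
	uint8_t omac_idx;
	uint8_t csa_count;
	uint8_t rsv[2];
};

int main(void)
{
	uint8_t payload[4] = { 2, 5, 0, 0 };	/* as received from MCU */
	struct csa_notify c;

	memcpy(&c, payload, sizeof(c));	/* safer than casting the buffer */
	printf("omac=%u count=%u\n", c.omac_idx, c.csa_count);
	return 0;
}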
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index a7f92fa0488f..202ea235415e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -1,3 +1,6 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -102,6 +105,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
{
struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet);
u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
+ u32 mcu_int;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
@@ -125,15 +129,23 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
if (intr & MT_INT_RX_DONE(1))
napi_schedule(&dev->mt76.napi[1]);
- if (intr & MT_INT_MCU_CMD) {
- u32 val = mt76_rr(dev, MT_MCU_CMD);
+ if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD)))
+ return;
- if (val & MT_MCU_CMD_ERROR_MASK) {
- dev->reset_state = val;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
- wake_up(&dev->reset_wait);
- }
+ if (is_mt7663(&dev->mt76)) {
+ mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS);
+ mcu_int &= MT7663_MCU_CMD_ERROR_MASK;
+ } else {
+ mcu_int = mt76_rr(dev, MT_MCU_CMD);
+ mcu_int &= MT_MCU_CMD_ERROR_MASK;
}
+
+ if (!mcu_int)
+ return;
+
+ dev->reset_state = mcu_int;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
}
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
@@ -178,6 +190,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
.rx_skb = mt7615_queue_rx_skb,
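
/* The irq tasklet rework above replaces the nested MCU branch with
 * guard clauses: return early unless an MCU interrupt is pending, read
 * the chip-specific status register, then mask for error bits. The
 * resulting control-flow shape, in a sketch with invented values: */

#include <stdbool.h>
#include <stdio.h>

#define INT_MCU_CMD	0x1
#define ERROR_MASK	0xff00

static void handle_mcu_irq(unsigned int intr, bool is_mt7663,
			   unsigned int status_7663, unsigned int status)
{
	unsigned int mcu_int;

	if (!(intr & INT_MCU_CMD))
		return;			/* nothing MCU-related pending */

	mcu_int = is_mt7663 ? status_7663 : status;
	mcu_int &= ERROR_MASK;
	if (!mcu_int)
		return;			/* no error bits set */

	printf("schedule reset, state=0x%x\n", mcu_int);
}

int main(void)
{
	handle_mcu_irq(INT_MCU_CMD, false, 0, 0x4200);
	return 0;
}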
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 491841bc6291..989f05ed4377 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -133,11 +133,11 @@ struct mt7615_vif {
};
struct mib_stats {
- u16 ack_fail_cnt;
- u16 fcs_err_cnt;
- u16 rts_cnt;
- u16 rts_retries_cnt;
- u16 ba_miss_cnt;
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+ u32 ba_miss_cnt;
unsigned long aggr_per;
};
@@ -168,7 +168,7 @@ struct mt7615_phy {
u8 rdd_state;
int dfs_state;
- __le32 rx_ampdu_ts;
+ u32 rx_ampdu_ts;
u32 ampdu_ref;
struct mib_stats mib;
@@ -263,9 +263,6 @@ struct mt7615_dev {
bool flash_eeprom;
bool dbdc_support;
- spinlock_t token_lock;
- struct idr token;
-
u8 fw_ver;
struct work_struct rate_work;
@@ -376,6 +373,7 @@ int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
enum nl80211_band band);
int mt7615_wait_pdma_busy(struct mt7615_dev *dev);
int mt7615_dma_init(struct mt7615_dev *dev);
+void mt7615_dma_start(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
@@ -408,11 +406,6 @@ static inline bool is_mt7615(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
}
-static inline bool is_mt7663(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7663;
-}
-
static inline bool is_mt7611(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7611;
@@ -512,6 +505,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
+void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -524,6 +518,10 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7615_mac_work(struct work_struct *work);
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
+int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
const struct mt7615_dfs_pulse *pulse);
@@ -549,14 +547,13 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
bool enable);
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
-int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info);
int __mt7663_load_firmware(struct mt7615_dev *dev);
u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
void mt7615_coredump_work(struct work_struct *work);
+void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en);
+
/* usb */
int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
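
/* The mib_stats widening from u16 to u32 follows from the mac.c change
 * above: the counters are now accumulated across polls of clear-on-read
 * registers instead of sampled, so 16 bits would wrap almost
 * immediately under load. A small demonstration of the truncation the
 * wider type avoids (sample value is invented): */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sample = 70000;	/* one poll interval's worth */
	uint16_t cnt16 = 0;		/* old field width */
	uint32_t cnt32 = 0;		/* new field width */

	cnt16 += sample;		/* truncated: 70000 % 65536 == 4464 */
	cnt32 += sample;
	printf("u16=%u u32=%u\n", cnt16, cnt32);
	return 0;
}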
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 71487f532f36..11f169cdd603 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -13,9 +13,9 @@
#include "mcu.h"
static const struct pci_device_id mt7615_pci_device_table[] = {
- { PCI_DEVICE(0x14c3, 0x7615) },
- { PCI_DEVICE(0x14c3, 0x7663) },
- { PCI_DEVICE(0x14c3, 0x7611) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7615) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7663) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7611) },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 72395925ddee..ec8ec1a2033f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -40,13 +40,16 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->mcu_work, mt7615_pci_init_work);
- spin_lock_init(&dev->token_lock);
- idr_init(&dev->token);
-
ret = mt7615_eeprom_init(dev, addr);
if (ret < 0)
return ret;
+ if (is_mt7663(&dev->mt76)) {
+ /* Reset RGU */
+ mt76_clear(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1));
+ mt76_set(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1));
+ }
+
ret = mt7615_dma_init(dev);
if (ret)
return ret;
@@ -76,7 +79,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
dev = container_of(mt76, struct mt7615_dev, mt76);
- if (test_bit(MT76_STATE_PM, &mt76->phy.state))
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm))
return;
val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
@@ -94,6 +97,8 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
addr = mt7615_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
+
+ mt76_connac_pm_unref(&dev->pm);
}
static int
@@ -126,6 +131,7 @@ int mt7615_register_device(struct mt7615_dev *dev)
int ret;
mt7615_init_device(dev);
+ INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -163,10 +169,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
mt76_unregister_device(&dev->mt76);
if (mcu_running)
mt7615_mcu_exit(dev);
- mt7615_dma_cleanup(dev);
mt7615_tx_token_put(dev);
-
+ mt7615_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index 1b4cb145f38e..d7cbef752f9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -37,9 +37,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
token = le16_to_cpu(txp->hw.msdu_id[0]) &
~MT_MSDU_ID_VALID;
- spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, token);
- spin_unlock_bh(&dev->token_lock);
+ t = mt76_token_put(mdev, token);
e->skb = t ? t->skb : NULL;
}
@@ -161,9 +159,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
- spin_lock_bh(&dev->token_lock);
- id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
- spin_unlock_bh(&dev->token_lock);
+ id = mt76_token_get(mdev, &t);
if (id < 0)
return id;
@@ -181,3 +177,178 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
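The two hunks above replace mt7615's private token idr/spinlock with the shared mt76_token_get()/mt76_token_put() helpers. A standalone sketch of the contract those helpers provide — small integer tokens mapping an in-flight tx descriptor to its completion — using a flat array and a pthread mutex in place of the kernel idr; all names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

#define TOKEN_SIZE 4096

static void *tokens[TOKEN_SIZE];
static pthread_mutex_t token_lock = PTHREAD_MUTEX_INITIALIZER;

static int token_get(void *entry)
{
	int id, ret = -1;

	pthread_mutex_lock(&token_lock);
	for (id = 0; id < TOKEN_SIZE; id++) {
		if (!tokens[id]) {
			tokens[id] = entry;
			ret = id;
			break;
		}
	}
	pthread_mutex_unlock(&token_lock);
	return ret;	/* < 0 means all tokens are in flight */
}

static void *token_put(int id)
{
	void *entry;

	pthread_mutex_lock(&token_lock);
	entry = tokens[id];	/* completion path recovers the descriptor */
	tokens[id] = NULL;
	pthread_mutex_unlock(&token_lock);
	return entry;
}

int main(void)
{
	int skb = 42;
	int id = token_get(&skb);

	printf("token %d -> %p\n", id, token_put(id));
	return 0;
}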
+
+void mt7615_dma_reset(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_tx_status_check(&dev->mt76, NULL, true);
+
+ mt7615_dma_start(dev);
+}
+EXPORT_SYMBOL_GPL(mt7615_dma_reset);
+
+static void
+mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event)
+{
+ u32 reg = MT_MCU_INT_EVENT;
+
+ if (is_mt7663(&dev->mt76))
+ reg = MT7663_MCU_INT_EVENT;
+
+ mt76_wr(dev, reg, event);
+
+ mt7622_trigger_hif_int(dev, true);
+ mt7622_trigger_hif_int(dev, false);
+}
+
+static bool
+mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7615_RESET_TIMEOUT);
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
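wait_event_timeout() here blocks the reset worker until the interrupt path reports the expected MT_MCU_CMD_* bit in reset_state. A userspace model of the same handshake, assuming a 5-second stand-in for MT7615_RESET_TIMEOUT and pthread condvars in place of the kernel waitqueue:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int reset_state;

static int wait_reset_state(unsigned int state, int timeout_s)
{
	struct timespec dl;
	int ok = 1;

	clock_gettime(CLOCK_REALTIME, &dl);
	dl.tv_sec += timeout_s;

	pthread_mutex_lock(&lock);
	while (!(reset_state & state)) {
		if (pthread_cond_timedwait(&cond, &lock, &dl)) {
			ok = 0;	/* timed out: MCU never reported the state */
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	if (!ok)
		fprintf(stderr, "Timeout waiting for MCU reset state %x\n", state);
	return ok;
}

static void report_state(unsigned int state)	/* irq-path stand-in */
{
	pthread_mutex_lock(&lock);
	reset_state |= state;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	report_state(0x8);	/* MT_MCU_CMD_RESET_DONE stand-in */
	printf("wait ok: %d\n", wait_reset_state(0x8, 5));
	return 0;
}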
+
+static void
+mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ mt7615_mcu_add_beacon(dev, hw, vif,
+ vif->bss_conf.enable_beacon);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7615_update_beacons(struct mt7615_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
+void mt7615_mac_reset_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, reset_work);
+ ext_phy = dev->mt76.phy2;
+ phy2 = ext_phy ? ext_phy->priv : NULL;
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+ del_timer_sync(&dev->phy.roc_timer);
+ cancel_work_sync(&dev->phy.roc_work);
+ if (phy2) {
+ set_bit(MT76_RESET, &phy2->mt76->state);
+ cancel_delayed_work_sync(&phy2->mt76->mac_work);
+ del_timer_sync(&phy2->roc_timer);
+ cancel_work_sync(&phy2->roc_work);
+ }
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
+
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mt7615_mutex_acquire(dev);
+
+ mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED);
+
+ if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7615_dma_reset(dev);
+
+ mt7615_tx_token_put(dev);
+ idr_init(&dev->mt76.token);
+
+ mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
+
+ mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ if (phy2)
+ clear_bit(MT76_RESET, &phy2->mt76->state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mt7615_update_beacons(dev);
+
+ mt7615_mutex_release(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+ MT7615_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(ext_phy->hw,
+ &phy2->mt76->mac_work,
+ MT7615_WATCHDOG_TIME);
+}
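For reference, the handshake the function above drives, reduced to its ordered steps; the step names are illustrative, the MT_MCU_* macros are the driver's:

enum reset_step {
	HOST_SAYS_PDMA_STOPPED,	/* MT_MCU_INT_EVENT_PDMA_STOPPED */
	MCU_ACKS_RESET_DONE,	/* MT_MCU_CMD_RESET_DONE */
	HOST_REINITS_DMA,	/* mt7615_dma_reset() + token rebuild */
	HOST_SAYS_PDMA_INIT,	/* MT_MCU_INT_EVENT_PDMA_INIT */
	MCU_ACKS_RECOVERY_DONE,	/* MT_MCU_CMD_RECOVERY_DONE */
	HOST_SAYS_RESET_DONE,	/* MT_MCU_INT_EVENT_RESET_DONE */
	MCU_BACK_TO_NORMAL,	/* MT_MCU_CMD_NORMAL_STATE */
};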
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 6e5db015b32c..63c081bb04d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -61,6 +61,11 @@ enum mt7615_reg_base {
#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
+#define MT_MCU_CIRQ_BASE 0xc0000
+#define MT_MCU_CIRQ(ofs) (MT_MCU_CIRQ_BASE + (ofs))
+
+#define MT_MCU_CIRQ_IRQ_SEL(n) MT_MCU_CIRQ((n) << 2)
+
#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
#define MT_HIF_RST MT_HIF(0x100)
#define MT_HIF_LOGIC_RST_N BIT(4)
@@ -88,6 +93,10 @@ enum mt7615_reg_base {
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_MCU2HOST_INT_STATUS MT_HIF(0x1f0)
+#define MT_MCU2HOST_INT_ENABLE MT_HIF(0x1f4)
+
+#define MT7663_MCU_INT_EVENT MT_HIF(0x108)
#define MT_MCU_INT_EVENT MT_HIF(0x1f8)
#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1)
@@ -102,6 +111,7 @@ enum mt7615_reg_base {
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT7663_INT_MCU_CMD BIT(29)
#define MT_INT_MCU_CMD BIT(30)
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
@@ -138,6 +148,7 @@ enum mt7615_reg_base {
#define MT_MCU_CMD_PDMA_ERROR BIT(27)
#define MT_MCU_CMD_PCIE_ERROR BIT(28)
#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24))
+#define MT7663_MCU_CMD_ERROR_MASK GENMASK(5, 2)
#define MT_TX_RING_BASE MT_HIF(0x300)
#define MT_RX_RING_BASE MT_HIF(0x400)
@@ -368,7 +379,9 @@ enum mt7615_reg_base {
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
+#define MT_DMA_DCR0_DAMSDU_EN BIT(16)
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+#define MT_DMA_DCR0_RX_HDR_TRANS_EN BIT(19)
#define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40)
#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
@@ -447,9 +460,10 @@ enum mt7615_reg_base {
#define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n))
-#define MT_LPON_T0CR MT_LPON(0x010)
-#define MT_LPON_T0CR_MODE GENMASK(1, 0)
-#define MT_LPON_T0CR_WRITE BIT(0)
+#define MT_LPON_TCR0(_n) MT_LPON(0x010 + ((_n) * 4))
+#define MT_LPON_TCR2(_n) MT_LPON(0x0f8 + ((_n) - 2) * 4)
+#define MT_LPON_TCR_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_WRITE BIT(0)
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 9fb506f2ace6..4393dd21ebbb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
+ u8 pad;
qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
+ smp_rmb();
+
if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
__skb_put_zero(e->skb, 4);
err = __mt7663s_xmit_queue(dev, e->skb->data,
@@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
goto next;
}
- if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
+ pad = roundup(e->skb->len, 4) - e->skb->len;
+ if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
break;
if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
@@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
len += iter->len;
nframes++;
}
+
+ if (unlikely(pad)) {
+ memset(sdio->xmit_buf[qid] + len, 0, pad);
+ len += pad;
+ }
next:
q->first = (q->first + 1) % q->ndesc;
e->done = true;
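The fix above accounts for the 4-byte alignment pad between frames when checking the xmit buffer quota, and zeroes that pad after the copy so the next frame starts aligned. The arithmetic in isolation, as a runnable sketch:

#include <stdio.h>
#include <string.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned char xmit_buf[64];
	size_t len = 0, skb_len = 10;
	size_t pad = ROUNDUP(skb_len, 4) - skb_len;

	memset(xmit_buf, 0xaa, sizeof(xmit_buf));
	/* the frame copy would land here */
	len += skb_len;
	if (pad) {	/* zero the tail so the next frame is 4-byte aligned */
		memset(xmit_buf + len, 0, pad);
		len += pad;
	}
	printf("len %zu (pad %zu)\n", len, pad);	/* len 12 (pad 2) */
	return 0;
}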
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
index 9aa5183c7a56..be9a69fe1b38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -40,10 +40,8 @@ static int mt7622_wmac_probe(struct platform_device *pdev)
return irq;
mem_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mem_base)) {
- dev_err(&pdev->dev, "Failed to get memory resource\n");
+ if (IS_ERR(mem_base))
return PTR_ERR(mem_base);
- }
return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 203256862dfd..f8d3673c2cae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
struct mt7615_rate_desc *rate = &wrd->rate;
struct mt7615_sta *sta = wrd->sta;
u32 w5, w27, addr, val;
+ u16 idx;
lockdep_assert_held(&dev->mt76.mutex);
@@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
- mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ idx = sta->vif->mt76.omac_idx;
+ idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+ addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
+
+ mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
val = mt76_rr(dev, MT_LPON_UTTR0);
sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 0d58606391b0..6c889b90fd12 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -53,11 +53,25 @@ struct mt76_connac_pm {
} tx_q[IEEE80211_NUM_ACS];
struct work_struct wake_work;
- struct completion wake_cmpl;
+ wait_queue_head_t wait;
+
+ struct {
+ spinlock_t lock;
+ u32 count;
+ } wake;
+ struct mutex mutex;
struct delayed_work ps_work;
unsigned long last_activity;
unsigned long idle_timeout;
+
+ struct {
+ unsigned long last_wake_event;
+ unsigned long awake_time;
+ unsigned long last_doze_event;
+ unsigned long doze_time;
+ unsigned int lp_wake;
+ } stats;
};
struct mt76_connac_coredump {
@@ -73,12 +87,55 @@ static inline bool is_mt7921(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7961;
}
+static inline bool is_mt7663(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7663;
+}
+
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
struct mt76_wcid *wcid);
+static inline bool
+mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
+{
+ bool ret = false;
+
+ spin_lock_bh(&pm->wake.lock);
+ if (test_bit(MT76_STATE_PM, &phy->state))
+ goto out;
+
+ pm->wake.count++;
+ ret = true;
+out:
+ spin_unlock_bh(&pm->wake.lock);
+
+ return ret;
+}
+
+static inline void
+mt76_connac_pm_unref(struct mt76_connac_pm *pm)
+{
+ spin_lock_bh(&pm->wake.lock);
+ pm->wake.count--;
+ pm->last_activity = jiffies;
+ spin_unlock_bh(&pm->wake.lock);
+}
+
+static inline bool
+mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
+{
+ bool ret;
+
+ spin_lock_bh(&pm->wake.lock);
+ ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
+ spin_unlock_bh(&pm->wake.lock);
+
+ return ret;
+}
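These new helpers implement a wake reference count under pm->wake.lock: taking a reference fails once MT76_STATE_PM is set, dropping the last one stamps last_activity so the idle timer restarts, and firmware power control is skipped while any user holds a reference. A plain-C model of that contract, with the locking and kernel types simplified away:

#include <stdbool.h>
#include <stdio.h>

struct pm_state {
	bool in_pm;		/* MT76_STATE_PM */
	unsigned int wake_count;
	unsigned long last_activity;
};

static bool pm_ref(struct pm_state *pm)
{
	/* spin_lock_bh(&pm->wake.lock) in the real code */
	if (pm->in_pm)
		return false;	/* device asleep: caller must not touch hw */
	pm->wake_count++;
	return true;
}

static void pm_unref(struct pm_state *pm, unsigned long now)
{
	pm->wake_count--;
	pm->last_activity = now;
}

static bool pm_skip_fw_pmctrl(struct pm_state *pm)
{
	/* ownership may only be handed to firmware with no active users,
	 * and only by the thread that wins setting the PM bit */
	if (pm->wake_count || pm->in_pm)
		return true;
	pm->in_pm = true;
	return false;
}

int main(void)
{
	struct pm_state pm = { 0 };

	if (pm_ref(&pm)) {
		printf("doing mmio while awake\n");
		pm_unref(&pm, 12345);
	}
	printf("skip pmctrl: %d\n", pm_skip_fw_pmctrl(&pm));
	return 0;
}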
+
static inline void
mt76_connac_mutex_acquire(struct mt76_dev *dev, struct mt76_connac_pm *pm)
__acquires(&dev->mutex)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index c5f5037f5757..6f180c92d413 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -13,17 +13,14 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
if (!mt76_is_mmio(dev))
return 0;
+ cancel_delayed_work_sync(&pm->ps_work);
if (!test_bit(MT76_STATE_PM, &phy->state))
return 0;
- if (test_bit(MT76_HW_SCANNING, &phy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
- return 0;
-
- if (queue_work(dev->wq, &pm->wake_work))
- reinit_completion(&pm->wake_cmpl);
-
- if (!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ)) {
+ queue_work(dev->wq, &pm->wake_work);
+ if (!wait_event_timeout(pm->wait,
+ !test_bit(MT76_STATE_PM, &phy->state),
+ 3 * HZ)) {
ieee80211_wake_queues(phy->hw);
return -ETIMEDOUT;
}
@@ -40,17 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
if (!mt76_is_mmio(dev))
return;
- if (!pm->enable || !test_bit(MT76_STATE_RUNNING, &phy->state))
+ if (!pm->enable)
return;
pm->last_activity = jiffies;
- if (test_bit(MT76_HW_SCANNING, &phy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
- return;
-
- if (!test_bit(MT76_STATE_PM, &phy->state))
+ if (!test_bit(MT76_STATE_PM, &phy->state)) {
+ cancel_delayed_work(&phy->mac_work);
queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
+ }
}
EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 6cbccfb05f8b..fe0ab5e5ff81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -287,7 +287,7 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
&hdr.wlan_idx_hi);
if (!nskb) {
nskb = mt76_mcu_msg_alloc(dev, NULL,
- MT76_CONNAC_WTBL_UPDATE_BA_SIZE);
+ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
if (!nskb)
return ERR_PTR(-ENOMEM);
@@ -392,6 +392,21 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
uapsd->max_sp = sta->max_sp;
}
+void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+ struct mt76_wcid *wcid,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct wtbl_hdr_trans *htr;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS,
+ sizeof(*htr),
+ wtbl_tlv, sta_wtbl);
+ htr = (struct wtbl_hdr_trans *)tlv;
+ htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);
+
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -496,7 +511,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
cap |= STA_REC_HE_CAP_OM;
- if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU)
cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;
if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
@@ -655,7 +670,8 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u8 rcpi)
{
struct cfg80211_chan_def *chandef = &mphy->chandef;
enum nl80211_band band = chandef->chan->band;
@@ -704,6 +720,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
phy = (struct sta_rec_phy *)tlv;
phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
+ phy->rcpi = rcpi;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
@@ -808,40 +825,42 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct mt76_wcid *wcid,
- bool enable, int cmd)
+ struct mt76_sta_cmd_info *info)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
- skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable);
- if (enable && sta)
- mt76_connac_mcu_sta_tlv(phy, skb, sta, vif);
+ mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable);
+ if (info->enable && info->sta)
+ mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif,
+ info->rcpi);
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
sizeof(struct tlv));
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
WTBL_RESET_AND_SET,
sta_wtbl, &skb);
- if (enable) {
- mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ if (info->enable) {
+ mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
+ info->sta, sta_wtbl,
wtbl_hdr);
- if (sta)
- mt76_connac_mcu_wtbl_ht_tlv(dev, skb, sta, sta_wtbl,
- wtbl_hdr);
+ if (info->sta)
+ mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
+ sta_wtbl, wtbl_hdr);
}
- return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+ return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
@@ -946,6 +965,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
break;
@@ -1195,6 +1215,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
.center_chan = ieee80211_frequency_to_channel(freq1),
.center_chan2 = ieee80211_frequency_to_channel(freq2),
.tx_streams = hweight8(phy->antenna_mask),
+ .ht_op_info = 4, /* set HT 40M allowed */
.rx_streams = phy->chainmask,
.short_st = true,
},
@@ -1287,6 +1308,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
case NL80211_CHAN_WIDTH_20:
default:
rlm_req.rlm.bw = CMD_CBW_20MHZ;
+ rlm_req.rlm.ht_op_info = 0;
break;
}
@@ -1306,7 +1328,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct cfg80211_scan_request *sreq = &scan_req->req;
- int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
+ int n_ssids = 0, err, i, duration;
int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_dev *mdev = phy->dev;
@@ -1343,6 +1365,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->ssid_type_ext = n_ssids ? BIT(0) : 0;
req->ssids_num = n_ssids;
+ duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
/* increase channel time for passive scan */
if (!sreq->n_ssids)
duration *= 2;
@@ -1368,11 +1391,14 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->ies_len = cpu_to_le16(sreq->ie_len);
}
+ if (is_mt7921(phy->dev))
+ req->scan_func |= SCAN_FUNC_SPLIT_SCAN;
+
memcpy(req->bssid, sreq->bssid, ETH_ALEN);
if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
get_random_mask_addr(req->random_mac, sreq->mac_addr,
sreq->mac_addr_mask);
- req->scan_func = 1;
+ req->scan_func |= SCAN_FUNC_RANDOM_MAC;
}
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false);
@@ -1433,10 +1459,13 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req->version = 1;
req->seq_num = mvif->scan_seq_num | ext_phy << 7;
- if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ if (is_mt7663(phy->dev) &&
+ (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) {
+ get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr,
sreq->mac_addr_mask);
req->scan_func = 1;
+ } else if (is_mt7921(phy->dev)) {
+ req->mt7921.bss_idx = mvif->idx;
}
req->ssids_num = sreq->n_ssids;
@@ -1499,14 +1528,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);
int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
{
- struct {
- __le16 id;
- u8 type;
- u8 resp_type;
- __le16 data_size;
- __le16 resv;
- u8 data[320];
- } req = {
+ struct mt76_connac_config req = {
.resp_type = 0,
};
@@ -1517,6 +1539,19 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);
+int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
+{
+ struct mt76_connac_config req = {
+ .resp_type = 0,
+ };
+
+ snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable);
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
+ false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
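MCU_CMD_CHIP_CONFIG messages carry plain "key value" strings, so deep sleep is toggled by formatting "KeepFullPwr %d" with the inverted flag (enabling deep sleep means not keeping full power). The formatting step alone, runnable:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	char data[320];	/* same payload size as the config request */
	bool enable = true;

	snprintf(data, sizeof(data), "KeepFullPwr %d", !enable);
	printf("payload: \"%s\"\n", data);	/* payload: "KeepFullPwr 0" */
	return 0;
}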
+
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump)
{
@@ -1531,6 +1566,181 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
+static void
+mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
+ struct mt76_power_limits *limits,
+ enum nl80211_band band)
+{
+ int max_power = is_mt7921(dev) ? 127 : 63;
+ int i, offset = sizeof(limits->cck);
+
+ memset(sku, max_power, MT_SKU_POWER_LIMIT);
+
+ if (band == NL80211_BAND_2GHZ) {
+ /* cck */
+ memcpy(sku, limits->cck, sizeof(limits->cck));
+ }
+
+ /* ofdm */
+ memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm));
+ offset += sizeof(limits->ofdm);
+
+ /* ht */
+ for (i = 0; i < 2; i++) {
+ memcpy(&sku[offset], limits->mcs[i], 8);
+ offset += 8;
+ }
+ sku[offset++] = limits->mcs[0][0];
+
+ /* vht */
+ for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) {
+ memcpy(&sku[offset], limits->mcs[i],
+ ARRAY_SIZE(limits->mcs[i]));
+ offset += 12;
+ }
+
+ if (!is_mt7921(dev))
+ return;
+
+ /* he */
+ for (i = 0; i < ARRAY_SIZE(limits->ru); i++) {
+ memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i]));
+ offset += ARRAY_SIZE(limits->ru[i]);
+ }
+}
+
+static int
+mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
+ enum nl80211_band band)
+{
+ struct mt76_dev *dev = phy->dev;
+ int sku_len, batch_len = is_mt7921(dev) ? 8 : 16;
+ static const u8 chan_list_2ghz[] = {
+ 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14
+ };
+ static const u8 chan_list_5ghz[] = {
+ 36, 38, 40, 42, 44, 46, 48,
+ 50, 52, 54, 56, 58, 60, 62,
+ 64, 100, 102, 104, 106, 108, 110,
+ 112, 114, 116, 118, 120, 122, 124,
+ 126, 128, 132, 134, 136, 138, 140,
+ 142, 144, 149, 151, 153, 155, 157,
+ 159, 161, 165
+ };
+ struct mt76_connac_sku_tlv sku_tlbv;
+ int i, n_chan, batch_size, idx = 0;
+ struct mt76_power_limits limits;
+ const u8 *ch_list;
+
+ sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
+
+ if (band == NL80211_BAND_2GHZ) {
+ n_chan = ARRAY_SIZE(chan_list_2ghz);
+ ch_list = chan_list_2ghz;
+ } else {
+ n_chan = ARRAY_SIZE(chan_list_5ghz);
+ ch_list = chan_list_5ghz;
+ }
+ batch_size = DIV_ROUND_UP(n_chan, batch_len);
+
+ for (i = 0; i < batch_size; i++) {
+ bool last_msg = i == batch_size - 1;
+ int num_ch = last_msg ? n_chan % batch_len : batch_len;
+ struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
+ .band = band == NL80211_BAND_2GHZ ? 1 : 2,
+ .n_chan = num_ch,
+ .last_msg = last_msg,
+ };
+ struct sk_buff *skb;
+ int j, err, msg_len;
+
+ msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
+ skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
+ if (!skb)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
+ memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
+
+ skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
+ for (j = 0; j < num_ch; j++, idx++) {
+ struct ieee80211_channel chan = {
+ .hw_value = ch_list[idx],
+ .band = band,
+ };
+
+ mt76_get_rate_power_limits(phy, &chan, &limits, 127);
+
+ sku_tlbv.channel = ch_list[idx];
+ mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
+ &limits, band);
+ skb_put_data(skb, &sku_tlbv, sku_len);
+ }
+
+ err = mt76_mcu_skb_send_msg(dev, skb,
+ MCU_CMD_SET_RATE_TX_POWER, false);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
+{
+ int err;
+
+ err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
+ if (err < 0)
+ return err;
+
+ return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
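mt76_connac_mcu_rate_txpower_band() splits the channel list into firmware-sized messages, with the final (shorter) message flagged via last_msg. The batch arithmetic reproduced standalone, assuming the 14-entry 2 GHz list and the mt7921 batch length of 8:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int n_chan = 14, batch_len = 8;
	int batch_size = DIV_ROUND_UP(n_chan, batch_len);
	int i;

	for (i = 0; i < batch_size; i++) {
		int last_msg = (i == batch_size - 1);
		int num_ch = last_msg ? n_chan % batch_len : batch_len;

		printf("msg %d: %d channels, last_msg=%d\n",
		       i, num_ch, last_msg);
	}
	/* note: a channel count that is an exact multiple of batch_len
	 * would make the final num_ch zero; the 2 GHz and 5 GHz lists
	 * used by the driver never hit that case */
	return 0;
}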
+
+int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
+ struct mt76_vif *vif,
+ struct ieee80211_bss_conf *info)
+{
+ struct sk_buff *skb;
+ int i, len = min_t(int, info->arp_addr_cnt,
+ IEEE80211_BSS_ARP_ADDR_LIST_LEN);
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_arpns_tlv arp;
+ } req_hdr = {
+ .hdr = {
+ .bss_idx = vif->idx,
+ },
+ .arp = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
+ .ips_num = len,
+ .mode = 2, /* update */
+ .option = 1,
+ },
+ };
+
+ skb = mt76_mcu_msg_alloc(dev, NULL,
+ sizeof(req_hdr) + len * sizeof(__be32));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ for (i = 0; i < len; i++) {
+ u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
+
+ memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
+ }
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
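The ARP-offload message is a fixed header plus TLV followed by ips_num raw IPv4 addresses, appended one __be32 at a time. A userspace sketch of that buffer construction — the tag value, field layout and sizes here are placeholders for illustration, not the firmware ABI:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct arpns_tlv {
	uint16_t tag, len;
	uint8_t ips_num, mode, option, pad;
};

int main(void)
{
	uint32_t ips[2] = { htonl(0xc0a80101), htonl(0xc0a80102) };
	uint8_t buf[64];
	struct arpns_tlv tlv = {
		.tag = 2, .ips_num = 2, .mode = 2 /* update */, .option = 1,
	};
	size_t off = 0;
	int i;

	tlv.len = sizeof(tlv);
	memcpy(buf + off, &tlv, sizeof(tlv));
	off += sizeof(tlv);
	for (i = 0; i < tlv.ips_num; i++) {	/* append addresses raw */
		memcpy(buf + off, &ips[i], sizeof(ips[i]));
		off += sizeof(ips[i]);
	}
	printf("message body: %zu bytes\n", off);
	return 0;
}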
+
#ifdef CONFIG_PM
const struct wiphy_wowlan_support mt76_connac_wowlan_support = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index c1e1df5f7cd7..a1096861d04a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -210,7 +210,7 @@ struct wtbl_hdr_trans {
__le16 len;
u8 to_ds;
u8 from_ds;
- u8 disable_rx_trans;
+ u8 no_rx_trans;
u8 rsv;
} __packed;
@@ -304,9 +304,6 @@ struct wtbl_raw {
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
-#define MT76_CONNAC_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_ba))
-
enum {
STA_REC_BASIC,
STA_REC_RA,
@@ -365,6 +362,9 @@ enum {
#define NETWORK_IBSS BIT(18)
#define NETWORK_WDS BIT(21)
+#define SCAN_FUNC_RANDOM_MAC BIT(0)
+#define SCAN_FUNC_SPLIT_SCAN BIT(5)
+
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
@@ -564,6 +564,7 @@ enum {
MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5,
MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd,
+ MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0,
};
enum {
@@ -759,11 +760,19 @@ struct mt76_connac_sched_scan_req {
u8 channel_type;
u8 channels_num;
u8 intervals_num;
- u8 scan_func; /* BIT(0) eable random mac address */
+ u8 scan_func; /* MT7663: BIT(0) enable random mac address */
struct mt76_connac_mcu_scan_channel channels[64];
__le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL];
- u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
- u8 pad2[58];
+ union {
+ struct {
+ u8 random_mac[ETH_ALEN];
+ u8 pad2[58];
+ } mt7663;
+ struct {
+ u8 bss_idx;
+ u8 pad2[63];
+ } mt7921;
+ };
} __packed;
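The union keeps the scheduled-scan request the same size on the wire whichever chip-specific arm is filled in: both arms pad out to a 64-byte tail. A compile-time check of that invariant, with the field types simplified:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct sched_scan_tail {
	union {
		struct { uint8_t random_mac[6]; uint8_t pad2[58]; } mt7663;
		struct { uint8_t bss_idx;      uint8_t pad2[63]; } mt7921;
	};
};

int main(void)
{
	static_assert(sizeof(struct sched_scan_tail) == 64,
		      "union arms must pad to the same size");
	printf("tail size: %zu\n", sizeof(struct sched_scan_tail));
	return 0;
}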
struct mt76_connac_sched_scan_done {
@@ -876,6 +885,48 @@ struct mt76_connac_suspend_tlv {
u8 pad[5];
} __packed;
+struct mt76_sta_cmd_info {
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+
+ struct ieee80211_vif *vif;
+
+ bool enable;
+ int cmd;
+ u8 rcpi;
+};
+
+#define MT_SKU_POWER_LIMIT 161
+
+struct mt76_connac_sku_tlv {
+ u8 channel;
+ s8 pwr_limit[MT_SKU_POWER_LIMIT];
+} __packed;
+
+struct mt76_connac_tx_power_limit_tlv {
+ /* DW0 - common info */
+ u8 ver;
+ u8 pad0;
+ __le16 len;
+ /* DW1 - cmd hint */
+ u8 n_chan; /* number of channels */
+ u8 band; /* 2.4GHz or 5GHz */
+ u8 last_msg;
+ u8 pad1;
+ /* DW3 */
+ u8 alpha2[4]; /* regulatory_request.alpha2 */
+ u8 pad2[32];
+} __packed;
+
+struct mt76_connac_config {
+ __le16 id;
+ u8 type;
+ u8 resp_type;
+ __le16 data_size;
+ __le16 resv;
+ u8 data[320];
+} __packed;
+
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
@@ -917,9 +968,13 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
+void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+ struct mt76_wcid *wcid,
+ void *sta_wtbl, void *wtbl_tlv);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ u8 rcpi);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
@@ -942,10 +997,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct mt76_wcid *wcid,
bool enable);
int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct mt76_wcid *wcid,
- bool enable, int cmd);
+ struct mt76_sta_cmd_info *info);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band);
@@ -967,6 +1019,9 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
struct ieee80211_vif *vif,
bool enable);
+int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
+ struct mt76_vif *vif,
+ struct ieee80211_bss_conf *info);
int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *key);
@@ -974,6 +1029,8 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
+int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump);
+int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy);
#endif /* __MT76_CONNAC_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 02d0aa0b815e..5847f943e8da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -221,9 +221,9 @@ mt76x0e_remove(struct pci_dev *pdev)
}
static const struct pci_device_id mt76x0e_device_table[] = {
- { PCI_DEVICE(0x14c3, 0x7610) },
- { PCI_DEVICE(0x14c3, 0x7630) },
- { PCI_DEVICE(0x14c3, 0x7650) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7650) },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index a593a7796d23..f2b2fa733845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -32,7 +32,8 @@ static struct usb_device_id mt76x0_device_table[] = {
{ USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
{ USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
{ USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
- { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP */
+ { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP_US_v1 */
+ { USB_DEVICE(0x2357, 0x010b) }, /* TP-LINK T2UHP_UN_v1 */
/* TP-LINK Archer T1U */
{ USB_DEVICE(0x2357, 0x0105), .driver_info = 1, },
/* MT7630U */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 771bad60e1bc..0da37867cb64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -770,6 +770,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_hdr *hdr;
struct mt76x02_rxwi *rxwi = rxi;
struct mt76x02_sta *sta;
u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
@@ -864,7 +865,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->freq = dev->mphy.chandef.chan->center_freq;
status->band = dev->mphy.chandef.chan->band;
- status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
return mt76x02_mac_process_rate(dev, status, rate);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 4aa5c36afeaf..75978820a260 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -17,9 +17,8 @@ int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd,
u32 *rxfce;
if (!skb) {
- dev_err(mdev->dev,
- "MCU message %d (seq %d) timed out\n", cmd,
- seq);
+ dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n",
+ abs(cmd), seq);
dev->mcu_timeout = 1;
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index e7a46ac97f51..b50084bbe83d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -226,11 +226,11 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- ret = mt76_init_queues(dev);
+ ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt76x02_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
@@ -472,6 +472,8 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_queue_rx_reset(dev, i);
}
+ mt76_tx_status_check(&dev->mt76, NULL, true);
+
mt76x02_mac_start(dev);
if (dev->ed_monitor)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index ab671e21f882..02db5d66735d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
+ /* MT76x0 GTK offloading does not work with more than one VIF */
+ if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
wcid = msta ? &msta->wcid : &mvif->group_wcid;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index ecaf85b483ac..adf288e50e21 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -10,9 +10,9 @@
#include "mt76x2.h"
static const struct pci_device_id mt76x2e_device_table[] = {
- { PCI_DEVICE(0x14c3, 0x7662) },
- { PCI_DEVICE(0x14c3, 0x7612) },
- { PCI_DEVICE(0x14c3, 0x7602) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7662) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7612) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7602) },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
index cc2054dffa98..40c8061787e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -3,6 +3,6 @@
obj-$(CONFIG_MT7915E) += mt7915e.o
mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
- debugfs.o
+ debugfs.o mmio.o
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 77dcd71e49a5..6a8ddeeecbe9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
- bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
seq_printf(file, "\nPhy %d\n", ext_phy);
@@ -192,7 +192,7 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
}
static int
-mt7915_tx_stats_read(struct seq_file *file, void *data)
+mt7915_tx_stats_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
int stat[8], i, n;
@@ -222,19 +222,7 @@ mt7915_tx_stats_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7915_tx_stats_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7915_tx_stats_read, inode->i_private);
-}
-
-static const struct file_operations fops_tx_stats = {
- .open = mt7915_tx_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
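DEFINE_SHOW_ATTRIBUTE() generates the single_open() boilerplate deleted above from a foo_show() function. Roughly what it expands to, per include/linux/seq_file.h (simplified sketch, not the verbatim kernel macro):

#define DEFINE_SHOW_ATTRIBUTE_SKETCH(__name)				\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}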
static int mt7915_read_temperature(struct seq_file *s, void *data)
{
@@ -311,8 +299,7 @@ mt7915_queues_read(struct seq_file *s, void *data)
}
static void
-mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta,
- s8 txpower_cur, int band)
+mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
{
static const char * const sku_group_name[] = {
"CCK", "OFDM", "HT20", "HT40",
@@ -320,24 +307,54 @@ mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta,
"RU26", "RU52", "RU106", "RU242/SU20",
"RU484/SU40", "RU996/SU80", "RU2x996/SU160"
};
- s8 txpower[MT7915_SKU_RATE_NUM];
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ bool ext_phy = phy != &dev->phy;
+ u32 reg_base;
int i, idx = 0;
- for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
- txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2);
+ if (!phy)
+ return;
- for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) {
- const struct sku_group *sku = &mt7915_sku_groups[i];
- u32 offset = sku->offset[band];
+ reg_base = MT_TMAC_FP0R0(ext_phy);
+ seq_printf(s, "\nBand %d\n", ext_phy);
+
+ for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
+ u8 cnt, mcs_num = mt7915_sku_group_len[i];
+ s8 txpower[12];
+ int j;
+
+ if (i == SKU_HT_BW20 || i == SKU_HT_BW40) {
+ mcs_num = 8;
+ } else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) {
+ mcs_num = 10;
+ } else if (i == SKU_HE_RU26) {
+ reg_base = MT_TMAC_FP0R18(ext_phy);
+ idx = 0;
+ }
- if (!offset) {
- idx += sku->len;
- continue;
+ for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) {
+ u32 val;
+
+ if (i == SKU_VHT_BW160 && idx == 60) {
+ reg_base = MT_TMAC_FP0R15(ext_phy);
+ idx = 0;
+ }
+
+ val = mt76_rr(dev, reg_base + (idx / 4) * 4);
+
+ if (idx && idx % 4)
+ val >>= (idx % 4) * 8;
+
+ while (val > 0 && cnt < mcs_num) {
+ s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val);
+
+ txpower[cnt++] = pwr;
+ val >>= 8;
+ idx++;
+ }
}
- mt76_seq_puts_array(s, sku_group_name[i],
- txpower + idx, sku->len);
- idx += sku->len;
+ mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num);
}
}
@@ -345,24 +362,9 @@ static int
mt7915_read_rate_txpower(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
- struct mt76_phy *mphy = &dev->mphy;
- enum nl80211_band band = mphy->chandef.chan->band;
- s8 *delta = dev->rate_power[band];
- s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX];
-
- seq_puts(s, "Band 0:\n");
- mt7915_puts_rate_txpower(s, delta, txpower_base, band);
-
- if (dev->mt76.phy2) {
- mphy = dev->mt76.phy2;
- band = mphy->chandef.chan->band;
- delta = dev->rate_power[band];
- txpower_base = mphy->txpower_cur -
- delta[MT7915_SKU_MAX_DELTA_IDX];
-
- seq_puts(s, "Band 1:\n");
- mt7915_puts_rate_txpower(s, delta, txpower_base, band);
- }
+
+ mt7915_puts_rate_txpower(s, &dev->phy);
+ mt7915_puts_rate_txpower(s, mt7915_ext_phy(dev));
return 0;
}
@@ -379,7 +381,7 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
mt7915_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7915_queues_acq);
- debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
+ debugfs_create_file("tx_stats", 0400, dir, dev, &mt7915_tx_stats_fops);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_file("implicit_txbf", 0600, dir, dev,
&fops_implicit_txbf);
@@ -412,7 +414,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
mt7915_sta_fixed_rate_set, "%llx\n");
static int
-mt7915_sta_stats_read(struct seq_file *s, void *data)
+mt7915_sta_stats_show(struct seq_file *s, void *data)
{
struct ieee80211_sta *sta = s->private;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
@@ -455,24 +457,12 @@ mt7915_sta_stats_read(struct seq_file *s, void *data)
return 0;
}
-static int
-mt7915_sta_stats_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7915_sta_stats_read, inode->i_private);
-}
-
-static const struct file_operations fops_sta_stats = {
- .open = mt7915_sta_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7915_sta_stats);
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
- debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats);
+ debugfs_create_file("stats", 0400, dir, sta, &mt7915_sta_stats_fops);
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index bf51304a770b..11d0b760abd7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -110,121 +110,13 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
__mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
}
-static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
-{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
- int i;
-
- if (addr < 0x100000)
- return addr;
-
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
- u32 ofs;
-
- if (addr < fixed_map[i].phys)
- continue;
-
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
- continue;
-
- return fixed_map[i].mapped + ofs;
- }
-
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
- return mt7915_reg_map_l1(dev, addr);
-
- return mt7915_reg_map_l2(dev, addr);
-}
-
-static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
-
- return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
-
- dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
-
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
int mt7915_dma_init(struct mt7915_dev *dev)
{
/* Increase buffer size to receive large VHT/HE MPDUs */
- struct mt76_bus_ops *bus_ops;
int rx_buf_size = MT_RX_BUF_SIZE * 2;
u32 hif1_ofs = 0;
int ret;
- dev->bus_ops = dev->mt76.bus;
- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
- GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
-
- bus_ops->rr = mt7915_rr;
- bus_ops->wr = mt7915_wr;
- bus_ops->rmw = mt7915_rmw;
- dev->mt76.bus = bus_ops;
-
mt76_dma_attach(&dev->mt76);
if (dev->hif2)
@@ -321,11 +213,11 @@ int mt7915_dma_init(struct mt7915_dev *dev)
return ret;
}
- ret = mt76_init_queues(dev);
+ ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7915_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
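The register-translation helpers removed here move into the new mmio.c (added to the Makefile earlier in this series); their core is a linear search over fixed physical windows, falling back to the L1/L2 remap for anything outside them. The lookup in isolation, with two illustrative map entries:

#include <stdint.h>
#include <stdio.h>

struct fixed_map { uint32_t phys, mapped, size; };

static const struct fixed_map map[] = {
	{ 0x54000000, 0x02000, 0x1000 },	/* WFDMA PCIE0 MCU DMA0 */
	{ 0x820c0000, 0x08000, 0x4000 },	/* WF_UMAC_TOP (PLE) */
};

static uint32_t reg_addr(uint32_t addr)
{
	unsigned int i;

	if (addr < 0x100000)	/* already a bus offset */
		return addr;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		uint32_t ofs = addr - map[i].phys;

		if (addr >= map[i].phys && ofs <= map[i].size)
			return map[i].mapped + ofs;
	}
	return addr;	/* the driver falls back to L1/L2 remap here */
}

int main(void)
{
	printf("0x820c1000 -> 0x%05x\n", reg_addr(0x820c1000)); /* 0x09000 */
	return 0;
}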
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 660398ac53c2..8ededf2e5279 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -8,12 +8,29 @@ static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
{
u8 *data = dev->mt76.eeprom.data;
- if (data[offset] == 0xff)
+ if (data[offset] == 0xff && !dev->flash_mode)
mt7915_mcu_get_eeprom(dev, offset);
return data[offset];
}
+static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 val;
+
+ val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL);
+ if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
+ return 0;
+
+ val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE;
+ dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL);
+ if (!dev->cal)
+ return -ENOMEM;
+
+ return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
+}
+
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
@@ -22,12 +39,14 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
if (ret < 0)
return ret;
- if (ret)
+ if (ret) {
dev->flash_mode = true;
- else
+ ret = mt7915_eeprom_load_precal(dev);
+ } else {
memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+ }
- return 0;
+ return ret;
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@@ -124,7 +143,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
{
- int index;
+ int index, target_power;
bool tssi_on;
if (chain_idx > 3)
@@ -133,131 +152,56 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
tssi_on = mt7915_tssi_enabled(dev, chan->band);
if (chan->band == NL80211_BAND_2GHZ) {
- index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
- } else {
- int group = tssi_on ?
- mt7915_get_channel_group(chan->hw_value) : 8;
-
- index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
- }
-
- return mt7915_eeprom_read(dev, index);
-}
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ target_power = mt7915_eeprom_read(dev, index);
-static const u8 sku_cck_delta_map[] = {
- SKU_CCK_GROUP0,
- SKU_CCK_GROUP0,
- SKU_CCK_GROUP1,
- SKU_CCK_GROUP1,
-};
+ if (!tssi_on)
+ target_power += mt7915_eeprom_read(dev, index + 1);
+ } else {
+ int group = mt7915_get_channel_group(chan->hw_value);
-static const u8 sku_ofdm_delta_map[] = {
- SKU_OFDM_GROUP0,
- SKU_OFDM_GROUP0,
- SKU_OFDM_GROUP1,
- SKU_OFDM_GROUP1,
- SKU_OFDM_GROUP2,
- SKU_OFDM_GROUP2,
- SKU_OFDM_GROUP3,
- SKU_OFDM_GROUP4,
-};
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ target_power = mt7915_eeprom_read(dev, index + group);
-static const u8 sku_mcs_delta_map[] = {
- SKU_MCS_GROUP0,
- SKU_MCS_GROUP1,
- SKU_MCS_GROUP1,
- SKU_MCS_GROUP2,
- SKU_MCS_GROUP2,
- SKU_MCS_GROUP3,
- SKU_MCS_GROUP4,
- SKU_MCS_GROUP5,
- SKU_MCS_GROUP6,
- SKU_MCS_GROUP7,
- SKU_MCS_GROUP8,
- SKU_MCS_GROUP9,
-};
+ if (!tssi_on)
+ target_power += mt7915_eeprom_read(dev, index + 8);
+ }
-#define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \
- [_mode] = { \
- .len = _len, \
- .offset = { \
- _ofs_2g, \
- _ofs_5g, \
- }, \
- .delta_map = _map \
+ return target_power;
}
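The reworked read sums a per-chain base byte with a delta byte that only applies when TSSI compensation is off, instead of picking one of two bytes. The arithmetic for the 2 GHz case with stubbed EEPROM contents (values made up for the example):

#include <stdbool.h>
#include <stdio.h>

static int eeprom_read(int index)
{
	static const unsigned char fake_eeprom[] = { 34, 4 };	/* base, delta */
	return fake_eeprom[index];
}

static int target_power_2g(int chain_idx, bool tssi_on)
{
	int index = 0 + chain_idx * 3;	/* MT_EE_TX0_POWER_2G stand-in */
	int power = eeprom_read(index);

	if (!tssi_on)	/* non-TSSI parts carry an extra offset byte */
		power += eeprom_read(index + 1);
	return power;
}

int main(void)
{
	printf("tssi on:  %d\n", target_power_2g(0, true));	/* 34 */
	printf("tssi off: %d\n", target_power_2g(0, false));	/* 38 */
	return 0;
}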
-const struct sku_group mt7915_sku_groups[] = {
- SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map),
- SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map),
-
- SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map),
- SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map),
- SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map),
- SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map),
- SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map),
- SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map),
-
- SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map),
- SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map),
- SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map),
- SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map),
- SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map),
- SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map),
- SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map),
-};
-
-static s8
-mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr)
+s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
- u32 val = mt7915_eeprom_read(dev, addr);
- s8 delta = FIELD_GET(SKU_DELTA_VAL, val);
+ u32 val;
+ s8 delta;
- if (!(val & SKU_DELTA_EN))
- return 0;
+ if (band == NL80211_BAND_2GHZ)
+ val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G);
+ else
+ val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G);
- return val & SKU_DELTA_ADD ? delta : -delta;
-}
+ if (!(val & MT_EE_RATE_DELTA_EN))
+ return 0;
-static void
-mt7915_eeprom_init_sku_band(struct mt7915_dev *dev,
- struct ieee80211_supported_band *sband)
-{
- int i, band = sband->band;
- s8 *rate_power = dev->rate_power[band], max_delta = 0;
- u8 idx = 0;
-
- for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) {
- const struct sku_group *sku = &mt7915_sku_groups[i];
- u32 offset = sku->offset[band];
- int j;
-
- if (!offset) {
- idx += sku->len;
- continue;
- }
-
- rate_power[idx++] = mt7915_get_sku_delta(dev, offset);
- if (rate_power[idx - 1] > max_delta)
- max_delta = rate_power[idx - 1];
-
- if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
- offset += 1;
-
- for (j = 1; j < sku->len; j++) {
- u32 addr = offset + sku->delta_map[j];
-
- rate_power[idx++] = mt7915_get_sku_delta(dev, addr);
- if (rate_power[idx - 1] > max_delta)
- max_delta = rate_power[idx - 1];
- }
- }
+ delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
- rate_power[idx] = max_delta;
+ return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
}
-void mt7915_eeprom_init_sku(struct mt7915_dev *dev)
-{
- mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband);
- mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband);
-}
+const u8 mt7915_sku_group_len[] = {
+ [SKU_CCK] = 4,
+ [SKU_OFDM] = 8,
+ [SKU_HT_BW20] = 8,
+ [SKU_HT_BW40] = 9,
+ [SKU_VHT_BW20] = 12,
+ [SKU_VHT_BW40] = 12,
+ [SKU_VHT_BW80] = 12,
+ [SKU_VHT_BW160] = 12,
+ [SKU_HE_RU26] = 12,
+ [SKU_HE_RU52] = 12,
+ [SKU_HE_RU106] = 12,
+ [SKU_HE_RU242] = 12,
+ [SKU_HE_RU484] = 12,
+ [SKU_HE_RU996] = 12,
+ [SKU_HE_RU2x996] = 12
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 3ee8c27bb61b..033fb592bdf0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -17,14 +17,25 @@ enum mt7915_eeprom_field {
MT_EE_MAC_ADDR = 0x004,
MT_EE_MAC_ADDR2 = 0x00a,
MT_EE_DDIE_FT_VERSION = 0x050,
+ MT_EE_DO_PRE_CAL = 0x062,
MT_EE_WIFI_CONF = 0x190,
+ MT_EE_RATE_DELTA_2G = 0x252,
+ MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
MT_EE_ADIE_FT_VERSION = 0x9a0,
- __MT_EE_MAX = 0xe00
+ __MT_EE_MAX = 0xe00,
+ /* 0xe10 ~ 0x5780 used to save group cal data */
+ MT_EE_PRECAL = 0xe10
};
+#define MT_EE_WIFI_CAL_GROUP BIT(0)
+#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
+#define MT_EE_CAL_UNIT 1024
+#define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
+
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
@@ -34,6 +45,10 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CONF7_TSSI0_5G BIT(2)
#define MT_EE_WIFI_CONF7_TSSI1_5G BIT(4)
+#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0)
+#define MT_EE_RATE_DELTA_SIGN BIT(6)
+#define MT_EE_RATE_DELTA_EN BIT(7)
+
enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DEFAULT,
MT_EE_BAND_SEL_5GHZ,
@@ -41,32 +56,6 @@ enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DUAL,
};
-#define SKU_DELTA_VAL GENMASK(5, 0)
-#define SKU_DELTA_ADD BIT(6)
-#define SKU_DELTA_EN BIT(7)
-
-enum mt7915_sku_delta_group {
- SKU_CCK_GROUP0,
- SKU_CCK_GROUP1,
-
- SKU_OFDM_GROUP0 = 0,
- SKU_OFDM_GROUP1,
- SKU_OFDM_GROUP2,
- SKU_OFDM_GROUP3,
- SKU_OFDM_GROUP4,
-
- SKU_MCS_GROUP0 = 0,
- SKU_MCS_GROUP1,
- SKU_MCS_GROUP2,
- SKU_MCS_GROUP3,
- SKU_MCS_GROUP4,
- SKU_MCS_GROUP5,
- SKU_MCS_GROUP6,
- SKU_MCS_GROUP7,
- SKU_MCS_GROUP8,
- SKU_MCS_GROUP9,
-};
-
enum mt7915_sku_rate_group {
SKU_CCK,
SKU_OFDM,
@@ -86,12 +75,6 @@ enum mt7915_sku_rate_group {
MAX_SKU_RATE_GROUP_NUM,
};
-struct sku_group {
- u8 len;
- u16 offset[2];
- const u8 *delta_map;
-};
-
static inline int
mt7915_get_channel_group(int channel)
{
@@ -124,6 +107,6 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
}
-extern const struct sku_group mt7915_sku_groups[];
+extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
#endif
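
For scale, the pre-calibration constants above work out as follows: the
group-cal blob is MT_EE_CAL_GROUP_SIZE = 44 * 1024 = 45056 bytes and the DPD
blob is 54 * 1024 = 55296 bytes, both stored from MT_EE_PRECAL (0xe10) onward
and streamed to the MCU in MT_EE_CAL_UNIT (1024-byte) chunks, i.e. 44 and 54
messages respectively.
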
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ad4e5b95158b..822f3aa6bb8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include "mt7915.h"
#include "mac.h"
+#include "mcu.h"
#include "eeprom.h"
#define CCK_RATE(_idx, _rate) { \
@@ -67,6 +68,39 @@ static const struct ieee80211_iface_combination if_comb[] = {
};
static void
+mt7915_init_txpower(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, n_chains = hweight8(dev->mphy.antenna_mask);
+ int nss_delta = mt76_tx_power_nss_delta(n_chains);
+ int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
+ struct mt76_power_limits limits;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ u32 target_power = 0;
+ int j;
+
+ for (j = 0; j < n_chains; j++) {
+ u32 val;
+
+ val = mt7915_eeprom_get_target_power(dev, chan, j);
+ target_power = max(target_power, val);
+ }
+
+ target_power += pwr_delta;
+ target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
+ &limits,
+ target_power);
+ target_power += nss_delta;
+ target_power = DIV_ROUND_UP(target_power, 2);
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power);
+ chan->orig_mpwr = target_power;
+ }
+}
+
+static void
mt7915_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
@@ -76,8 +110,12 @@ mt7915_regd_notifier(struct wiphy *wiphy,
struct mt7915_phy *phy = mphy->priv;
struct cfg80211_chan_def *chandef = &mphy->chandef;
+ memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
+ mt7915_init_txpower(dev, &mphy->sband_2g.sband);
+ mt7915_init_txpower(dev, &mphy->sband_5g.sband);
+
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
@@ -93,6 +131,10 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
hw->queues = 4;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->netdev_features = NETIF_F_RXCSUM;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
phy->slottime = 9;
@@ -108,9 +150,28 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
hw->max_tx_fragments = 4;
+
+ if (phy->mt76->cap.has_2ghz)
+ phy->mt76->sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+
+ if (phy->mt76->cap.has_5ghz) {
+ phy->mt76->sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ }
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7915_set_stream_vht_txbf_caps(phy);
+ mt7915_set_stream_he_caps(phy);
}
static void
@@ -153,16 +214,14 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
int i;
mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
- /* disable hardware de-agg */
- mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+ /* enable hardware de-agg */
+ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
for (i = 0; i < MT7915_WTBL_SIZE; i++)
mt7915_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
mt7915_mac_init_band(dev, i);
-
- mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b);
}
static int mt7915_txbf_init(struct mt7915_dev *dev)
@@ -185,38 +244,6 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
return mt7915_mcu_set_txbf_type(dev);
}
-static void
-mt7915_init_txpower_band(struct mt7915_dev *dev,
- struct ieee80211_supported_band *sband)
-{
- int i, n_chains = hweight8(dev->mphy.antenna_mask);
-
- for (i = 0; i < sband->n_channels; i++) {
- struct ieee80211_channel *chan = &sband->channels[i];
- u32 target_power = 0;
- int j;
-
- for (j = 0; j < n_chains; j++) {
- u32 val;
-
- val = mt7915_eeprom_get_target_power(dev, chan, j);
- target_power = max(target_power, val);
- }
-
- chan->max_power = min_t(int, chan->max_reg_power,
- target_power / 2);
- chan->orig_mpwr = target_power / 2;
- }
-}
-
-static void mt7915_init_txpower(struct mt7915_dev *dev)
-{
- mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband);
- mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband);
-
- mt7915_eeprom_init_sku(dev);
-}
-
static int mt7915_register_ext_phy(struct mt7915_dev *dev)
{
struct mt7915_phy *phy = mt7915_ext_phy(dev);
@@ -238,22 +265,17 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
phy->mt76 = mphy;
mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
- mt7915_init_wiphy(mphy->hw);
INIT_LIST_HEAD(&phy->stats_list);
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
mt7915_eeprom_parse_band_config(phy);
- mt7915_set_stream_vht_txbf_caps(phy);
- mt7915_set_stream_he_caps(phy);
+ mt7915_init_wiphy(mphy->hw);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
ETH_ALEN);
mt76_eeprom_override(mphy);
- /* The second interface does not get any packets unless it has a vif */
- ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
-
ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
MT7915_TX_RING_SIZE);
if (ret)
@@ -278,9 +300,48 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_mcu_set_eeprom(dev);
mt7915_mac_init(dev);
- mt7915_init_txpower(dev);
+ mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
mt7915_txbf_init(dev);
- mt7915_register_ext_phy(dev);
+}
+
+static void mt7915_wfsys_reset(struct mt7915_dev *dev)
+{
+ u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
+
+#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
+#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
+
+ mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
+
+ /* change to software control */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* reset wfsys */
+ val &= ~MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* release wfsys, then the mcu re-executes the romcode */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* switch to hw control */
+ val &= ~MT_TOP_PWR_SW_RST;
+ val |= MT_TOP_PWR_HW_CTRL;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* check whether the mcu has reset to its default state */
+ if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
+ MT_MCU_DUMMY_DEFAULT, 1000)) {
+ dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
+ return;
+ }
+
+ /* wfsys reset won't clear host registers */
+ mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+
+ msleep(100);
}
static int mt7915_init_hardware(struct mt7915_dev *dev)
@@ -290,10 +351,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- spin_lock_init(&dev->token_lock);
- idr_init(&dev->token);
+ dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
- dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
+ /* If MCU was already running, it is likely in a bad state */
+ if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
+ FW_STATE_FW_DOWNLOAD)
+ mt7915_wfsys_reset(dev);
ret = mt7915_dma_init(dev);
if (ret)
@@ -308,13 +371,26 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
ret = mt7915_mcu_init(dev);
- if (ret)
- return ret;
+ if (ret) {
+ /* Reset and try again */
+ mt7915_wfsys_reset(dev);
+
+ ret = mt7915_mcu_init(dev);
+ if (ret)
+ return ret;
+ }
ret = mt7915_eeprom_init(dev);
if (ret < 0)
return ret;
+
+ if (dev->flash_mode) {
+ ret = mt7915_mcu_apply_group_cal(dev);
+ if (ret)
+ return ret;
+ }
+
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
if (idx)
@@ -330,8 +406,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
{
- int nss = hweight8(phy->mt76->chainmask);
- u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
+ int nss;
+ u32 *cap;
+
+ if (!phy->mt76->cap.has_5ghz)
+ return;
+
+ nss = hweight8(phy->mt76->chainmask);
+ cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -370,8 +452,8 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
elem->phy_cap_info[5] &= ~c;
- c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] &= ~c;
elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
@@ -408,8 +490,8 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
c = (nss - 1) | (max_t(int, le16_to_cpu(mcs->tx_mcs_160), 1) << 3);
elem->phy_cap_info[5] |= c;
- c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
@@ -476,9 +558,9 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_MAC_CAP0_HTC_HE;
he_cap_elem->mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
he_cap_elem->mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU;
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
@@ -535,7 +617,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
@@ -635,25 +717,14 @@ int mt7915_register_device(struct mt7915_dev *dev)
return ret;
mt7915_init_wiphy(hw);
- dev->mphy.sband_2g.sband.ht_cap.cap |=
- IEEE80211_HT_CAP_LDPC_CODING |
- IEEE80211_HT_CAP_MAX_AMSDU;
- dev->mphy.sband_5g.sband.ht_cap.cap |=
- IEEE80211_HT_CAP_LDPC_CODING |
- IEEE80211_HT_CAP_MAX_AMSDU;
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
if (!dev->dbdc_support)
dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
-
- mt76_set_stream_caps(&dev->mphy, true);
- mt7915_set_stream_vht_txbf_caps(&dev->phy);
- mt7915_set_stream_he_caps(&dev->phy);
dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
@@ -667,6 +738,10 @@ int mt7915_register_device(struct mt7915_dev *dev)
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+ ret = mt7915_register_ext_phy(dev);
+ if (ret)
+ return ret;
+
return mt7915_init_debugfs(dev);
}
@@ -675,9 +750,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
mt7915_mcu_exit(dev);
- mt7915_dma_cleanup(dev);
-
mt7915_tx_token_put(dev);
+ mt7915_dma_cleanup(dev);
mt76_free_device(&dev->mt76);
}
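
A note on units in mt7915_init_txpower() above: everything up to the final
division runs in 0.5 dB steps. A hypothetical standalone sketch of that math
(the mt76_get_rate_power_limits() clamp is omitted and the names here are
illustrative, not the driver's):

/* All inputs in 0.5 dB units except the regulatory cap, which is dBm. */
static int channel_power_dbm(int eep_target, int eep_delta,
			     int nss_delta, int reg_cap_dbm)
{
	int half_db = eep_target + eep_delta + nss_delta;
	int dbm = (half_db + 1) / 2;	/* DIV_ROUND_UP(half_db, 2) */

	return dbm < reg_cap_dbm ? dbm : reg_cap_dbm;
}

With a 22 dBm per-chain target (44), a +1 dB EEPROM delta (2) and a two-stream
NSS delta of 3 dB (6), this yields min(26 dBm, regulatory cap).
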
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index e5a258958ac9..7a9759fb79d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -317,11 +317,18 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv = NULL;
u32 mode = 0;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
+ u32 rxd4 = le32_to_cpu(rxd[4]);
+ u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
bool unicast, insert_ccmp_hdr = false;
- u8 remove_pad;
+ u8 remove_pad, amsdu_info;
+ bool hdr_trans;
+ u16 seq_ctrl = 0;
+ u8 qos_ctl = 0;
+ __le16 fc = 0;
int i, idx;
memset(status, 0, sizeof(*status));
@@ -338,8 +345,12 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
return -EINVAL;
+ if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+ return -EINVAL;
+
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
@@ -362,6 +373,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
+ if ((rxd0 & csum_mask) == csum_mask)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -375,19 +389,6 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
- if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (phy->rx_ampdu_ts != rxd[14]) {
- if (!++phy->ampdu_ref)
- phy->ampdu_ref++;
- }
- phy->rx_ampdu_ts = rxd[14];
-
- status->ampdu_ref = phy->ampdu_ref;
- }
-
remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -395,6 +396,13 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
rxd += 6;
if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ u32 v0 = le32_to_cpu(rxd[0]);
+ u32 v2 = le32_to_cpu(rxd[2]);
+
+ fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
+ qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
+ seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
+
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -419,6 +427,22 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ status->timestamp = le32_to_cpu(rxd[0]);
+ status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != status->timestamp) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = status->timestamp;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
rxd += 2;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -541,23 +565,47 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
- if (insert_ccmp_hdr) {
+ amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+ status->amsdu = !!amsdu_info;
+ if (status->amsdu) {
+ status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
+ status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+ if (!hdr_trans) {
+ memmove(skb->data + 2, skb->data,
+ ieee80211_get_hdrlen_from_skb(skb));
+ skb_pull(skb, 2);
+ }
+ }
+
+ if (insert_ccmp_hdr && !hdr_trans) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
mt76_insert_ccmp_hdr(skb, key_id);
}
+ if (!hdr_trans) {
+ hdr = mt76_skb_get_hdr(skb);
+ fc = hdr->frame_control;
+ if (ieee80211_is_data_qos(fc)) {
+ seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+ qos_ctl = *ieee80211_get_qos_ctl(hdr);
+ }
+ } else {
+ status->flag &= ~(RX_FLAG_RADIOTAP_HE |
+ RX_FLAG_RADIOTAP_HE_MU);
+ status->flag |= RX_FLAG_8023;
+ }
+
if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
- hdr = mt76_skb_get_hdr(skb);
- if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
status->aggr = unicast &&
- !ieee80211_is_qos_nullfunc(hdr->frame_control);
- status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ !ieee80211_is_qos_nullfunc(fc);
+ status->qos_ctl = qos_ctl;
+ status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
return 0;
}
@@ -613,19 +661,18 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
{
#ifdef CONFIG_NL80211_TESTMODE
struct mt76_testmode_data *td = &phy->mt76->test;
+ const struct ieee80211_rate *r;
+ u8 bw, mode, nss = td->tx_rate_nss;
u8 rate_idx = td->tx_rate_idx;
- u8 nss = td->tx_rate_nss;
- u8 bw, mode;
u16 rateval = 0;
u32 val;
+ bool cck = false;
+ int band;
if (skb != phy->mt76->test.tx_skb)
return;
switch (td->tx_rate_mode) {
- case MT76_TM_TX_MODE_CCK:
- mode = MT_PHY_TYPE_CCK;
- break;
case MT76_TM_TX_MODE_HT:
nss = 1 + (rate_idx >> 3);
mode = MT_PHY_TYPE_HT;
@@ -645,7 +692,20 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
case MT76_TM_TX_MODE_HE_MU:
mode = MT_PHY_TYPE_HE_MU;
break;
+ case MT76_TM_TX_MODE_CCK:
+ cck = true;
+ fallthrough;
case MT76_TM_TX_MODE_OFDM:
+ band = phy->mt76->chandef.chan->band;
+ if (band == NL80211_BAND_2GHZ && !cck)
+ rate_idx += 4;
+
+ r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
+ val = cck ? r->hw_value_short : r->hw_value;
+
+ mode = val >> 8;
+ rate_idx = val & 0xff;
+ break;
default:
mode = MT_PHY_TYPE_OFDM;
break;
@@ -700,9 +760,10 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
if (mode >= MT_PHY_TYPE_HE_SU)
val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
- if (td->tx_rate_ldpc || bw > 0)
+ if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
val |= MT_TXD6_LDPC;
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
txwi[6] |= cpu_to_le32(val);
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
phy->test.spe_idx));
@@ -913,26 +974,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
-static void
-mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked)
-{
- struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2;
- struct mt76_queue *q, *q2 = NULL;
-
- q = mphy->q_tx[0];
- if (blocked == q->blocked)
- return;
-
- q->blocked = blocked;
- if (mphy2) {
- q2 = mphy2->q_tx[0];
- q2->blocked = blocked;
- }
-
- if (!blocked)
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@@ -985,15 +1026,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
- spin_lock_bh(&dev->token_lock);
- id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
- if (id >= 0)
- dev->token_count++;
-
- if (dev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR)
- mt7915_set_tx_blocked(dev, true);
- spin_unlock_bh(&dev->token_lock);
-
+ id = mt76_token_consume(mdev, &t);
if (id < 0)
return id;
@@ -1091,7 +1124,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
int i;
txp = mt7915_txwi_to_txp(dev, t);
- for (i = 1; i < txp->nbuf; i++)
+ for (i = 0; i < txp->nbuf; i++)
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
@@ -1157,15 +1190,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
- spin_lock_bh(&dev->token_lock);
- txwi = idr_remove(&dev->token, msdu);
- if (txwi)
- dev->token_count--;
- if (dev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR &&
- dev->mphy.q_tx[0]->blocked)
- wake = true;
- spin_unlock_bh(&dev->token_lock);
-
+ txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
@@ -1195,11 +1220,8 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_mac_sta_poll(dev);
- if (wake) {
- spin_lock_bh(&dev->token_lock);
- mt7915_set_tx_blocked(dev, false);
- spin_unlock_bh(&dev->token_lock);
- }
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
mt76_worker_schedule(&dev->mt76.tx_worker);
@@ -1228,10 +1250,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
struct mt7915_txp *txp;
txp = mt7915_txwi_to_txp(mdev, e->txwi);
-
- spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, le16_to_cpu(txp->token));
- spin_unlock_bh(&dev->token_lock);
+ t = mt76_token_put(mdev, le16_to_cpu(txp->token));
e->skb = t ? t->skb : NULL;
}
@@ -1252,8 +1271,8 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
bool ext_phy = phy != &dev->phy;
u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
- mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
- mt7915_l2_set(dev, reg, BIT(11) | BIT(9));
+ mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
+ mt76_set(dev, reg, BIT(11) | BIT(9));
}
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
@@ -1346,12 +1365,12 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
- mt7915_l2_set(dev, MT_WF_PHY_RXTD12(ext_phy),
- MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
- MT_WF_PHY_RXTD12_IRPI_SW_CLR);
+ mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
+ MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
+ MT_WF_PHY_RXTD12_IRPI_SW_CLR);
- mt7915_l2_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
- FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
+ mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
+ FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
static u8
@@ -1366,7 +1385,7 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
- val = mt7915_l2_rr(dev, reg);
+ val = mt76_rr(dev, reg);
sum += val * nf_power[i];
n += val;
}
@@ -1470,9 +1489,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
}
static void
-mt7915_dma_reset(struct mt7915_phy *phy)
+mt7915_dma_reset(struct mt7915_dev *dev)
{
- struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy_ext = dev->mt76.phy2;
u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
int i;
@@ -1489,18 +1507,22 @@ mt7915_dma_reset(struct mt7915_phy *phy)
(MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN));
}
+
usleep_range(1000, 2000);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
if (mphy_ext)
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
}
- mt76_for_each_q_rx(&dev->mt76, i) {
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
- }
+
+ mt76_tx_status_check(&dev->mt76, NULL, true);
/* re-init prefetch settings after reset */
mt7915_dma_prefetch(dev);
@@ -1524,8 +1546,8 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
struct mt76_txwi_cache *txwi;
int id;
- spin_lock_bh(&dev->token_lock);
- idr_for_each_entry(&dev->token, txwi, id) {
+ spin_lock_bh(&dev->mt76.token_lock);
+ idr_for_each_entry(&dev->mt76.token, txwi, id) {
mt7915_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb) {
struct ieee80211_hw *hw;
@@ -1534,10 +1556,10 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
ieee80211_free_txskb(hw, txwi->skb);
}
mt76_put_txwi(&dev->mt76, txwi);
- dev->token_count--;
+ dev->mt76.token_count--;
}
- spin_unlock_bh(&dev->token_lock);
- idr_destroy(&dev->token);
+ spin_unlock_bh(&dev->mt76.token_lock);
+ idr_destroy(&dev->mt76.token);
}
/* system error recovery */
@@ -1562,9 +1584,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2)
+ if (phy2) {
+ set_bit(MT76_RESET, &phy2->mt76->state);
cancel_delayed_work_sync(&phy2->mt76->mac_work);
-
+ }
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
if (ext_phy)
@@ -1580,11 +1603,11 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
- mt7915_tx_token_put(dev);
- idr_init(&dev->token);
-
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
- mt7915_dma_reset(&dev->phy);
+ mt7915_dma_reset(dev);
+
+ mt7915_tx_token_put(dev);
+ idr_init(&dev->mt76.token);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
@@ -1592,6 +1615,8 @@ void mt7915_mac_reset_work(struct work_struct *work)
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_RESET, &dev->mphy.state);
+ if (phy2)
+ clear_bit(MT76_RESET, &phy2->mt76->state);
mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
@@ -1633,39 +1658,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1;
- memset(mib, 0, sizeof(*mib));
-
- mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- u32 val, val2;
+ u32 val;
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
-
- val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
- if (val2 > mib->ack_fail_cnt)
- mib->ack_fail_cnt = val2;
-
- val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- if (val2 > mib->ba_miss_cnt)
- mib->ba_miss_cnt = val2;
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- if (val2 > mib->rts_retries_cnt) {
- mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt = val2;
- }
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
-
dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
dev->mt76.aggr_stats[aggr0++] += val >> 16;
- dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val >> 16;
}
}
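
On the A-MPDU grouping moved into the GROUP_2 parser above: all subframes of
one A-MPDU share a RX timestamp, and an ampdu_ref of 0 means "no reference",
so the counter skips it on wraparound. A minimal sketch of that logic:

#include <stdint.h>

static uint32_t ampdu_ref_update(uint32_t *last_ts, uint32_t *ref,
				 uint32_t ts)
{
	if (*last_ts != ts) {		/* first subframe of a new A-MPDU */
		if (!++(*ref))		/* skip 0 on wraparound */
			(*ref)++;
		*last_ts = ts;
	}
	return *ref;
}
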
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index 96ff3fb0d1f3..0f929fb53027 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -86,6 +86,10 @@ enum rx_pkt_type {
/* RXD DW4 */
#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
+
#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
#define MT_RXD4_NORMAL_CLS BIT(10)
#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
@@ -97,6 +101,17 @@ enum rx_pkt_type {
#define MT_RXV_HDR_BAND_IDX BIT(24)
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
+#define MT_RXD6_TA_LO GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
+
/* P-RXV */
#define MT_PRXV_TX_RATE GENMASK(6, 0)
#define MT_PRXV_TX_DCM BIT(4)
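
The payload-format field and the three A-MSDU markers above overlap the same
two bits of RXD4: 0 is a plain MSDU, otherwise the value marks the subframe's
position within an A-MSDU. An illustrative decode:

#include <stdint.h>

enum amsdu_pos {
	NOT_AMSDU = 0,		/* plain MSDU */
	AMSDU_LAST = 1,		/* MT_RXD4_LAST_AMSDU_FRAME */
	AMSDU_MID = 2,		/* MT_RXD4_MID_AMSDU_FRAME */
	AMSDU_FIRST = 3,	/* MT_RXD4_FIRST_AMSDU_FRAME */
};

static enum amsdu_pos amsdu_position(uint32_t rxd4)
{
	return (enum amsdu_pos)(rxd4 & 0x3);	/* GENMASK(1, 0) */
}
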
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index d4969b2e1ffb..e5bd687546b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -25,6 +25,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool running;
+ int ret;
flush_work(&dev->init_work);
@@ -33,21 +34,48 @@ static int mt7915_start(struct ieee80211_hw *hw)
running = mt7915_dev_running(dev);
if (!running) {
- mt7915_mcu_set_pm(dev, 0, 0);
- mt7915_mcu_set_mac(dev, 0, true, false);
- mt7915_mcu_set_scs(dev, 0, true);
+ ret = mt7915_mcu_set_pm(dev, 0, 0);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_mac(dev, 0, true, true);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_scs(dev, 0, true);
+ if (ret)
+ goto out;
+
mt7915_mac_enable_nf(dev, 0);
}
if (phy != &dev->phy) {
- mt7915_mcu_set_pm(dev, 1, 0);
- mt7915_mcu_set_mac(dev, 1, true, false);
- mt7915_mcu_set_scs(dev, 1, true);
+ ret = mt7915_mcu_set_pm(dev, 1, 0);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_mac(dev, 1, true, true);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_scs(dev, 1, true);
+ if (ret)
+ goto out;
+
mt7915_mac_enable_nf(dev, 1);
}
- mt7915_mcu_set_sku_en(phy, true);
- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+ ret = mt7915_mcu_set_rts_thresh(phy, 0x92b);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_sku_en(phy, true);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+ if (ret)
+ goto out;
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
@@ -58,9 +86,10 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (!running)
mt7915_mac_reset_counters(phy);
+out:
mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
static void mt7915_stop(struct ieee80211_hw *hw)
@@ -227,7 +256,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
struct mt7915_phy *phy = mt7915_hw_phy(hw);
int idx = msta->wcid.idx;
- /* TODO: disable beacon for the bss */
+ mt7915_mcu_add_bss_info(phy, vif, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, false);
mutex_lock(&dev->mt76.mutex);
mt76_testmode_reset(phy->mt76, true);
@@ -283,6 +313,12 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
+ if (dev->flash_mode) {
+ ret = mt7915_mcu_apply_tx_dpd(phy);
+ if (ret)
+ goto out;
+ }
+
ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
@@ -317,7 +353,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
&mvif->sta;
struct mt76_wcid *wcid = &msta->wcid;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
int idx = key->keyidx;
+ int err = 0;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -332,6 +370,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
+ wcid_keyidx = &wcid->hw_key_idx2;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -347,16 +386,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- } else if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- }
+ mutex_lock(&dev->mt76.mutex);
+
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ else
+ goto out;
+
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
+ err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
}
static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
@@ -382,7 +429,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ret = mt7915_mcu_set_sku(phy);
+ ret = mt7915_mcu_set_txpower_sku(phy);
if (ret)
return ret;
}
@@ -515,9 +562,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- mt7915_mcu_add_bss_info(phy, vif, info->enable_beacon);
- mt7915_mcu_add_sta(dev, vif, NULL, info->enable_beacon);
+ if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
}
 /* ensure that txcmd_mode is enabled after the bss_info update */
@@ -631,12 +678,13 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int ret;
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_set_rts_thresh(phy, val);
+ ret = mt7915_mcu_set_rts_thresh(phy, val);
mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
static int
@@ -663,22 +711,22 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
params->buf_size);
- mt7915_mcu_add_rx_ba(dev, params, true);
+ ret = mt7915_mcu_add_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt7915_mcu_add_rx_ba(dev, params, false);
+ ret = mt7915_mcu_add_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7915_mcu_add_tx_ba(dev, params, true);
+ ret = mt7915_mcu_add_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->ampdu_state);
- mt7915_mcu_add_tx_ba(dev, params, false);
+ ret = mt7915_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
set_bit(tid, &msta->ampdu_state);
@@ -687,7 +735,7 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->ampdu_state);
- mt7915_mcu_add_tx_ba(dev, params, false);
+ ret = mt7915_mcu_add_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -717,13 +765,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mib_stats *mib = &phy->mib;
+ mutex_lock(&dev->mt76.mutex);
stats->dot11RTSSuccessCount = mib->rts_cnt;
stats->dot11RTSFailureCount = mib->rts_retries_cnt;
stats->dot11FCSErrorCount = mib->fcs_err_cnt;
stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ memset(mib, 0, sizeof(*mib));
+
+ mutex_unlock(&dev->mt76.mutex);
+
return 0;
}
@@ -833,9 +887,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_sta_stats *stats = &msta->stats;
+ struct rate_info rxrate = {};
- if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
+ if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
return;
@@ -888,6 +945,22 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
}
+static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+ mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+}
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@@ -920,6 +993,7 @@ const struct ieee80211_ops mt7915_ops = {
.set_coverage_class = mt7915_set_coverage_class,
.sta_statistics = mt7915_sta_statistics,
.sta_set_4addr = mt7915_sta_set_4addr,
+ .sta_set_decap_offload = mt7915_sta_set_decap_offload,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_MAC80211_DEBUGFS
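
A hypothetical sketch of the key-slot split introduced in mt7915_set_key()
above: the BIP (AES-CMAC) key is tracked in its own slot so that a pairwise
key and an IGTK can coexist on one wcid. The struct and function names here
are illustrative, not the driver's:

#include <stdint.h>

struct wcid_keys {
	int8_t hw_key_idx;	/* pairwise / WEP / GTK */
	int8_t hw_key_idx2;	/* BIP (IGTK) */
};

/* WLAN_CIPHER_SUITE_AES_CMAC is 0x000fac06 in the 802.11 suite space */
static int8_t *key_slot(struct wcid_keys *k, uint32_t cipher)
{
	return cipher == 0x000fac06 ? &k->hw_key_idx2 : &k->hw_key_idx;
}
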
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 195929242b72..b3f14ff67c5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -147,9 +147,10 @@ mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
}
static u8
-mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
+ enum nl80211_band band = mphy->chandef.chan->band;
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
const struct ieee80211_sta_he_cap *he_cap;
@@ -161,12 +162,8 @@ mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif,
he_cap = &sta->he_cap;
} else {
struct ieee80211_supported_band *sband;
- struct mt7915_phy *phy;
- struct mt7915_vif *mvif;
- mvif = (struct mt7915_vif *)vif->drv_priv;
- phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
- sband = phy->mt76->hw->wiphy->bands[band];
+ sband = mphy->hw->wiphy->bands[band];
ht_cap = &sband->ht_cap;
vht_cap = &sband->vht_cap;
@@ -220,7 +217,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
int ret = 0;
if (!skb) {
- dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
+ dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
return -ETIMEDOUT;
}
@@ -337,6 +334,22 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_csa_notify *c;
+
+ c = (struct mt7915_mcu_csa_notify *)skb->data;
+
+ if (c->band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_csa_finish, mphy->hw);
+}
+
+static void
mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -344,61 +357,69 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if (r->idx && dev->mt76.phy2)
+ if (r->band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
}
-static void
+static int
mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
struct rate_info *rate, u16 r)
{
struct ieee80211_supported_band *sband;
u16 ru_idx = le16_to_cpu(ra->ru_idx);
- u16 flags = 0;
+ bool cck = false;
rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
case MT_PHY_TYPE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
else
sband = &mphy->sband_2g.sband;
+ rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
rate->legacy = sband->bitrates[rate->mcs].bitrate;
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
rate->mcs += (rate->nss - 1) * 8;
- flags |= RATE_INFO_FLAGS_MCS;
+ if (rate->mcs > 31)
+ return -EINVAL;
+ rate->flags = RATE_INFO_FLAGS_MCS;
if (ra->gi)
- flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case MT_PHY_TYPE_VHT:
- flags |= RATE_INFO_FLAGS_VHT_MCS;
+ if (rate->mcs > 9)
+ return -EINVAL;
+ rate->flags = RATE_INFO_FLAGS_VHT_MCS;
if (ra->gi)
- flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
case MT_PHY_TYPE_HE_MU:
+ if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
+ return -EINVAL;
+
rate->he_gi = ra->gi;
rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
-
- flags |= RATE_INFO_FLAGS_HE_MCS;
+ rate->flags = RATE_INFO_FLAGS_HE_MCS;
break;
default:
- break;
+ return -EINVAL;
}
- rate->flags = flags;
if (ru_idx) {
switch (ru_idx) {
@@ -435,6 +456,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
break;
}
}
+
+ return 0;
}
static void
@@ -465,12 +488,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
mphy = dev->mt76.phy2;
/* current rate */
- mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
- stats->tx_rate = rate;
+ if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
+ stats->tx_rate = rate;
/* probing rate */
- mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
- stats->prob_rate = prob_rate;
+ if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
+ stats->prob_rate = prob_rate;
if (attempts) {
u16 success = le16_to_cpu(ra->success);
@@ -498,7 +521,8 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
break;
}
- wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
+ (int)(skb->len - sizeof(*rxd)), data);
}
static void
@@ -511,9 +535,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_mcu_rx_radar_detected(dev, skb);
break;
case MCU_EXT_EVENT_CSA_NOTIFY:
- ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7915_mcu_csa_finish, dev);
+ mt7915_mcu_rx_csa_notify(dev, skb);
break;
case MCU_EXT_EVENT_RATE_REPORT:
mt7915_mcu_tx_rate_report(dev, skb);
@@ -592,7 +614,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
if (!nskb) {
nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT7915_WTBL_UPDATE_BA_SIZE);
+ MT7915_WTBL_UPDATE_MAX_SIZE);
if (!nskb)
return ERR_PTR(-ENOMEM);
@@ -662,8 +684,6 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct mt7915_phy *phy, bool enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- enum nl80211_band band = chandef->chan->band;
struct bss_info_basic *bss;
u16 wlan_idx = mvif->sta.wcid.idx;
u32 type = NETWORK_INFRA;
@@ -713,7 +733,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
bss->dtim_period = vif->bss_conf.dtim_period;
- bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL);
+ bss->phy_mode = mt7915_get_phy_mode(phy->mt76, vif, NULL);
} else {
memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
}
@@ -989,8 +1009,10 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct sk_buff *skb;
- if (mvif->omac_idx >= REPEATER_BSSID_START)
+ if (mvif->omac_idx >= REPEATER_BSSID_START) {
+ mt7915_mcu_muar_config(phy, vif, false, enable);
mt7915_mcu_muar_config(phy, vif, true, enable);
+ }
skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
MT7915_BSS_UPDATE_MAX_SIZE);
@@ -1188,6 +1210,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
&skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
@@ -1330,7 +1355,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
cap |= STA_REC_HE_CAP_OM;
- if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU)
cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;
if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
@@ -1685,6 +1710,7 @@ mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
return;
msta = (struct mt7915_sta *)sta->drv_priv;
+ htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) {
htr->to_ds = true;
htr->from_ds = true;
@@ -1704,6 +1730,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
return -ENOMEM;
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
@@ -1728,6 +1757,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
&skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
@@ -1821,9 +1853,9 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7915_mcu_sta_sounding_rate(bf);
- bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMER_FB,
+ bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
pe->phy_cap_info[6]);
- bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMER_FB,
+ bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB,
pe->phy_cap_info[6]);
bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
ve->phy_cap_info[5]);
@@ -2045,25 +2077,30 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
static void
mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
- struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt76_phy *mphy = &dev->mphy;
+ enum nl80211_band band;
struct sta_rec_ra *ra;
struct tlv *tlv;
- enum nl80211_band band = chandef->chan->band;
- u32 supp_rate = sta->supp_rates[band];
- int n_rates = hweight32(supp_rate);
- u32 cap = sta->wme ? STA_CAP_WMM : 0;
+ u32 supp_rate, n_rates, cap = sta->wme ? STA_CAP_WMM : 0;
u8 i, nss = sta->rx_nss, mcs = 0;
tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
-
ra = (struct sta_rec_ra *)tlv;
+
+ if (msta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ band = mphy->chandef.chan->band;
+ supp_rate = sta->supp_rates[band];
+ n_rates = hweight32(supp_rate);
+
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt7915_get_phy_mode(dev, vif, band, sta);
- ra->channel = chandef->chan->hw_value;
+ ra->phy_mode = mt7915_get_phy_mode(mphy, vif, sta);
+ ra->channel = mphy->chandef.chan->hw_value;
ra->bw = sta->bandwidth;
ra->rate_len = n_rates;
ra->phy.bw = sta->bandwidth;
@@ -2253,6 +2290,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
if (enable) {
mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
@@ -2411,6 +2451,17 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct bss_info_bcn *bcn;
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = en;
+
+ if (!en)
+ goto out;
+
skb = ieee80211_beacon_get_template(hw, vif, &offs);
if (!skb)
return -EINVAL;
@@ -2421,16 +2472,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
return -EINVAL;
}
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
- if (IS_ERR(rskb)) {
- dev_kfree_skb(skb);
- return PTR_ERR(rskb);
- }
-
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
- bcn = (struct bss_info_bcn *)tlv;
- bcn->enable = en;
-
if (mvif->band_idx) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
@@ -2441,6 +2482,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
+out:
return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
@@ -2500,11 +2542,9 @@ static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
static int mt7915_driver_own(struct mt7915_dev *dev)
{
- u32 reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
-
- mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
- 0, 500)) {
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0,
+ MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
@@ -2743,20 +2783,6 @@ out:
static int mt7915_load_firmware(struct mt7915_dev *dev)
{
int ret;
- u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
-
- val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
-
- if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
- /* restart firmware once */
- __mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
- val, 1000)) {
- dev_err(dev->mt76.dev,
- "Firmware is not ready for download\n");
- return -EIO;
- }
- }
ret = mt7915_load_patch(dev);
if (ret)
@@ -2766,7 +2792,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
- if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
FIELD_PREP(MT_TOP_MISC_FW_STATE,
FW_STATE_WACPU_RDY), 1000)) {
dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
@@ -2854,21 +2880,39 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
- u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
-
__mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
FIELD_PREP(MT_TOP_MISC_FW_STATE,
FW_STATE_FW_DOWNLOAD), 1000)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
return;
}
- reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
- mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN);
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
+static int
+mt7915_mcu_set_rx_hdr_trans_blacklist(struct mt7915_dev *dev, int band)
+{
+ struct {
+ u8 operation;
+ u8 count;
+ u8 _rsv[2];
+ u8 index;
+ u8 enable;
+ __le16 etype;
+ } req = {
+ .operation = 1,
+ .count = 1,
+ .enable = 1,
+ .etype = cpu_to_le16(ETH_P_PAE),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
+ &req, sizeof(req), false);
+}
+
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
bool enable, bool hdr_trans)
{
@@ -2899,6 +2943,9 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
if (ret)
return ret;
+ if (hdr_trans)
+ mt7915_mcu_set_rx_hdr_trans_blacklist(dev, band);
+
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL),
&req_mac, sizeof(req_mac), true);
}
@@ -3182,7 +3229,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
}
#endif
- if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
@@ -3280,6 +3327,148 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
return 0;
}
+static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
+ u8 *data, u32 len, int cmd)
+{
+ struct {
+ u8 dir;
+ u8 valid;
+ __le16 bitmap;
+ s8 precal;
+ u8 action;
+ u8 band;
+ u8 idx;
+ u8 rsv[4];
+ __le32 len;
+ } req;
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len);
+ if (!skb)
+ return -ENOMEM;
+
+ req.idx = idx;
+ req.len = cpu_to_le32(len);
+ skb_put_data(skb, &req, sizeof(req));
+ skb_put_data(skb, data, len);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false);
+}
+
+int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev)
+{
+ u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+ u32 total = MT_EE_CAL_GROUP_SIZE;
+
+ if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP))
+ return 0;
+
+ /*
+ * Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG
+ * Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC
+ */
+ while (total > 0) {
+ int ret, len;
+
+ len = min_t(u32, total, MT_EE_CAL_UNIT);
+
+ ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len,
+ MCU_EXT_CMD(GROUP_PRE_CAL_INFO));
+ if (ret)
+ return ret;
+
+ total -= len;
+ cal += len;
+ idx++;
+ }
+
+ return 0;
+}
+
+static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
+{
+ int i;
+
+ for (i = 0; i < n_freqs; i++)
+ if (cur == freqs[i])
+ return i;
+
+ return -1;
+}
+
+static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
+{
+ static const u16 freq_list[] = {
+ 5180, 5200, 5220, 5240,
+ 5260, 5280, 5300, 5320,
+ 5500, 5520, 5540, 5560,
+ 5580, 5600, 5620, 5640,
+ 5660, 5680, 5700, 5745,
+ 5765, 5785, 5805, 5825
+ };
+ int offset_2g = ARRAY_SIZE(freq_list);
+ int idx;
+
+ if (freq < 4000) {
+ if (freq < 2432)
+ return offset_2g;
+ if (freq < 2457)
+ return offset_2g + 1;
+
+ return offset_2g + 2;
+ }
+
+ if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160)
+ return -1;
+
+ if (bw != NL80211_CHAN_WIDTH_20) {
+ idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq + 10);
+ if (idx >= 0)
+ return idx;
+
+ idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq - 10);
+ if (idx >= 0)
+ return idx;
+ }
+
+ return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+}
+
+int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ u16 total = 2, center_freq = chandef->center_freq1;
+ int idx;	/* mt7915_dpd_freq_idx() may return -1 */
+ u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+
+ if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
+ return 0;
+
+ idx = mt7915_dpd_freq_idx(center_freq, chandef->width);
+ if (idx < 0)
+ return -EINVAL;
+
+ /* Items: Tx DPD, Tx Flatness */
+ idx = idx * 2;
+ cal += MT_EE_CAL_GROUP_SIZE;
+
+ while (total--) {
+ int ret;
+
+ cal += (idx * MT_EE_CAL_UNIT);
+ ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT,
+ MCU_EXT_CMD(DPD_PRE_CAL_INFO));
+ if (ret)
+ return ret;
+
+ idx++;
+ }
+
+ return 0;
+}
+
int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
{
struct {
@@ -3314,8 +3503,9 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
sizeof(req), false);
}
-int mt7915_mcu_set_sku(struct mt7915_phy *phy)
+int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
+#define MT7915_SKU_RATE_NUM 161
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
@@ -3328,15 +3518,37 @@ int mt7915_mcu_set_sku(struct mt7915_phy *phy)
.format_id = 4,
.dbdc_idx = phy != &dev->phy,
};
- int i;
- s8 *delta;
+ struct mt76_power_limits limits_array;
+ s8 *la = (s8 *)&limits_array;
+ int i, idx, n_chains = hweight8(mphy->antenna_mask);
+ int tx_power;
- delta = dev->rate_power[mphy->chandef.chan->band];
- mphy->txpower_cur = hw->conf.power_level * 2 +
- delta[MT7915_SKU_MAX_DELTA_IDX];
+ tx_power = hw->conf.power_level * 2 -
+ mt76_tx_power_nss_delta(n_chains);
- for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
- req.val[i] = hw->conf.power_level * 2 + delta[i];
+ tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
+ &limits_array, tx_power);
+ mphy->txpower_cur = tx_power;
+
+ for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
+ u8 mcs_num, len = mt7915_sku_group_len[i];
+ int j;
+
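+ /* HT/VHT groups use a fixed 10-entry MCS table in limits_array */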
+ if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) {
+ mcs_num = 10;
+
+ if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
+ la = (s8 *)&limits_array + 12;
+ } else {
+ mcs_num = len;
+ }
+
+ for (j = 0; j < min_t(u8, mcs_num, len); j++)
+ req.val[idx + j] = la[j];
+
+ la += mcs_num;
+ idx += len;
+ }
return mt76_mcu_send_msg(&dev->mt76,
MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
@@ -3501,9 +3713,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_supported_band *sband;
struct mt7915_mcu_phy_rx_info *res;
struct sk_buff *skb;
- u16 flags = 0;
int ret;
- int i;
+ bool cck = false;
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
&req, sizeof(req), true, &skb);
@@ -3517,48 +3728,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
switch (res->mode) {
case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
case MT_PHY_TYPE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
else
sband = &mphy->sband_2g.sband;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
- continue;
-
- rate->legacy = sband->bitrates[i].bitrate;
- break;
- }
+ rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
+ rate->legacy = sband->bitrates[rate->mcs].bitrate;
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- if (rate->mcs > 31)
- return -EINVAL;
-
- flags |= RATE_INFO_FLAGS_MCS;
+ if (rate->mcs > 31) {
+ ret = -EINVAL;
+ goto out;
+ }
+ rate->flags = RATE_INFO_FLAGS_MCS;
if (res->gi)
- flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case MT_PHY_TYPE_VHT:
- flags |= RATE_INFO_FLAGS_VHT_MCS;
+ if (rate->mcs > 9) {
+ ret = -EINVAL;
+ goto out;
+ }
+ rate->flags = RATE_INFO_FLAGS_VHT_MCS;
if (res->gi)
- flags |= RATE_INFO_FLAGS_SHORT_GI;
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
case MT_PHY_TYPE_HE_MU:
+ if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
+ ret = -EINVAL;
+ goto out;
+ }
rate->he_gi = res->gi;
-
- flags |= RATE_INFO_FLAGS_HE_MCS;
+ rate->flags = RATE_INFO_FLAGS_HE_MCS;
break;
default:
- break;
+ ret = -EINVAL;
+ goto out;
}
- rate->flags = flags;
switch (res->bw) {
case IEEE80211_STA_RX_BW_160:
@@ -3575,7 +3791,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
break;
}
+out:
dev_kfree_skb(skb);
- return 0;
+ return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 2d584142c27b..42582a66e42d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -68,10 +68,19 @@ struct mt7915_mcu_rxd {
u8 s2d_index;
};
+struct mt7915_mcu_csa_notify {
+ struct mt7915_mcu_rxd rxd;
+
+ u8 omac_idx;
+ u8 csa_count;
+ u8 band_idx;
+ u8 rsv;
+} __packed;
+
struct mt7915_mcu_rdd_report {
struct mt7915_mcu_rxd rxd;
- u8 idx;
+ u8 band_idx;
u8 long_detected;
u8 constant_prf_detected;
u8 staggered_prf_detected;
@@ -275,6 +284,8 @@ enum {
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_SET_SPR = 0xa8,
+ MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
+ MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
};
@@ -1080,9 +1091,6 @@ enum {
sizeof(struct tlv) + \
MT7915_WTBL_UPDATE_MAX_SIZE)
-#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_ba))
-
#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_omac) + \
sizeof(struct bss_info_basic) +\
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
new file mode 100644
index 000000000000..af712a936ef6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+
+static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L1);
+
+ return MT_HIF_REMAP_BASE_L1 + offset;
+}
+
+static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L2);
+
+ return MT_HIF_REMAP_BASE_L2 + offset;
+}
+
+static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
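+ /* addresses below 0x100000 are already bus offsets, no remapping needed */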
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
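+ /* everything else goes through the dynamic L1/L2 remap windows */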
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7915_reg_map_l1(dev, addr);
+
+ return mt7915_reg_map_l2(dev, addr);
+}
+
+static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
+{
+ struct mt76_bus_ops *bus_ops;
+ struct mt7915_dev *dev;
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+
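+ /* wrap the bus ops so that every access is translated by __mt7915_reg_addr() */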
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7915_rr;
+ bus_ops->wr = mt7915_wr;
+ bus_ops->rmw = mt7915_rmw;
+ dev->mt76.bus = bus_ops;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 5c7eefdf2013..4ea8972d4e2f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -32,17 +32,12 @@
#define MT7915_EEPROM_SIZE 3584
#define MT7915_TOKEN_SIZE 8192
-#define MT7915_TOKEN_FREE_THR 64
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */
-#define MT7915_SKU_RATE_NUM 161
-#define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM
-#define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1)
-
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@@ -108,11 +103,11 @@ struct mt7915_vif {
};
struct mib_stats {
- u16 ack_fail_cnt;
- u16 fcs_err_cnt;
- u16 rts_cnt;
- u16 rts_retries_cnt;
- u16 ba_miss_cnt;
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+ u32 ba_miss_cnt;
};
struct mt7915_hif {
@@ -142,7 +137,7 @@ struct mt7915_phy {
u8 rdd_state;
int dfs_state;
- __le32 rx_ampdu_ts;
+ u32 rx_ampdu_ts;
u32 ampdu_ref;
struct mib_stats mib;
@@ -191,16 +186,12 @@ struct mt7915_dev {
u32 hw_pattern;
- spinlock_t token_lock;
- int token_count;
- struct idr token;
-
- s8 **rate_power; /* TODO: use mt76_rate_power */
-
bool dbdc_support;
bool flash_mode;
bool fw_debug;
bool ibf;
+
+ void *cal;
};
enum {
@@ -300,7 +291,7 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
-void mt7915_eeprom_init_sku(struct mt7915_dev *dev);
+s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
int mt7915_dma_init(struct mt7915_dev *dev);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
@@ -350,7 +341,7 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
-int mt7915_mcu_set_sku(struct mt7915_phy *phy);
+int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
@@ -359,6 +350,8 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
const struct mt7915_dfs_pulse *pulse);
int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
const struct mt7915_dfs_pattern *pattern);
+int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
+int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
@@ -394,80 +387,6 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
-static inline u32
-mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
-{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
-
- mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
- /* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L1);
-
- return MT_HIF_REMAP_BASE_L1 + offset;
-}
-
-static inline u32
-mt7915_l1_rr(struct mt7915_dev *dev, u32 addr)
-{
- return mt76_rr(dev, mt7915_reg_map_l1(dev, addr));
-}
-
-static inline void
-mt7915_l1_wr(struct mt7915_dev *dev, u32 addr, u32 val)
-{
- mt76_wr(dev, mt7915_reg_map_l1(dev, addr), val);
-}
-
-static inline u32
-mt7915_l1_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
-{
- val |= mt7915_l1_rr(dev, addr) & ~mask;
- mt7915_l1_wr(dev, addr, val);
-
- return val;
-}
-
-#define mt7915_l1_set(dev, addr, val) mt7915_l1_rmw(dev, addr, 0, val)
-#define mt7915_l1_clear(dev, addr, val) mt7915_l1_rmw(dev, addr, val, 0)
-
-static inline u32
-mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
-{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
-
- mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
- /* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L2);
-
- return MT_HIF_REMAP_BASE_L2 + offset;
-}
-
-static inline u32
-mt7915_l2_rr(struct mt7915_dev *dev, u32 addr)
-{
- return mt76_rr(dev, mt7915_reg_map_l2(dev, addr));
-}
-
-static inline void
-mt7915_l2_wr(struct mt7915_dev *dev, u32 addr, u32 val)
-{
- mt76_wr(dev, mt7915_reg_map_l2(dev, addr), val);
-}
-
-static inline u32
-mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
-{
- val |= mt7915_l2_rr(dev, addr) & ~mask;
- mt7915_l2_wr(dev, addr, val);
-
- return val;
-}
-
-#define mt7915_l2_set(dev, addr, val) mt7915_l2_rmw(dev, addr, 0, val)
-#define mt7915_l2_clear(dev, addr, val) mt7915_l2_rmw(dev, addr, val, 0)
-
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
@@ -486,6 +405,7 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
+int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 13880cc9c9e8..643f171884cf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -154,28 +154,6 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static int
-mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev)
-{
-#define NUM_BANDS 2
- int i;
- s8 **sku;
-
- sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL);
- if (!sku)
- return -ENOMEM;
-
- for (i = 0; i < NUM_BANDS; i++) {
- sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE *
- sizeof(**sku), GFP_KERNEL);
- if (!sku[i])
- return -ENOMEM;
- }
- dev->rate_power = sku;
-
- return 0;
-}
-
static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
{
struct mt7915_hif *hif;
@@ -201,7 +179,7 @@ static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
}
/* master switch of PCIe interrupt enable */
- mt7915_l1_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
}
static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
@@ -234,6 +212,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
.tx_prepare_skb = mt7915_tx_prepare_skb,
.tx_complete_skb = mt7915_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
@@ -270,19 +249,13 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
dev = container_of(mdev, struct mt7915_dev, mt76);
- ret = mt7915_alloc_device(pdev, dev);
+
+ ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
if (ret)
goto error;
- mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
- mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) |
- (mt7915_l1_rr(dev, MT_HW_REV) & 0xff);
- dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
-
/* master switch of PCIe interrupt enable */
- mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index ed0c9a24bb53..efe0f2904c66 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -4,6 +4,11 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
+/* MCU WFDMA0 */
+#define MT_MCU_WFDMA0_BASE 0x2000
+#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
+#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
+
/* MCU WFDMA1 */
#define MT_MCU_WFDMA1_BASE 0x3000
#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
@@ -77,6 +82,11 @@
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+#define MT_TMAC_FP0R0(_band) MT_WF_TMAC(_band, 0x020)
+#define MT_TMAC_FP0R15(_band) MT_WF_TMAC(_band, 0x080)
+#define MT_TMAC_FP0R18(_band) MT_WF_TMAC(_band, 0x270)
+#define MT_TMAC_FP_MASK GENMASK(7, 0)
+
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
@@ -396,6 +406,14 @@
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+#define MT_TOP_RGU_BASE 0xf0000
+#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
+#define MT_TOP_PWR_KEY (0x5746 << 16)
+#define MT_TOP_PWR_SW_RST BIT(0)
+#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
+#define MT_TOP_PWR_HW_CTRL BIT(4)
+#define MT_TOP_PWR_PWR_ON BIT(7)
+
#define MT_INFRA_CFG_BASE 0xf1000
#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index bd798df748ba..f9d81e36ef09 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -257,13 +257,13 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
{
struct mt76_phy *mphy = phy->mt76;
struct mt76_testmode_data *td = &mphy->test;
- struct sk_buff *old = td->tx_skb, *new;
struct ieee80211_supported_band *sband;
struct rate_info rate = {};
u16 flags = 0, tx_len;
u32 bitrate;
+ int ret;
- if (!tx_time || !old)
+ if (!tx_time)
return 0;
rate.mcs = td->tx_rate_idx;
@@ -323,21 +323,9 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
bitrate = cfg80211_calculate_bitrate(&rate);
tx_len = bitrate * tx_time / 10 / 8;
- if (tx_len < sizeof(struct ieee80211_hdr))
- tx_len = sizeof(struct ieee80211_hdr);
- else if (tx_len > IEEE80211_MAX_FRAME_LEN)
- tx_len = IEEE80211_MAX_FRAME_LEN;
-
- new = alloc_skb(tx_len, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
-
- skb_copy_header(new, old);
- __skb_put_zero(new, tx_len);
- memcpy(new->data, old->data, sizeof(struct ieee80211_hdr));
-
- dev_kfree_skb(old);
- td->tx_skb = new;
+ ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index 09d1446ad933..e531666f9fb4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -2,4 +2,6 @@
obj-$(CONFIG_MT7921E) += mt7921e.o
-mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o
+CFLAGS_trace.o := -I$(src)
+
+mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o trace.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 0dc8e25e18e4..6ee423dd4027 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
- dev->fw_debug = (u8)val;
+ mt7921_mutex_acquire(dev);
+ dev->fw_debug = (u8)val;
mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
+ mt7921_mutex_release(dev);
+
return 0;
}
@@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
- bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
seq_printf(file, "\nPhy0\n");
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
- seq_printf(file, "%3d -%3d | ",
- bound[i] + 1, bound[i + 1]);
+ seq_printf(file, "%3d %3d | ", bound[i] + 1, bound[i + 1]);
seq_puts(file, "\nCount: ");
for (i = 0; i < ARRAY_SIZE(bound); i++)
@@ -62,7 +64,7 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
}
static int
-mt7921_tx_stats_read(struct seq_file *file, void *data)
+mt7921_tx_stats_show(struct seq_file *file, void *data)
{
struct mt7921_dev *dev = file->private;
int stat[8], i, n;
@@ -88,19 +90,7 @@ mt7921_tx_stats_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7921_tx_stats_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7921_tx_stats_read, inode->i_private);
-}
-
-static const struct file_operations fops_tx_stats = {
- .open = mt7921_tx_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7921_tx_stats);
static int
mt7921_queues_acq(struct seq_file *s, void *data)
@@ -159,23 +149,107 @@ mt7921_queues_read(struct seq_file *s, void *data)
return 0;
}
+static void
+mt7921_seq_puts_array(struct seq_file *file, const char *str,
+ s8 *val, int len)
+{
+ int i;
+
+ seq_printf(file, "%-16s:", str);
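+ /* a value of 127 marks a rate without a valid entry; print it as N.A */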
+ for (i = 0; i < len; i++)
+ if (val[i] == 127)
+ seq_printf(file, " %6s", "N.A");
+ else
+ seq_printf(file, " %6d", val[i]);
+ seq_puts(file, "\n");
+}
+
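+/* print the user, eeprom and tmac power limits for one rate group */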
+#define mt7921_print_txpwr_entry(prefix, rate) \
+({ \
+ mt7921_seq_puts_array(s, #prefix " (user)", \
+ txpwr.data[TXPWR_USER].rate, \
+ ARRAY_SIZE(txpwr.data[TXPWR_USER].rate)); \
+ mt7921_seq_puts_array(s, #prefix " (eeprom)", \
+ txpwr.data[TXPWR_EEPROM].rate, \
+ ARRAY_SIZE(txpwr.data[TXPWR_EEPROM].rate)); \
+ mt7921_seq_puts_array(s, #prefix " (tmac)", \
+ txpwr.data[TXPWR_MAC].rate, \
+ ARRAY_SIZE(txpwr.data[TXPWR_MAC].rate)); \
+})
+
+static int
+mt7921_txpwr(struct seq_file *s, void *data)
+{
+ struct mt7921_dev *dev = dev_get_drvdata(s->private);
+ struct mt7921_txpwr txpwr;
+ int ret;
+
+ ret = mt7921_get_txpwr_info(dev, &txpwr);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "Tx power table (channel %d)\n", txpwr.ch);
+ seq_printf(s, "%-16s %6s %6s %6s %6s\n",
+ " ", "1m", "2m", "5m", "11m");
+ mt7921_print_txpwr_entry(CCK, cck);
+
+ seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "6m", "9m", "12m", "18m", "24m", "36m",
+ "48m", "54m");
+ mt7921_print_txpwr_entry(OFDM, ofdm);
+
+ seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7");
+ mt7921_print_txpwr_entry(HT20, ht20);
+
+ seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7", "mcs32");
+ mt7921_print_txpwr_entry(HT40, ht40);
+
+ seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11");
+ mt7921_print_txpwr_entry(VHT20, vht20);
+ mt7921_print_txpwr_entry(VHT40, vht40);
+ mt7921_print_txpwr_entry(VHT80, vht80);
+ mt7921_print_txpwr_entry(VHT160, vht160);
+ mt7921_print_txpwr_entry(HE26, he26);
+ mt7921_print_txpwr_entry(HE52, he52);
+ mt7921_print_txpwr_entry(HE106, he106);
+ mt7921_print_txpwr_entry(HE242, he242);
+ mt7921_print_txpwr_entry(HE484, he484);
+ mt7921_print_txpwr_entry(HE996, he996);
+ mt7921_print_txpwr_entry(HE996x2, he996x2);
+
+ return 0;
+}
+
static int
mt7921_pm_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
+ struct mt76_connac_pm *pm = &dev->pm;
struct mt76_phy *mphy = dev->phy.mt76;
- int ret = 0;
+
+ if (val == pm->enable)
+ return 0;
mt7921_mutex_acquire(dev);
- dev->pm.enable = val;
+ if (!pm->enable) {
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.last_doze_event = jiffies;
+ }
+ pm->enable = val;
ieee80211_iterate_active_interfaces(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_pm_interface_iter, mphy->priv);
mt7921_mutex_release(dev);
- return ret;
+ return 0;
}
static int
@@ -191,6 +265,29 @@ mt7921_pm_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n");
static int
+mt7921_pm_stats(struct seq_file *s, void *data)
+{
+ struct mt7921_dev *dev = dev_get_drvdata(s->private);
+ struct mt76_connac_pm *pm = &dev->pm;
+
+ unsigned long awake_time = pm->stats.awake_time;
+ unsigned long doze_time = pm->stats.doze_time;
+
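+ /* credit the time since the last event to whichever state is active now */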
+ if (!test_bit(MT76_STATE_PM, &dev->mphy.state))
+ awake_time += jiffies - pm->stats.last_wake_event;
+ else
+ doze_time += jiffies - pm->stats.last_doze_event;
+
+ seq_printf(s, "awake time: %14u\ndoze time: %15u\n",
+ jiffies_to_msecs(awake_time),
+ jiffies_to_msecs(doze_time));
+
+ seq_printf(s, "low power wakes: %9d\n", pm->stats.lp_wake);
+
+ return 0;
+}
+
+static int
mt7921_pm_idle_timeout_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
@@ -213,19 +310,28 @@ mt7921_pm_idle_timeout_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get,
mt7921_pm_idle_timeout_set, "%lld\n");
-static int mt7921_config(void *data, u64 val)
+static int mt7921_chip_reset(void *data, u64 val)
{
struct mt7921_dev *dev = data;
- int ret;
+ int ret = 0;
- mt7921_mutex_acquire(dev);
- ret = mt76_connac_mcu_chip_config(&dev->mt76);
- mt7921_mutex_release(dev);
+ switch (val) {
+ case 1:
+ /* Reset wifisys directly. */
+ mt7921_reset(&dev->mt76);
+ break;
+ default:
+ /* Collect the core dump before resetting wifisys. */
+ mt7921_mutex_acquire(dev);
+ ret = mt76_connac_mcu_chip_config(&dev->mt76);
+ mt7921_mutex_release(dev);
+ break;
+ }
return ret;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7921_config, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n");
int mt7921_init_debugfs(struct mt7921_dev *dev)
{
@@ -239,12 +345,16 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
mt7921_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7921_queues_acq);
- debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
+ mt7921_txpwr);
+ debugfs_create_file("tx_stats", 0400, dir, dev, &mt7921_tx_stats_fops);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
debugfs_create_file("idle-timeout", 0600, dir, dev,
&fops_pm_idle_timeout);
- debugfs_create_file("chip_config", 0600, dir, dev, &fops_config);
+ debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
+ mt7921_pm_stats);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index cd9665610284..71e664ee7652 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -53,8 +53,7 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
-static void
-mt7921_tx_cleanup(struct mt7921_dev *dev)
+void mt7921_tx_cleanup(struct mt7921_dev *dev)
{
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
@@ -66,15 +65,39 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
dev = container_of(napi, struct mt7921_dev, mt76.tx_napi);
- mt7921_tx_cleanup(dev);
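+ /* if the device is asleep, let the wake worker run the cleanup */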
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ napi_complete(napi);
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return 0;
+ }
- if (napi_complete_done(napi, 0))
+ mt7921_tx_cleanup(dev);
+ if (napi_complete(napi))
mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
+ mt76_connac_pm_unref(&dev->pm);
return 0;
}
-void mt7921_dma_prefetch(struct mt7921_dev *dev)
+static int mt7921_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct mt7921_dev *dev;
+ int done;
+
+ dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev);
+
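+ /* make sure the device is awake before the RX rings are touched */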
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ napi_complete(napi);
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return 0;
+ }
+ done = mt76_dma_rx_poll(napi, budget);
+ mt76_connac_pm_unref(&dev->pm);
+
+ return done;
+}
+
+static void mt7921_dma_prefetch(struct mt7921_dev *dev)
{
#define PREFETCH(base, depth) ((base) << 16 | (depth))
@@ -198,11 +221,160 @@ static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
-static int mt7921_dmashdl_disabled(struct mt7921_dev *dev)
+static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
- mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+ if (force) {
+ /* reset */
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+ }
+
+ /* disable dmashdl */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
+ MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+ /* disable WFDMA0 */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mt7921_dma_enable(struct mt7921_dev *dev)
+{
+ /* configure prefetch settings */
+ mt7921_dma_prefetch(dev);
+
+ /* reset dma idx */
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+
+ /* configure delay interrupt */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
+ MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
+ MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
+
+ /* enable interrupts for TX/RX rings */
+ mt7921_irq_enable(dev,
+ MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+ mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);
+
+ return 0;
+}
+
+static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
+{
+ int i, err;
+
+ err = mt7921_dma_disable(dev, force);
+ if (err)
+ return err;
+
+ /* reset hw queues */
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+
+ mt76_tx_status_check(&dev->mt76, NULL, true);
+
+ return mt7921_dma_enable(dev);
+}
+
+int mt7921_wfsys_reset(struct mt7921_dev *dev)
+{
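+ /* toggle the WFSYS software reset and wait for the init-done flag */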
+ mt76_set(dev, 0x70002600, BIT(0));
+ msleep(200);
+ mt76_clear(dev, 0x70002600, BIT(0));
+
+ if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
+ WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force)
+{
+ int i, err;
+
+ /* clean up hw queues */
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
+
+ if (force) {
+ err = mt7921_wfsys_reset(dev);
+ if (err)
+ return err;
+ }
+ err = mt7921_dma_reset(dev, force);
+ if (err)
+ return err;
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_reset(dev, i);
+
+ return 0;
+}
+
+int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
+{
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err;
+
+ /* check if the wpdma must be reinitialized */
+ if (mt7921_dma_need_reinit(dev)) {
+ /* disable interrupts */
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+
+ err = mt7921_wpdma_reset(dev, false);
+ if (err) {
+ dev_err(dev->mt76.dev, "wpdma reset failed\n");
+ return err;
+ }
+
+ /* enable interrupts */
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ pm->stats.lp_wake++;
+ }
+
return 0;
}
@@ -226,32 +398,10 @@ int mt7921_dma_init(struct mt7921_dev *dev)
mt76_dma_attach(&dev->mt76);
- /* reset */
- mt76_clear(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
-
- ret = mt7921_dmashdl_disabled(dev);
+ ret = mt7921_dma_disable(dev, true);
if (ret)
return ret;
- /* disable WFDMA0 */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN |
- MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
- MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
-
- mt76_poll(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
- MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
-
/* init tx queue */
ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
MT7921_TX_RING_SIZE);
@@ -295,41 +445,15 @@ int mt7921_dma_init(struct mt7921_dev *dev)
if (ret)
return ret;
- ret = mt76_init_queues(dev);
+ ret = mt76_init_queues(dev, mt7921_poll_rx);
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7921_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
- /* configure perfetch settings */
- mt7921_dma_prefetch(dev);
-
- /* reset dma idx */
- mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
-
- /* configure delay interrupt */
- mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
-
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
- MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
- MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
- MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
-
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
-
- mt76_set(dev, 0x54000120, BIT(1));
-
- /* enable interrupts for TX/RX rings */
- mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
- MT_INT_MCU_CMD);
-
- return 0;
+ return mt7921_dma_enable(dev);
}
void mt7921_dma_cleanup(struct mt7921_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 89a13b4a74a4..fe28bf4050c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -58,12 +58,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
mt76_connac_mcu_set_channel_domain(hw->priv);
+ mt76_connac_mcu_set_rate_txpower(phy->mt76);
mt7921_mutex_release(dev);
}
@@ -77,6 +79,9 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
+
phy->slottime = 9;
hw->sta_data_size = sizeof(struct mt7921_sta);
@@ -95,6 +100,7 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7921_regd_notifier;
+ wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
@@ -142,7 +148,7 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
-static void mt7921_mac_init(struct mt7921_dev *dev)
+void mt7921_mac_init(struct mt7921_dev *dev)
{
int i;
@@ -160,23 +166,10 @@ static void mt7921_mac_init(struct mt7921_dev *dev)
mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
}
-static void mt7921_init_work(struct work_struct *work)
-{
- struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
- init_work);
-
- mt7921_mcu_set_eeprom(dev);
- mt7921_mac_init(dev);
-}
-
static int mt7921_init_hardware(struct mt7921_dev *dev)
{
int ret, idx;
- INIT_WORK(&dev->init_work, mt7921_init_work);
- spin_lock_init(&dev->token_lock);
- idr_init(&dev->token);
-
ret = mt7921_dma_init(dev);
if (ret)
return ret;
@@ -196,6 +189,10 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
if (ret < 0)
return ret;
+ ret = mt7921_mcu_set_eeprom(dev);
+ if (ret)
+ return ret;
+
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
if (idx)
@@ -206,6 +203,8 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+ mt7921_mac_init(dev);
+
return 0;
}
@@ -217,10 +216,13 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
+ dev->mt76.tx_worker.fn = mt7921_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt7921_pm_power_save_work);
INIT_WORK(&dev->pm.wake_work, mt7921_pm_wake_work);
- init_completion(&dev->pm.wake_cmpl);
+ spin_lock_init(&dev->pm.wake.lock);
+ mutex_init(&dev->pm.mutex);
+ init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_LIST_HEAD(&dev->phy.stats_list);
@@ -232,15 +234,17 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
- init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+ dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
+ dev->pm.stats.last_wake_event = jiffies;
+ dev->pm.stats.last_doze_event = jiffies;
+
ret = mt7921_init_hardware(dev);
if (ret)
return ret;
mt7921_init_wiphy(hw);
- dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->mphy.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
@@ -250,9 +254,6 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
@@ -264,18 +265,15 @@ int mt7921_register_device(struct mt7921_dev *dev)
if (ret)
return ret;
- ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
-
return mt7921_init_debugfs(dev);
}
void mt7921_unregister_device(struct mt7921_dev *dev)
{
mt76_unregister_device(&dev->mt76);
- mt7921_mcu_exit(dev);
- mt7921_dma_cleanup(dev);
-
mt7921_tx_token_put(dev);
+ mt7921_dma_cleanup(dev);
+ mt7921_mcu_exit(dev);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 3f9097481a5e..214bd1859792 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -9,8 +9,6 @@
#include "mac.h"
#include "mcu.h"
-#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
-
#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
IEEE80211_RADIOTAP_HE_##f)
@@ -51,14 +49,6 @@ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
0, 5000);
}
-static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid)
-{
- mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
- FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
-
- return MT_WTBL_LMAC_OFFS(wcid, 0);
-}
-
static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
static const u8 ac_to_tid[] = {
@@ -95,7 +85,7 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
spin_unlock_bh(&dev->sta_poll_lock);
idx = msta->wcid.idx;
- addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;
+ addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u32 tx_last = msta->airtime_ac[i];
@@ -285,6 +275,37 @@ mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}
+static void
+mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = priv;
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+
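+ /* a positive value means the reported signal is not valid */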
+ if (status->signal > 0)
+ return;
+
+ if (!ether_addr_equal(vif->addr, hdr->addr1))
+ return;
+
+ ewma_rssi_add(&mvif->rssi, -status->signal);
+}
+
+static void
+mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
+
+ if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
+ !ieee80211_is_auth(hdr->frame_control))
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_mac_rssi_iter, skb);
+}
+
int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -349,19 +370,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
- if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (phy->rx_ampdu_ts != rxd[14]) {
- if (!++phy->ampdu_ref)
- phy->ampdu_ref++;
- }
- phy->rx_ampdu_ts = rxd[14];
-
- status->ampdu_ref = phy->ampdu_ref;
- }
-
remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -393,6 +401,22 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
}
if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ status->timestamp = le32_to_cpu(rxd[0]);
+ status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != status->timestamp) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = status->timestamp;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
rxd += 2;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -400,7 +424,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u32 v0, v1, v2;
+ u8 stbc, gi;
+ u32 v0, v1;
+ bool cck;
rxv = rxd;
rxd += 2;
@@ -409,7 +435,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
- v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
@@ -429,87 +454,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
- /* RXD Group 5 - C-RXV */
- if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
- u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
- u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- bool cck = false;
+ stbc = FIELD_GET(MT_PRXV_STBC, v0);
+ gi = FIELD_GET(MT_PRXV_SGI, v0);
+ cck = false;
- rxd += 18;
- if ((u8 *)rxd - skb->data >= skb->len)
- return -EINVAL;
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 9)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- fallthrough;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- status->flag |= RX_FLAG_RADIOTAP_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
return -EINVAL;
- }
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_VHT;
+ if (i > 9)
return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ fallthrough;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_HE;
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
}
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ rxd += 18;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
}
}
@@ -521,6 +546,8 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_insert_ccmp_hdr(skb, key_id);
}
+ mt7921_mac_assoc_rssi(dev, skb);
+
if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
@@ -530,7 +557,7 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(hdr->frame_control);
- status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
return 0;
@@ -758,20 +785,6 @@ mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
}
}
-static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
-{
- struct mt76_phy *mphy = &dev->mphy;
- struct mt76_queue *q;
-
- q = mphy->q_tx[0];
- if (blocked == q->blocked)
- return;
-
- q->blocked = blocked;
- if (!blocked)
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@@ -797,15 +810,7 @@ int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
- spin_lock_bh(&dev->token_lock);
- id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
- if (id >= 0)
- dev->token_count++;
-
- if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
- mt7921_set_tx_blocked(dev, true);
- spin_unlock_bh(&dev->token_lock);
-
+ id = mt76_token_consume(mdev, &t);
if (id < 0)
return id;
@@ -967,15 +972,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
- spin_lock_bh(&dev->token_lock);
- txwi = idr_remove(&dev->token, msdu);
- if (txwi)
- dev->token_count--;
- if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
- dev->mphy.q_tx[0]->blocked)
- wake = true;
- spin_unlock_bh(&dev->token_lock);
-
+ txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
@@ -1003,11 +1000,8 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_put_txwi(mdev, txwi);
}
- if (wake) {
- spin_lock_bh(&dev->token_lock);
- mt7921_set_tx_blocked(dev, false);
- spin_unlock_bh(&dev->token_lock);
- }
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
napi_consume_skb(skb, 1);
@@ -1016,13 +1010,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
napi_consume_skb(skb, 1);
}
- if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
- return;
-
mt7921_mac_sta_poll(dev);
-
- mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
-
mt76_worker_schedule(&dev->mt76.tx_worker);
}
@@ -1044,11 +1032,8 @@ void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
u16 token;
txp = mt7921_txwi_to_txp(mdev, e->txwi);
-
token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
- spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, token);
- spin_unlock_bh(&dev->token_lock);
+ t = mt76_token_put(mdev, token);
e->skb = t ? t->skb : NULL;
}
@@ -1183,52 +1168,13 @@ void mt7921_update_channel(struct mt76_dev *mdev)
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
-static bool
-mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state)
-{
- bool ret;
-
- ret = wait_event_timeout(dev->reset_wait,
- (READ_ONCE(dev->reset_state) & state),
- MT7921_RESET_TIMEOUT);
-
- WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
- return ret;
-}
-
-static void
-mt7921_dma_reset(struct mt7921_phy *phy)
-{
- struct mt7921_dev *dev = phy->dev;
- int i;
-
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
-
- usleep_range(1000, 2000);
-
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
- for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
-
- mt76_for_each_q_rx(&dev->mt76, i) {
- mt76_queue_rx_reset(dev, i);
- }
-
- /* re-init prefetch settings after reset */
- mt7921_dma_prefetch(dev);
-
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
-}
-
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
int id;
- spin_lock_bh(&dev->token_lock);
- idr_for_each_entry(&dev->token, txwi, id) {
+ spin_lock_bh(&dev->mt76.token_lock);
+ idr_for_each_entry(&dev->mt76.token, txwi, id) {
mt7921_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb) {
struct ieee80211_hw *hw;
@@ -1237,77 +1183,127 @@ void mt7921_tx_token_put(struct mt7921_dev *dev)
ieee80211_free_txskb(hw, txwi->skb);
}
mt76_put_txwi(&dev->mt76, txwi);
- dev->token_count--;
+ dev->mt76.token_count--;
}
- spin_unlock_bh(&dev->token_lock);
- idr_destroy(&dev->token);
+ spin_unlock_bh(&dev->mt76.token_lock);
+ idr_destroy(&dev->mt76.token);
}
-/* system error recovery */
-void mt7921_mac_reset_work(struct work_struct *work)
+static void
+mt7921_vif_connect_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct mt7921_dev *dev;
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mvif->phy->dev;
- dev = container_of(work, struct mt7921_dev, reset_work);
+ ieee80211_disconnect(vif, true);
- if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
- return;
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
+ mt7921_mcu_set_tx(dev, vif);
+}
+
+static int
+mt7921_mac_reset(struct mt7921_dev *dev)
+{
+ int i, err;
- ieee80211_stop_queues(mt76_hw(dev));
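+ /* drop pending PS frames and mask all interrupts before the teardown */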
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
- /* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[0]);
- napi_disable(&dev->mt76.napi[1]);
- napi_disable(&dev->mt76.napi[2]);
+ napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
+ napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
+ napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
napi_disable(&dev->mt76.tx_napi);
- mt7921_mutex_acquire(dev);
-
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
-
mt7921_tx_token_put(dev);
- idr_init(&dev->token);
+ idr_init(&dev->mt76.token);
- if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
- mt7921_dma_reset(&dev->phy);
+ err = mt7921_wpdma_reset(dev, true);
+ if (err)
+ return err;
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
- mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
}
- clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_STATE_PM, &dev->mphy.state);
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ err = mt7921_run_firmware(dev);
+ if (err)
+ return err;
+
+ err = mt7921_mcu_set_eeprom(dev);
+ if (err)
+ return err;
- napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
+ mt7921_mac_init(dev);
+ return __mt7921_start(&dev->phy);
+}
- napi_enable(&dev->mt76.napi[1]);
- napi_schedule(&dev->mt76.napi[1]);
+/* system error recovery */
+void mt7921_mac_reset_work(struct work_struct *work)
+{
+ struct ieee80211_hw *hw;
+ struct mt7921_dev *dev;
+ int i;
- napi_enable(&dev->mt76.napi[2]);
- napi_schedule(&dev->mt76.napi[2]);
+ dev = container_of(work, struct mt7921_dev, reset_work);
+ hw = mt76_hw(dev);
- ieee80211_wake_queues(mt76_hw(dev));
+ dev_err(dev->mt76.dev, "chip reset\n");
+ ieee80211_stop_queues(hw);
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
- mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ cancel_work_sync(&dev->pm.wake_work);
- mt7921_mutex_release(dev);
+ mutex_lock(&dev->mt76.mutex);
+ for (i = 0; i < 10; i++) {
+ if (!mt7921_mac_reset(dev))
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7921_WATCHDOG_TIME);
+ if (i == 10)
+ dev_err(dev->mt76.dev, "chip reset failed\n");
+
+ if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(dev->mphy.hw, &info);
+ }
+
+ ieee80211_wake_queues(hw);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_vif_connect_iter, NULL);
+}
+
+void mt7921_reset(struct mt76_dev *mdev)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ queue_work(dev->mt76.wq, &dev->reset_work);
}
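
The new recovery flow splits cleanly: mt7921_reset() is the sole entry point and merely queues reset_work from whatever context noticed the hang, while mt7921_mac_reset_work() retries the full mt7921_mac_reset() sequence up to ten times under the device mutex. A minimal sketch of the same queue-then-retry pattern, with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_dev {
        struct workqueue_struct *wq;    /* alloc_workqueue() at probe */
        struct work_struct reset_work;  /* INIT_WORK() at probe */
        struct mutex mutex;
};

static int demo_hw_reset(struct demo_dev *dev)
{
        /* the heavy, sleeping reset sequence would live here */
        return 0;
}

static void demo_reset_work(struct work_struct *work)
{
        struct demo_dev *dev = container_of(work, struct demo_dev,
                                            reset_work);
        int i;

        mutex_lock(&dev->mutex);
        for (i = 0; i < 10; i++) {
                if (!demo_hw_reset(dev))
                        break;
        }
        mutex_unlock(&dev->mutex);

        if (i == 10)
                pr_err("demo: chip reset failed\n");
}

/* callable from atomic context, e.g. an MCU timeout path */
static void demo_reset(struct demo_dev *dev)
{
        queue_work(dev->wq, &dev->reset_work);
}
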
static void
@@ -1317,31 +1313,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
struct mib_stats *mib = &phy->mib;
int i, aggr0 = 0, aggr1;
- memset(mib, 0, sizeof(*mib));
-
- mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+ mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
+ MT_MIB_ACK_FAIL_COUNT_MASK);
+ mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
+ MT_MIB_BA_FAIL_COUNT_MASK);
+ mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
+ MT_MIB_RTS_COUNT_MASK);
+ mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
+ MT_MIB_RTS_FAIL_COUNT_MASK);
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
u32 val, val2;
- val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
-
- val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
- if (val2 > mib->ack_fail_cnt)
- mib->ack_fail_cnt = val2;
-
- val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- if (val2 > mib->ba_miss_cnt)
- mib->ba_miss_cnt = val2;
-
- val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
- val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- if (val2 > mib->rts_retries_cnt) {
- mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt = val2;
- }
-
val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
@@ -1385,25 +1370,20 @@ void mt7921_mac_work(struct work_struct *work)
mac_work.work);
phy = mphy->priv;
- if (test_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
mt7921_mutex_acquire(phy->dev);
mt76_update_survey(mphy->dev);
- if (++mphy->mac_work_count == 5) {
+ if (++mphy->mac_work_count == 2) {
mphy->mac_work_count = 0;
mt7921_mac_update_mib_stats(phy);
}
- if (++phy->sta_work_count == 10) {
+ if (++phy->sta_work_count == 4) {
phy->sta_work_count = 0;
mt7921_mac_sta_stats_work(phy);
- };
+ }
mt7921_mutex_release(phy->dev);
-
-out:
ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
}
@@ -1417,13 +1397,19 @@ void mt7921_pm_wake_work(struct work_struct *work)
pm.wake_work);
mphy = dev->phy.mt76;
- if (!mt7921_mcu_drv_pmctrl(dev))
+ if (!mt7921_mcu_drv_pmctrl(dev)) {
+ int i;
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
- else
- dev_err(mphy->dev->dev, "failed to wake device\n");
+ mt7921_tx_cleanup(dev);
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7921_WATCHDOG_TIME);
+ }
ieee80211_wake_queues(mphy->hw);
- complete_all(&dev->pm.wake_cmpl);
+ wake_up(&dev->pm.wait);
}
void mt7921_pm_power_save_work(struct work_struct *work)
@@ -1435,6 +1421,10 @@ void mt7921_pm_power_save_work(struct work_struct *work)
pm.ps_work.work);
delta = dev->pm.idle_timeout;
+ if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work)
break;
skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
- if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
- break;
+ if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ dev_kfree_skb(skb);
+ continue;
+ }
memcpy(data, skb->data, skb->len);
data += skb->len;
@@ -1513,4 +1505,5 @@ void mt7921_coredump_work(struct work_struct *work)
}
dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
GFP_KERNEL);
+ mt7921_reset(&dev->mt76);
}
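
Switching the overflow case from break to dev_kfree_skb() plus continue means an oversized fragment is dropped instead of leaking its skb, and the rest of the coredump queue still drains. The same bounded-copy drain loop in isolation (a sketch; names hypothetical):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_drain(struct sk_buff_head *q, u8 *buf, size_t buf_sz)
{
        u8 *pos = buf;
        struct sk_buff *skb;

        while ((skb = skb_dequeue(q)) != NULL) {
                /* drop fragments that would overflow the dump buffer */
                if (pos + skb->len - buf > buf_sz) {
                        dev_kfree_skb(skb);
                        continue;
                }

                memcpy(pos, skb->data, skb->len);
                pos += skb->len;
                dev_kfree_skb(skb);
        }
}
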
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index a0c1fa0f20e4..109c8849d106 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -97,18 +97,24 @@ enum rx_pkt_type {
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
-/* P-RXV */
+/* P-RXV DW0 */
#define MT_PRXV_TX_RATE GENMASK(6, 0)
#define MT_PRXV_TX_DCM BIT(4)
#define MT_PRXV_TX_ER_SU_106T BIT(5)
#define MT_PRXV_NSTS GENMASK(9, 7)
#define MT_PRXV_HT_AD_CODE BIT(11)
+#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
+#define MT_PRXV_SGI GENMASK(16, 15)
+#define MT_PRXV_STBC GENMASK(23, 22)
+#define MT_PRXV_TX_MODE GENMASK(27, 24)
#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
-#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+
+/* P-RXV DW1 */
#define MT_PRXV_RCPI3 GENMASK(31, 24)
#define MT_PRXV_RCPI2 GENMASK(23, 16)
#define MT_PRXV_RCPI1 GENMASK(15, 8)
#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
/* C-RXV */
#define MT_CRXV_HT_STBC GENMASK(1, 0)
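
The reshuffled masks now document which 32-bit word of the P-RXV each field lives in (HE_RU_ALLOC_H moves to DW1). They are meant for FIELD_GET() from <linux/bitfield.h>; a sketch of decoding DW0, with the masks copied from above and a hypothetical function name:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define MT_PRXV_TX_RATE         GENMASK(6, 0)
#define MT_PRXV_FRAME_MODE      GENMASK(14, 12)
#define MT_PRXV_SGI             GENMASK(16, 15)
#define MT_PRXV_TX_MODE         GENMASK(27, 24)

static void demo_parse_prxv_dw0(u32 v)
{
        u8 rate = FIELD_GET(MT_PRXV_TX_RATE, v);
        u8 bw   = FIELD_GET(MT_PRXV_FRAME_MODE, v);
        u8 sgi  = FIELD_GET(MT_PRXV_SGI, v);
        u8 mode = FIELD_GET(MT_PRXV_TX_MODE, v);

        pr_debug("rate %u bw %u sgi %u mode %u\n", rate, bw, sgi, mode);
}
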
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 729f6c42cdde..f4c27aa41048 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -65,9 +65,9 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_MAC_CAP0_HTC_HE;
he_cap_elem->mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
he_cap_elem->mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU;
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
@@ -108,7 +108,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
@@ -125,10 +125,6 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@@ -169,28 +165,48 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy)
}
}
-static int mt7921_start(struct ieee80211_hw *hw)
+int __mt7921_start(struct mt7921_phy *phy)
{
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt76_phy *mphy = phy->mt76;
+ int err;
- mt7921_mutex_acquire(dev);
+ err = mt76_connac_mcu_set_mac_enable(mphy->dev, 0, true, false);
+ if (err)
+ return err;
- mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
- mt76_connac_mcu_set_channel_domain(phy->mt76);
+ err = mt76_connac_mcu_set_channel_domain(mphy);
+ if (err)
+ return err;
+
+ err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ if (err)
+ return err;
+
+ err = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ if (err)
+ return err;
- mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
mt7921_mac_reset_counters(phy);
- set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ set_bit(MT76_STATE_RUNNING, &mphy->state);
- ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
- mt7921_mutex_release(dev);
-
return 0;
}
+static int mt7921_start(struct ieee80211_hw *hw)
+{
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ int err;
+
+ mt7921_mutex_acquire(phy->dev);
+ err = __mt7921_start(phy);
+ mt7921_mutex_release(phy->dev);
+
+ return err;
+}
+
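
Factoring __mt7921_start() out lets the reset path, which already holds the device mutex, reuse the start sequence, while mt7921_start() stays a thin locked wrapper for mac80211. The lock-free-worker/locked-wrapper split in miniature (a sketch; demo_* names hypothetical):

#include <linux/mutex.h>

struct demo_dev {
        struct mutex mutex;     /* mutex_init() at probe */
};

struct demo_phy {
        struct demo_dev *dev;
};

/* caller must hold dev->mutex */
static int __demo_start(struct demo_phy *phy)
{
        /* bring the phy up; every step can assume the lock is held */
        return 0;
}

static int demo_start(struct demo_phy *phy)
{
        int err;

        mutex_lock(&phy->dev->mutex);
        err = __demo_start(phy);
        mutex_unlock(&phy->dev->mutex);

        return err;
}
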
static void mt7921_stop(struct ieee80211_hw *hw)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@@ -224,9 +240,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
if (i)
return i - 1;
- if (type != NL80211_IFTYPE_STATION)
- break;
-
/* next, try to find a free repeater entry for the sta */
i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
REPEATER_BSSID_MAX - REPEATER_BSSID_START);
@@ -295,15 +308,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
- if (dev->pm.enable) {
- ret = mt7921_mcu_set_bss_pm(dev, vif, true);
- if (ret)
- goto out;
-
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
- }
-
dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
@@ -318,6 +322,8 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ ewma_rssi_init(&mvif->rssi);
+
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
@@ -348,19 +354,12 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
if (vif == phy->monitor_vif)
phy->monitor_vif = NULL;
+ mt7921_mutex_acquire(dev);
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
-
- if (dev->pm.enable) {
- mt7921_mcu_set_bss_pm(dev, vif, false);
- mt76_clear(dev, MT_WF_RFCR(0),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- }
-
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mt7921_mutex_acquire(dev);
dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt7921_mutex_release(dev);
@@ -396,8 +395,7 @@ out:
clear_bit(MT76_RESET, &phy->mt76->state);
mt7921_mutex_release(dev);
- mt76_txq_schedule_all(phy->mt76);
-
+ mt76_worker_schedule(&dev->mt76.tx_worker);
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
MT7921_WATCHDOG_TIME);
@@ -413,7 +411,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
&mvif->sta;
struct mt76_wcid *wcid = &msta->wcid;
- int idx = key->keyidx;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
+ int idx = key->keyidx, err = 0;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -429,6 +428,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_AES_CMAC:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ wcid_keyidx = &wcid->hw_key_idx2;
break;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
@@ -443,23 +443,29 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- } else if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- }
+ mt7921_mutex_acquire(dev);
+
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ else
+ goto out;
+
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+ err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+out:
+ mt7921_mutex_release(dev);
+
+ return err;
}
static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- bool band = phy != &dev->phy;
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -480,9 +486,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
else
phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
- mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN,
+ mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
enabled);
- mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
}
mt7921_mutex_release(dev);
@@ -511,7 +517,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- bool band = phy != &dev->phy;
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -551,16 +556,46 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
+ mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags);
else
- mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+ mt76_set(dev, MT_WF_RFCR1(0), ctl_flags);
mt7921_mutex_release(dev);
}
+static int
+mt7921_bss_bcnft_apply(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool assoc)
+{
+ int ret;
+
+ if (!dev->pm.enable)
+ return 0;
+
+ if (assoc) {
+ ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
+ if (ret)
+ return ret;
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ return 0;
+ }
+
+ ret = mt7921_mcu_set_bss_pm(dev, vif, false);
+ if (ret)
+ return ret;
+
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ return 0;
+}
+
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -587,6 +622,18 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt7921_mcu_uni_bss_ps(dev, vif);
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt7921_mcu_sta_add(dev, NULL, vif, true);
+ mt7921_bss_bcnft_apply(dev, vif, info->assoc);
+ }
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+ mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76,
+ info);
+ }
+
mt7921_mutex_release(dev);
}
@@ -622,8 +669,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid,
- true, MCU_UNI_CMD_STA_REC_UPDATE);
+ ret = mt7921_mcu_sta_add(dev, sta, vif, true);
if (ret)
return ret;
@@ -641,17 +687,17 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, false,
- MCU_UNI_CMD_STA_REC_UPDATE);
-
+ mt7921_mcu_sta_add(dev, sta, vif, false);
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
- false);
+ ewma_rssi_init(&mvif->rssi);
+ if (!sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
+ &mvif->sta.wcid, false);
}
spin_lock_bh(&dev->sta_poll_lock);
@@ -664,23 +710,18 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
-static void
-mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+void mt7921_tx_worker(struct mt76_worker *w)
{
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
- struct mt76_phy *mphy = phy->mt76;
-
- if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
- return;
+ struct mt7921_dev *dev = container_of(w, struct mt7921_dev,
+ mt76.tx_worker);
- if (test_bit(MT76_STATE_PM, &mphy->state)) {
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return;
}
- dev->pm.last_activity = jiffies;
- mt76_worker_schedule(&dev->mt76.tx_worker);
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_connac_pm_unref(&dev->pm);
}
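
Both mt7921_tx_worker() and mt7921_tx() now gate on mt76_connac_pm_ref(): take a wakefulness reference and proceed, or queue pm.wake_work and let the wake worker resubmit once the chip is up. The ref-or-defer idiom reduced to its shape (a sketch with hypothetical demo_* helpers):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_pm {
        spinlock_t lock;                /* spin_lock_init() at probe */
        int wake_count;
        bool asleep;
        struct work_struct wake_work;   /* INIT_WORK() at probe */
};

/* take a reference that keeps the device awake; fails if it sleeps */
static bool demo_pm_ref(struct demo_pm *pm)
{
        bool ret = false;

        spin_lock_bh(&pm->lock);
        if (!pm->asleep) {
                pm->wake_count++;
                ret = true;
        }
        spin_unlock_bh(&pm->lock);

        return ret;
}

static void demo_pm_unref(struct demo_pm *pm)
{
        spin_lock_bh(&pm->lock);
        pm->wake_count--;
        spin_unlock_bh(&pm->lock);
}

static void demo_tx_worker(struct demo_pm *pm)
{
        if (!demo_pm_ref(pm)) {
                /* device asleep: defer to the wake worker and bail */
                schedule_work(&pm->wake_work);
                return;
        }

        /* schedule the tx queues here */
        demo_pm_unref(pm);
}
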
static void mt7921_tx(struct ieee80211_hw *hw,
@@ -708,9 +749,9 @@ static void mt7921_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- if (!test_bit(MT76_STATE_PM, &mphy->state)) {
- dev->pm.last_activity = jiffies;
+ if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
+ mt76_connac_pm_unref(&dev->pm);
return;
}
@@ -814,11 +855,17 @@ mt7921_get_stats(struct ieee80211_hw *hw,
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mib_stats *mib = &phy->mib;
+ mt7921_mutex_acquire(phy->dev);
+
stats->dot11RTSSuccessCount = mib->rts_cnt;
stats->dot11RTSFailureCount = mib->rts_retries_cnt;
stats->dot11FCSErrorCount = mib->fcs_err_cnt;
stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ memset(mib, 0, sizeof(*mib));
+
+ mt7921_mutex_release(phy->dev);
+
return 0;
}
@@ -827,9 +874,7 @@ mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
u8 omac_idx = mvif->mt76.omac_idx;
- bool band = phy != &dev->phy;
union {
u64 t64;
u32 t32[2];
@@ -840,9 +885,9 @@ mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;
/* TSF software read */
- mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
- tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
- tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
+ mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE);
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0));
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0));
mt7921_mutex_release(dev);
@@ -855,9 +900,7 @@ mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
u8 omac_idx = mvif->mt76.omac_idx;
- bool band = phy != &dev->phy;
union {
u64 t64;
u32 t32[2];
@@ -867,10 +910,10 @@ mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7921_mutex_acquire(dev);
n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;
- mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
- mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+ mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]);
/* TSF software overwrite */
- mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+ mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE);
mt7921_mutex_release(dev);
}
@@ -1008,14 +1051,6 @@ mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
return 0;
}
-static void
-mt7921_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u32 changed)
-{
-}
-
static void mt7921_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1120,6 +1155,15 @@ static void mt7921_set_rekey_data(struct ieee80211_hw *hw,
}
#endif /* CONFIG_PM */
+static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ wait_event_timeout(dev->mt76.tx_wait, !mt76_has_tx_pending(&dev->mphy),
+ HZ / 2);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1133,11 +1177,10 @@ const struct ieee80211_ops mt7921_ops = {
.sta_add = mt7921_sta_add,
.sta_remove = mt7921_sta_remove,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
- .sta_rc_update = mt7921_sta_rc_update,
.set_key = mt7921_set_key,
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
- .wake_tx_queue = mt7921_wake_tx_queue,
+ .wake_tx_queue = mt76_wake_tx_queue,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,
@@ -1158,4 +1201,5 @@ const struct ieee80211_ops mt7921_ops = {
.set_wakeup = mt7921_set_wakeup,
.set_rekey_data = mt7921_set_rekey_data,
#endif /* CONFIG_PM */
+ .flush = mt7921_flush,
};
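
The new .flush callback is just a bounded wait for the pending-TX condition to drain; the completion side wakes the queue when the last frame is done. A sketch of the pairing, assuming a single pending counter (names hypothetical):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_tx_wait);
static atomic_t demo_tx_pending = ATOMIC_INIT(0);

/* .flush: give outstanding frames up to half a second to complete */
static void demo_flush(void)
{
        wait_event_timeout(demo_tx_wait, !atomic_read(&demo_tx_pending),
                           HZ / 2);
}

/* per-frame completion path */
static void demo_tx_complete(void)
{
        if (atomic_dec_and_test(&demo_tx_pending))
                wake_up(&demo_tx_wait);
}
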
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index b5cc72e7e81c..5f3d56d570a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -4,6 +4,7 @@
#include <linux/firmware.h>
#include <linux/fs.h>
#include "mt7921.h"
+#include "mt7921_trace.h"
#include "mcu.h"
#include "mac.h"
@@ -159,8 +160,10 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
int ret = 0;
if (!skb) {
- dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
+ dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
+ mt7921_reset(mdev);
+
return -ETIMEDOUT;
}
@@ -222,8 +225,16 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
u32 val;
u8 seq;
- /* TODO: make dynamic based on msg type */
- mdev->mcu.timeout = 20 * HZ;
+ switch (cmd) {
+ case MCU_UNI_CMD_HIF_CTRL:
+ case MCU_UNI_CMD_SUSPEND:
+ case MCU_UNI_CMD_OFFLOAD:
+ mdev->mcu.timeout = HZ / 3;
+ break;
+ default:
+ mdev->mcu.timeout = 3 * HZ;
+ break;
+ }
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
@@ -404,9 +415,12 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
if (wlan_idx >= MT76_N_WCIDS)
return;
+
+ rcu_read_lock();
+
wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
if (!wcid)
- return;
+ goto out;
msta = container_of(wcid, struct mt7921_sta, wcid);
stats = &msta->stats;
@@ -414,6 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
/* current rate */
mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
stats->tx_rate = rate;
+out:
+ rcu_read_unlock();
}
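
The fix here is classic RCU hygiene: rcu_dereference() on the wcid table is only legal inside a read-side critical section, so the lookup and the stats update are now bracketed by rcu_read_lock()/rcu_read_unlock(). The general shape (a sketch; table name and size hypothetical):

#include <linux/rcupdate.h>

struct demo_sta {
        int tx_rate;
};

/* writers publish entries with rcu_assign_pointer() */
static struct demo_sta __rcu *demo_wcid[16];

static void demo_update_rate(int idx, int rate)
{
        struct demo_sta *sta;

        rcu_read_lock();
        sta = rcu_dereference(demo_wcid[idx]);
        if (sta)
                sta->tx_rate = rate;    /* entry cannot be freed here */
        rcu_read_unlock();
}
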
static void
@@ -466,33 +482,45 @@ mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
static void
mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
- struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
- struct debug_msg {
+ struct mt7921_debug_msg {
__le16 id;
u8 type;
u8 flag;
__le32 value;
__le16 len;
u8 content[512];
- } __packed * debug_msg;
- u16 cur_len;
- int i;
-
- skb_pull(skb, sizeof(*rxd));
- debug_msg = (struct debug_msg *)skb->data;
+ } __packed * msg;
- cur_len = min_t(u16, le16_to_cpu(debug_msg->len), 512);
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ msg = (struct mt7921_debug_msg *)skb->data;
- if (debug_msg->type == 0x3) {
- for (i = 0 ; i < cur_len; i++)
- if (!debug_msg->content[i])
- debug_msg->content[i] = ' ';
+ if (msg->type == 3) { /* fw log */
+ u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
+ int i;
- dev_dbg(dev->mt76.dev, "%s", debug_msg->content);
+ for (i = 0; i < len; i++) {
+ if (!msg->content[i])
+ msg->content[i] = ' ';
+ }
+ wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
}
}
static void
+mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_lp_event {
+ u8 state;
+ u8 reserved[3];
+ } __packed * event;
+
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ event = (struct mt7921_mcu_lp_event *)skb->data;
+
+ trace_lp_event(dev, event->state);
+}
+
+static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
@@ -515,6 +543,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_connac_mcu_coredump_event(&dev->mt76, skb,
&dev->coredump);
return;
+ case MCU_EVENT_LP_INFO:
+ mt7921_mcu_low_power_event(dev, skb);
+ break;
default:
break;
}
@@ -537,6 +568,7 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
rxd->eid == MCU_EVENT_SCAN_DONE ||
rxd->eid == MCU_EVENT_DBG_MSG ||
rxd->eid == MCU_EVENT_COREDUMP ||
+ rxd->eid == MCU_EVENT_LP_INFO ||
!rxd->seq)
mt7921_mcu_rx_unsolicited_event(dev, skb);
else
@@ -919,6 +951,24 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
sizeof(data), false);
}
+int mt7921_run_firmware(struct mt7921_dev *dev)
+{
+ int err;
+
+ err = mt7921_driver_own(dev);
+ if (err)
+ return err;
+
+ err = mt7921_load_firmware(dev);
+ if (err)
+ return err;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7921_mcu_fw_log_2_host(dev, 1);
+
+ return 0;
+}
+
int mt7921_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921_mcu_ops = {
@@ -927,38 +977,15 @@ int mt7921_mcu_init(struct mt7921_dev *dev)
.mcu_parse_response = mt7921_mcu_parse_response,
.mcu_restart = mt7921_mcu_restart,
};
- int ret;
dev->mt76.mcu_ops = &mt7921_mcu_ops;
- ret = mt7921_driver_own(dev);
- if (ret)
- return ret;
-
- ret = mt7921_load_firmware(dev);
- if (ret)
- return ret;
-
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
- mt7921_mcu_fw_log_2_host(dev, 1);
-
- return 0;
+ return mt7921_run_firmware(dev);
}
void mt7921_mcu_exit(struct mt7921_dev *dev)
{
- u32 reg = mt7921_reg_map_l1(dev, MT_TOP_MISC);
-
- __mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_FW_DOWNLOAD), 1000)) {
- dev_err(dev->mt76.dev, "Failed to exit mcu\n");
- return;
- }
-
- reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
- mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN);
+ mt7921_wfsys_reset(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -1238,12 +1265,35 @@ int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
sizeof(req), false);
}
+int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ int rssi = -ewma_rssi_read(&mvif->rssi);
+ struct mt76_sta_cmd_info info = {
+ .sta = sta,
+ .vif = vif,
+ .enable = enable,
+ .cmd = MCU_UNI_CMD_STA_REC_UPDATE,
+ .rcpi = to_rcpi(rssi),
+ };
+ struct mt7921_sta *msta;
+
+ msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
+ info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
+
+ return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
+}
+
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
- int i;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int i, err = 0;
+
+ mutex_lock(&pm->mutex);
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ if (!test_bit(MT76_STATE_PM, &mphy->state))
goto out;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -1255,22 +1305,35 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
- return -EIO;
+ err = -EIO;
+ goto out;
}
+ mt7921_wpdma_reinit_cond(dev);
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
out:
- dev->pm.last_activity = jiffies;
+ mutex_unlock(&pm->mutex);
- return 0;
+ if (err)
+ mt7921_reset(&dev->mt76);
+
+ return err;
}
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
- int i;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int i, err = 0;
- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
- return 0;
+ mutex_lock(&pm->mutex);
+
+ if (mt76_connac_skip_fw_pmctrl(mphy, pm))
+ goto out;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
@@ -1281,10 +1344,20 @@ int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "firmware own failed\n");
- return -EIO;
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ err = -EIO;
}
- return 0;
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
+out:
+ mutex_unlock(&pm->mutex);
+
+ if (err)
+ mt7921_reset(&dev->mt76);
+
+ return err;
}
void
@@ -1292,8 +1365,14 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7921_phy *phy = priv;
struct mt7921_dev *dev = phy->dev;
+ int ret;
- if (mt7921_mcu_set_bss_pm(dev, vif, dev->pm.enable))
+ if (dev->pm.enable)
+ ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
+ else
+ ret = mt7921_mcu_set_bss_pm(dev, vif, false);
+
+ if (ret)
return;
if (dev->pm.enable) {
@@ -1304,3 +1383,26 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
}
}
+
+int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
+{
+ struct mt7921_txpwr_event *event;
+ struct mt7921_txpwr_req req = {
+ .dbdc_idx = 0,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR,
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ event = (struct mt7921_txpwr_event *)skb->data;
+ WARN_ON(skb->len != le16_to_cpu(event->len));
+ memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index 2fdc62367b3f..49823d0a3d0a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -79,12 +79,14 @@ struct mt7921_uni_txd {
/* event table */
enum {
MCU_EVENT_REG_ACCESS = 0x05,
+ MCU_EVENT_LP_INFO = 0x07,
MCU_EVENT_SCAN_DONE = 0x0d,
MCU_EVENT_BSS_ABSENCE = 0x11,
MCU_EVENT_BSS_BEACON_LOSS = 0x13,
MCU_EVENT_CH_PRIVILEGE = 0x18,
MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_DBG_MSG = 0x27,
+ MCU_EVENT_TXPWR = 0xd0,
MCU_EVENT_COREDUMP = 0xf0,
};
@@ -177,25 +179,6 @@ enum {
MCU_PHY_STATE_OFDMLQ_CNINFO,
};
-#define STA_TYPE_STA BIT(0)
-#define STA_TYPE_AP BIT(1)
-#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_WDS BIT(4)
-#define STA_TYPE_BC BIT(5)
-
-#define NETWORK_INFRA BIT(16)
-#define NETWORK_P2P BIT(17)
-#define NETWORK_IBSS BIT(18)
-#define NETWORK_WDS BIT(21)
-
-#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
-#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
-#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
-#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
-#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
-
struct sec_key {
u8 cipher_id;
u8 cipher_len;
@@ -251,29 +234,6 @@ enum {
MT_IBF = BIT(1) /* implicit beamforming */
};
-#define MT7921_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_generic) + \
- sizeof(struct wtbl_rx) + \
- sizeof(struct wtbl_ht) + \
- sizeof(struct wtbl_vht) + \
- sizeof(struct wtbl_hdr_trans) +\
- sizeof(struct wtbl_ba) + \
- sizeof(struct wtbl_smps))
-
-#define MT7921_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
- sizeof(struct sta_rec_basic) + \
- sizeof(struct sta_rec_ht) + \
- sizeof(struct sta_rec_he) + \
- sizeof(struct sta_rec_ba) + \
- sizeof(struct sta_rec_vht) + \
- sizeof(struct sta_rec_uapsd) + \
- sizeof(struct sta_rec_amsdu) + \
- sizeof(struct tlv) + \
- MT7921_WTBL_UPDATE_MAX_SIZE)
-
-#define MT7921_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_ba))
-
#define STA_CAP_WMM BIT(0)
#define STA_CAP_SGI_20 BIT(4)
#define STA_CAP_SGI_40 BIT(5)
@@ -431,4 +391,20 @@ struct mt7921_mcu_wlan_info {
__le32 wlan_idx;
struct mt7921_mcu_wlan_info_event event;
} __packed;
+
+struct mt7921_txpwr_req {
+ u8 ver;
+ u8 action;
+ __le16 len;
+ u8 dbdc_idx;
+ u8 rsv[3];
+} __packed;
+
+struct mt7921_txpwr_event {
+ u8 ver;
+ u8 action;
+ __le16 len;
+ struct mt7921_txpwr txpwr;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 46e6aeec35ae..59862ea4951c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -18,7 +18,7 @@
#define MT7921_PM_TIMEOUT (HZ / 12)
#define MT7921_HW_SCAN_TIMEOUT (HZ / 10)
-#define MT7921_WATCHDOG_TIME (HZ / 10)
+#define MT7921_WATCHDOG_TIME (HZ / 4)
#define MT7921_RESET_TIMEOUT (30 * HZ)
#define MT7921_TX_RING_SIZE 2048
@@ -35,7 +35,6 @@
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
-#define MT7921_TOKEN_FREE_THR 64
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
@@ -46,6 +45,9 @@
#define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM
#define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1)
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+#define to_rcpi(rssi) (2 * (rssi) + 220)
+
struct mt7921_vif;
struct mt7921_sta;
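
to_rcpi() inverts to_rssi()'s dBm mapping: rcpi = 2 * rssi + 220, so an averaged RSSI of -50 dBm becomes the RCPI of 120 that mt7921_mcu_sta_add() hands to the firmware, and (120 - 220) / 2 recovers -50. A two-line round-trip check (demo_* names hypothetical):

/* round-trip of the conversion macros above */
static inline int demo_to_rcpi(int rssi) { return 2 * rssi + 220; }
static inline int demo_to_rssi(int rcpi) { return (rcpi - 220) / 2; }
/* demo_to_rcpi(-50) == 120, demo_to_rssi(120) == -50 */
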
@@ -92,21 +94,25 @@ struct mt7921_sta {
struct mt7921_sta_key_conf bip;
};
+DECLARE_EWMA(rssi, 10, 8);
+
struct mt7921_vif {
struct mt76_vif mt76; /* must be first */
struct mt7921_sta sta;
struct mt7921_phy *phy;
+ struct ewma_rssi rssi;
+
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
};
struct mib_stats {
- u16 ack_fail_cnt;
- u16 fcs_err_cnt;
- u16 rts_cnt;
- u16 rts_retries_cnt;
- u16 ba_miss_cnt;
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+ u32 ba_miss_cnt;
};
struct mt7921_phy {
@@ -125,7 +131,7 @@ struct mt7921_phy {
s16 coverage_class;
u8 slottime;
- __le32 rx_ampdu_ts;
+ u32 rx_ampdu_ts;
u32 ampdu_ref;
struct mib_stats mib;
@@ -149,18 +155,11 @@ struct mt7921_dev {
u16 chainmask;
- struct work_struct init_work;
struct work_struct reset_work;
- wait_queue_head_t reset_wait;
- u32 reset_state;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
- spinlock_t token_lock;
- int token_count;
- struct idr token;
-
u8 fw_debug;
struct mt76_connac_pm pm;
@@ -168,6 +167,36 @@ struct mt7921_dev {
};
enum {
+ TXPWR_USER,
+ TXPWR_EEPROM,
+ TXPWR_MAC,
+ TXPWR_MAX_NUM,
+};
+
+struct mt7921_txpwr {
+ u8 ch;
+ u8 rsv[3];
+ struct {
+ u8 ch;
+ u8 cck[4];
+ u8 ofdm[8];
+ u8 ht20[8];
+ u8 ht40[9];
+ u8 vht20[12];
+ u8 vht40[12];
+ u8 vht80[12];
+ u8 vht160[12];
+ u8 he26[12];
+ u8 he52[12];
+ u8 he106[12];
+ u8 he242[12];
+ u8 he484[12];
+ u8 he996[12];
+ u8 he996x2[12];
+ } data[TXPWR_MAX_NUM];
+};
+
+enum {
MT_LMAC_AC00,
MT_LMAC_AC01,
MT_LMAC_AC02,
@@ -209,6 +238,7 @@ extern struct pci_driver mt7921_pci_driver;
u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr);
+int __mt7921_start(struct mt7921_phy *phy);
int mt7921_register_device(struct mt7921_dev *dev);
void mt7921_unregister_device(struct mt7921_dev *dev);
int mt7921_eeprom_init(struct mt7921_dev *dev);
@@ -218,15 +248,17 @@ int mt7921_eeprom_get_target_power(struct mt7921_dev *dev,
u8 chain_idx);
void mt7921_eeprom_init_sku(struct mt7921_dev *dev);
int mt7921_dma_init(struct mt7921_dev *dev);
-void mt7921_dma_prefetch(struct mt7921_dev *dev);
+int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
+int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
+int mt7921_run_firmware(struct mt7921_dev *dev);
int mt7921_mcu_init(struct mt7921_dev *dev);
-int mt7921_mcu_add_bss_info(struct mt7921_phy *phy,
- struct ieee80211_vif *vif, int enable);
int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
struct mt7921_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7921_set_channel(struct mt7921_phy *phy);
+int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, bool enable);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
@@ -281,6 +313,12 @@ mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val)
#define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val)
#define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0)
+static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
+{
+ return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
+}
+
+void mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -296,10 +334,14 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_work(struct work_struct *work);
void mt7921_mac_reset_work(struct work_struct *work);
+void mt7921_reset(struct mt76_dev *mdev);
+void mt7921_tx_cleanup(struct mt7921_dev *dev);
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
+
+void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
void mt7921_tx_token_put(struct mt7921_dev *dev);
@@ -326,9 +368,6 @@ int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
-int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info);
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
@@ -339,4 +378,6 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
bool enable);
void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
void mt7921_coredump_work(struct work_struct *work);
+int mt7921_wfsys_reset(struct mt7921_dev *dev);
+int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
#endif
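
DECLARE_EWMA(rssi, 10, 8) expands to struct ewma_rssi plus ewma_rssi_init/add/read helpers (10 fractional bits, a 1/8 weight for each new sample); the driver seeds it per vif in add_interface and negates the stored value back into dBm when computing the RCPI. Usage in isolation (a sketch; demo_* names hypothetical):

#include <linux/average.h>

DECLARE_EWMA(demo_rssi, 10, 8);

static struct ewma_demo_rssi demo_avg;  /* ewma_demo_rssi_init() first */

/* store negated so the unsigned EWMA machinery can track a dBm value */
static void demo_rssi_sample(int rssi_dbm)
{
        ewma_demo_rssi_add(&demo_avg, -rssi_dbm);
}

static int demo_rssi_read(void)
{
        return -(int)ewma_demo_rssi_read(&demo_avg);
}
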
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h
new file mode 100644
index 000000000000..9bc4db67f352
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#if !defined(__MT7921_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7921_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7921.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7921
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+#define LP_STATE_PR_ARG __entry->lp_state ? "lp ready" : "lp not ready"
+
+TRACE_EVENT(lp_event,
+ TP_PROTO(struct mt7921_dev *dev, u8 lp_state),
+
+ TP_ARGS(dev, lp_state),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, lp_state)
+ ),
+
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->lp_state = lp_state;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %s",
+ DEV_PR_ARG, LP_STATE_PR_ARG
+ )
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt7921_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 5570b4a50531..fa02d934f0bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -13,7 +13,7 @@
#include "../trace.h"
static const struct pci_device_id mt7921_pci_device_table[] = {
- { PCI_DEVICE(0x14c3, 0x7961) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
{ },
};
@@ -61,6 +61,18 @@ static void mt7921_irq_tasklet(unsigned long data)
if (intr & MT_INT_TX_DONE_MCU)
mask |= MT_INT_TX_DONE_MCU;
+ if (intr & MT_INT_MCU_CMD) {
+ u32 intr_sw;
+
+ intr_sw = mt76_rr(dev, MT_MCU_CMD);
+ /* ack MCU2HOST_SW_INT_STA */
+ mt76_wr(dev, MT_MCU_CMD, intr_sw);
+ if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
+ mask |= MT_INT_RX_DONE_DATA;
+ intr |= MT_INT_RX_DONE_DATA;
+ }
+ }
+
mt76_set_irq_mask(&dev->mt76, MT_WFDMA0_HOST_INT_ENA, mask, 0);
if (intr & MT_INT_TX_DONE_ALL)
@@ -87,6 +99,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921_tx_prepare_skb,
.tx_complete_skb = mt7921_tx_complete_skb,
.rx_skb = mt7921_queue_rx_skb,
@@ -137,7 +150,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
- mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
@@ -146,10 +159,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
ret = mt7921_register_device(dev);
if (ret)
- goto err_free_dev;
+ goto err_free_irq;
return 0;
+err_free_irq:
+ devm_free_irq(&pdev->dev, pdev->irq, dev);
err_free_dev:
mt76_free_device(&dev->mt76);
err_free_pci_vec:
@@ -187,13 +202,15 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
return err;
}
+ if (!dev->pm.enable)
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
+
napi_disable(&mdev->tx_napi);
mt76_worker_disable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_disable(&mdev->napi[i]);
}
- tasklet_kill(&dev->irq_tasklet);
pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
@@ -208,13 +225,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
/* disable interrupt */
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ synchronize_irq(pdev->irq);
+ tasklet_kill(&dev->irq_tasklet);
- pci_save_state(pdev);
- err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ err = mt7921_mcu_fw_pmctrl(dev);
if (err)
goto restore;
- err = mt7921_mcu_drv_pmctrl(dev);
+ pci_save_state(pdev);
+ err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err)
goto restore;
@@ -225,6 +245,10 @@ restore:
napi_enable(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ if (!dev->pm.enable)
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
+
if (hif_suspend)
mt76_connac_mcu_set_hif_suspend(mdev, false);
@@ -237,20 +261,23 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
int i, err;
- err = mt7921_mcu_fw_pmctrl(dev);
- if (err < 0)
- return err;
-
err = pci_set_power_state(pdev, PCI_D0);
if (err)
return err;
pci_restore_state(pdev);
+ err = mt7921_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ return err;
+
+ mt7921_wpdma_reinit_cond(dev);
+
/* enable interrupt */
- mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_MCU_CMD);
+ mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);
/* put dma enabled */
mt76_set(dev, MT_WFDMA0_GLO_CFG,
@@ -264,6 +291,9 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
+ if (!dev->pm.enable)
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
+
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index 6dad7f6ab09d..b6944c867a57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -96,8 +96,8 @@
#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
-#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
-#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698)
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16)
#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
@@ -121,16 +121,21 @@
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
-#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_MB_BSDR0(_band) MT_WF_MIB(_band, 0x688)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_MB_BSDR1(_band) MT_WF_MIB(_band, 0x690)
+#define MT_MIB_RTS_FAIL_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_MB_BSDR2(_band) MT_WF_MIB(_band, 0x518)
+#define MT_MIB_BA_FAIL_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_MB_BSDR3(_band) MT_WF_MIB(_band, 0x520)
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4))
#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0)
-#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
-#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
-#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x7dc + ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x7ec + ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
#define MT_WTBLON_TOP_BASE 0x34000
@@ -246,13 +251,16 @@
#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_MCU_CMD MT_WFDMA0(0x1f0)
-#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
-#define MT_MCU_CMD_STOP_DMA BIT(2)
-#define MT_MCU_CMD_RESET_DONE BIT(3)
-#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
-#define MT_MCU_CMD_NORMAL_STATE BIT(5)
-#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+#define MT_MCU_CMD MT_WFDMA0(0x1f0)
+#define MT_MCU_CMD_WAKE_RX_PCIE BIT(0)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+#define MT_MCU2HOST_SW_INT_ENA MT_WFDMA0(0x1f4)
#define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200)
#define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */
@@ -357,11 +365,11 @@
#define MT_INFRA_CFG_BASE 0xfe000
#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
-#define MT_HIF_REMAP_L1 MT_INFRA(0x260)
+#define MT_HIF_REMAP_L1 MT_INFRA(0x24c)
#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
-#define MT_HIF_REMAP_BASE_L1 0xe0000
+#define MT_HIF_REMAP_BASE_L1 0x40000
#define MT_SWDEF_BASE 0x41f200
#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
@@ -380,11 +388,17 @@
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+#define MT_MCU_WPDMA0_BASE 0x54000000
+#define MT_MCU_WPDMA0(ofs) (MT_MCU_WPDMA0_BASE + (ofs))
+
+#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120)
+#define MT_WFDMA_NEED_REINIT BIT(1)
+
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
-#define MT_PCIE_MAC_BASE 0x74030000
+#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
@@ -413,6 +427,10 @@
#define PCIE_LPCR_HOST_CLR_OWN BIT(1)
#define PCIE_LPCR_HOST_SET_OWN BIT(0)
+#define MT_WFSYS_SW_RST_B 0x18000140
+#define WFSYS_SW_RST_B BIT(0)
+#define WFSYS_SW_INIT_DONE BIT(4)
+
#define MT_CONN_ON_MISC 0x7c0600f0
#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/trace.c b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c
new file mode 100644
index 000000000000..4dc3c7b89ebd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt7921_trace.h"
+
+#endif
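
This is the standard two-file tracepoint convention: the header tolerates repeated inclusion via TRACE_HEADER_MULTI_READ, and exactly one translation unit (trace.c above) defines CREATE_TRACE_POINTS before including it so the event bodies are emitted once. Call sites then use the generated stub, as mt7921_mcu_low_power_event() does; a minimal caller (demo_emit is hypothetical):

#include "mt7921_trace.h"

static void demo_emit(struct mt7921_dev *dev, u8 state)
{
        /* compiles to a patched-out branch unless the event is enabled */
        trace_lp_event(dev, state);
}
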
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 0b6facb17ff7..a18d2896ee1f 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
q->entry[q->head].skb = tx_info.skb;
q->entry[q->head].buf_sz = len;
+
+ smp_wmb();
+
q->head = (q->head + 1) % q->ndesc;
q->queued++;
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index cc769645afa5..001d0ba5f73e 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -62,36 +62,83 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
spin_unlock_bh(&q->lock);
}
+static u32
+mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
+{
+ switch (tx_rate_mode) {
+ case MT76_TM_TX_MODE_HT:
+ return IEEE80211_MAX_MPDU_LEN_HT_7935;
+ case MT76_TM_TX_MODE_VHT:
+ case MT76_TM_TX_MODE_HE_SU:
+ case MT76_TM_TX_MODE_HE_EXT_SU:
+ case MT76_TM_TX_MODE_HE_TB:
+ case MT76_TM_TX_MODE_HE_MU:
+ if (phy->sband_5g.sband.vht_cap.cap &
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991)
+ return IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ return IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ case MT76_TM_TX_MODE_CCK:
+ case MT76_TM_TX_MODE_OFDM:
+ default:
+ return IEEE80211_MAX_FRAME_LEN;
+ }
+}
-static int
-mt76_testmode_tx_init(struct mt76_phy *phy)
+static void
+mt76_testmode_free_skb(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
- struct ieee80211_tx_info *info;
- struct ieee80211_hdr *hdr;
- struct sk_buff *skb;
+ struct sk_buff *skb = td->tx_skb;
+
+ if (!skb)
+ return;
+
+ if (skb_has_frag_list(skb)) {
+ kfree_skb_list(skb_shinfo(skb)->frag_list);
+ skb_shinfo(skb)->frag_list = NULL;
+ }
+
+ dev_kfree_skb(skb);
+ td->tx_skb = NULL;
+}
+
+int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
+{
+#define MT_TXP_MAX_LEN 4095
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_FROMDS;
- struct ieee80211_tx_rate *rate;
- u8 max_nss = hweight8(phy->antenna_mask);
+ struct mt76_testmode_data *td = &phy->test;
bool ext_phy = phy != &phy->dev->phy;
+ struct sk_buff **frag_tail, *head;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ u32 max_len, head_len;
+ int nfrags, i;
- if (td->tx_antenna_mask)
- max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
+ max_len = mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode);
+ if (len > max_len)
+ len = max_len;
+ else if (len < sizeof(struct ieee80211_hdr))
+ len = sizeof(struct ieee80211_hdr);
- skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL);
- if (!skb)
+ nfrags = len / MT_TXP_MAX_LEN;
+ head_len = nfrags ? MT_TXP_MAX_LEN : len;
+
+ if (len > IEEE80211_MAX_FRAME_LEN)
+ fc |= IEEE80211_STYPE_QOS_DATA;
+
+ head = alloc_skb(head_len, GFP_KERNEL);
+ if (!head)
return -ENOMEM;
- dev_kfree_skb(td->tx_skb);
- td->tx_skb = skb;
- hdr = __skb_put_zero(skb, td->tx_msdu_len);
+ hdr = __skb_put_zero(head, head_len);
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr));
memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr));
memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr));
+ skb_set_queue_mapping(head, IEEE80211_AC_BE);
- info = IEEE80211_SKB_CB(skb);
+ info = IEEE80211_SKB_CB(head);
info->flags = IEEE80211_TX_CTL_INJECTED |
IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_NO_PS_BUFFER;
@@ -99,9 +146,60 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ frag_tail = &skb_shinfo(head)->frag_list;
+
+ for (i = 0; i < nfrags; i++) {
+ struct sk_buff *frag;
+ u16 frag_len;
+
+ if (i == nfrags - 1)
+ frag_len = len % MT_TXP_MAX_LEN;
+ else
+ frag_len = MT_TXP_MAX_LEN;
+
+ frag = alloc_skb(frag_len, GFP_KERNEL);
+ if (!frag) {
+ /* head owns the frag list; freeing it frees earlier frags */
+ dev_kfree_skb(head);
+ return -ENOMEM;
+ }
+
+ __skb_put_zero(frag, frag_len);
+ head->len += frag->len;
+ head->data_len += frag->len;
+
+ /* append and advance the tail pointer; taking &frag here would
+ * leave frag_tail pointing at the on-stack variable
+ */
+ *frag_tail = frag;
+ frag_tail = &(*frag_tail)->next;
+ }
+
+ mt76_testmode_free_skb(phy);
+ td->tx_skb = head;
+
+ return 0;
+}
+EXPORT_SYMBOL(mt76_testmode_alloc_skb);
+
+static int
+mt76_testmode_tx_init(struct mt76_phy *phy)
+{
+ struct mt76_testmode_data *td = &phy->test;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_tx_rate *rate;
+ u8 max_nss = hweight8(phy->antenna_mask);
+ int ret;
+
+ ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
+ if (ret)
+ return ret;
+
if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT)
goto out;
+ if (td->tx_antenna_mask)
+ max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
+
+ info = IEEE80211_SKB_CB(td->tx_skb);
rate = &info->control.rates[0];
rate->count = 1;
rate->idx = td->tx_rate_idx;
@@ -171,8 +269,6 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
}
}
out:
- skb_set_queue_mapping(skb, IEEE80211_AC_BE);
-
return 0;
}
@@ -203,8 +299,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
MT76_TM_TIMEOUT * HZ);
- dev_kfree_skb(td->tx_skb);
- td->tx_skb = NULL;
+ mt76_testmode_free_skb(phy);
}
static inline void
@@ -224,10 +319,10 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
- if (td->tx_msdu_len > 0)
+ if (td->tx_mpdu_len > 0)
return;
- td->tx_msdu_len = 1024;
+ td->tx_mpdu_len = 1024;
td->tx_count = 1;
td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
td->tx_rate_nss = 1;
@@ -345,16 +440,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (tb[MT76_TM_ATTR_TX_COUNT])
td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);
- if (tb[MT76_TM_ATTR_TX_LENGTH]) {
- u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
-
- if (val > IEEE80211_MAX_FRAME_LEN ||
- val < sizeof(struct ieee80211_hdr))
- goto out;
-
- td->tx_msdu_len = val;
- }
-
if (tb[MT76_TM_ATTR_TX_RATE_IDX])
td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);
@@ -375,6 +460,16 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
&td->tx_power_control, 0, 1))
goto out;
+ if (tb[MT76_TM_ATTR_TX_LENGTH]) {
+ u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
+
+ if (val > mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode) ||
+ val < sizeof(struct ieee80211_hdr))
+ goto out;
+
+ td->tx_mpdu_len = val;
+ }
+
if (tb[MT76_TM_ATTR_TX_IPG])
td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]);
@@ -506,7 +601,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
goto out;
if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) ||
+ nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
index e0c706ce9b42..d32a7654c47e 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/testmode.h
@@ -21,7 +21,7 @@
* @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting
* state to MT76_TM_STATE_TX_FRAMES (u32)
* @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32)
- * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32)
+ * @MT76_TM_ATTR_TX_LENGTH: packet tx mpdu length (u32)
* @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode)
* @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8)
* @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index b8fe8adc43a3..53ea8de82df0 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -213,7 +213,7 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
if (phy->test.tx_queued == phy->test.tx_done)
wake_up(&dev->tx_wait);
- ieee80211_free_txskb(hw, skb);
+ dev_kfree_skb_any(skb);
return;
}
#endif
@@ -422,8 +422,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
return idx;
do {
- if (test_bit(MT76_STATE_PM, &phy->state) ||
- test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state))
return -EBUSY;
if (stop || mt76_txq_stopped(q))
@@ -461,11 +460,10 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
int ret = 0;
while (1) {
- if (test_bit(MT76_STATE_PM, &phy->state) ||
- test_bit(MT76_RESET, &phy->state)) {
- ret = -EBUSY;
- break;
- }
+ int n_frames = 0;
+
+ if (test_bit(MT76_RESET, &phy->state))
+ return -EBUSY;
if (dev->queue_ops->tx_cleanup &&
q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
@@ -497,11 +495,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
}
if (!mt76_txq_stopped(q))
- ret += mt76_txq_send_burst(phy, q, mtxq);
+ n_frames = mt76_txq_send_burst(phy, q, mtxq);
spin_unlock_bh(&q->lock);
ieee80211_return_txq(phy->hw, txq, false);
+
+ if (unlikely(n_frames < 0))
+ return n_frames;
+
+ ret += n_frames;
}
return ret;
@@ -535,10 +538,8 @@ void mt76_txq_schedule_all(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
-void mt76_tx_worker(struct mt76_worker *w)
+void mt76_tx_worker_run(struct mt76_dev *dev)
{
- struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
-
mt76_txq_schedule_all(&dev->phy);
if (dev->phy2)
mt76_txq_schedule_all(dev->phy2);
@@ -550,6 +551,14 @@ void mt76_tx_worker(struct mt76_worker *w)
mt76_testmode_tx_pending(dev->phy2);
#endif
}
+EXPORT_SYMBOL_GPL(mt76_tx_worker_run);
+
+void mt76_tx_worker(struct mt76_worker *w)
+{
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
+
+ mt76_tx_worker_run(dev);
+}
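Editor's note: splitting mt76_tx_worker() into an exported mt76_tx_worker_run() lets bus glue drive the same TX scheduling pass from its own context. A minimal sketch of that use, assuming a hypothetical driver-owned kthread (names below are illustrative, not part of this patch):

        /*
         * Hedged sketch: a bus driver reusing the exported
         * mt76_tx_worker_run() from its own thread instead of the
         * shared mt76 worker.  The thread function and its wakeup
         * policy are assumptions for illustration only.
         */
        static int example_sdio_tx_thread(void *data)
        {
                struct mt76_dev *dev = data;

                while (!kthread_should_stop()) {
                        /* same scheduling pass the common worker runs */
                        mt76_tx_worker_run(dev);
                        /* placeholder wait; a real driver blocks on TX events */
                        schedule_timeout_interruptible(HZ / 100);
                }
                return 0;
        }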
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
bool send_bar)
@@ -639,3 +648,64 @@ void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
+
+void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+{
+ struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
+ struct mt76_queue *q, *q2 = NULL;
+
+ q = phy->q_tx[0];
+ if (blocked == q->blocked)
+ return;
+
+ q->blocked = blocked;
+ if (phy2) {
+ q2 = phy2->q_tx[0];
+ q2->blocked = blocked;
+ }
+
+ if (!blocked)
+ mt76_worker_schedule(&dev->tx_worker);
+}
+EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);
+
+int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+{
+ int token;
+
+ spin_lock_bh(&dev->token_lock);
+
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
+ GFP_ATOMIC);
+ if (token >= 0)
+ dev->token_count++;
+
+ if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
+ __mt76_set_tx_blocked(dev, true);
+
+ spin_unlock_bh(&dev->token_lock);
+
+ return token;
+}
+EXPORT_SYMBOL_GPL(mt76_token_consume);
+
+struct mt76_txwi_cache *
+mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+{
+ struct mt76_txwi_cache *txwi;
+
+ spin_lock_bh(&dev->token_lock);
+
+ txwi = idr_remove(&dev->token, token);
+ if (txwi)
+ dev->token_count--;
+
+ if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
+ dev->phy.q_tx[0]->blocked)
+ *wake = true;
+
+ spin_unlock_bh(&dev->token_lock);
+
+ return txwi;
+}
+EXPORT_SYMBOL_GPL(mt76_token_release);
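Editor's note: the new token helpers centralize the TXWI idr bookkeeping and the TX-blocking threshold. A hedged sketch of the intended calling pattern, using only symbols introduced above (the example_* wrappers are hypothetical):

        static int example_tx_prepare(struct mt76_dev *dev,
                                      struct mt76_txwi_cache *t)
        {
                int token = mt76_token_consume(dev, &t);

                if (token < 0)          /* idr full: drop or retry later */
                        return token;
                /* ... store token in the hardware TX descriptor ... */
                return token;
        }

        static void example_tx_complete(struct mt76_dev *dev, int token)
        {
                struct mt76_txwi_cache *t;
                bool wake = false;

                t = mt76_token_release(dev, token, &wake);
                if (!t)
                        return;
                /* ... unmap DMA, free the skb, recycle the txwi ... */
                if (wake) {
                        spin_lock_bh(&dev->token_lock);
                        __mt76_set_tx_blocked(dev, false);
                        spin_unlock_bh(&dev->token_lock);
                }
        }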
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index c868582c5d22..aa3b64902cf9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
{
u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
- return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+ return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
}
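Editor's note: the (u16) cast matters because ~ promotes its u16 operand to int. For an erased EEPROM word of 0xffff, ~nic_conf1 is 0xffff0000, which is non-zero, so the old all-ones sentinel check silently passed. A condensed illustration:

        static bool example_conf1_programmed(u16 nic_conf1)
        {
                /*
                 * For nic_conf1 == 0xffff: ~nic_conf1 promotes to int
                 * 0xffff0000 (true, wrong); (u16)~nic_conf1 truncates
                 * to 0 (false, as intended).
                 */
                return (u16)~nic_conf1 != 0;
        }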
static void
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index cada48800928..5d9e952b2966 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -610,6 +610,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
diff --git a/drivers/net/wireless/microchip/wilc1000/Kconfig b/drivers/net/wireless/microchip/wilc1000/Kconfig
index 7f15e42602dd..62cfcdc9aacc 100644
--- a/drivers/net/wireless/microchip/wilc1000/Kconfig
+++ b/drivers/net/wireless/microchip/wilc1000/Kconfig
@@ -27,6 +27,7 @@ config WILC1000_SPI
depends on CFG80211 && INET && SPI
select WILC1000
select CRC7
+ select CRC_ITU_T
help
This module adds support for the SPI interface of adapters using
WILC1000 chipset. The Atmel WILC1000 has a Serial Peripheral
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 1b205e7d97a8..7e4d9235251c 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -24,12 +24,10 @@
static irqreturn_t isr_uh_routine(int irq, void *user_data)
{
- struct net_device *dev = user_data;
- struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
+ struct wilc *wilc = user_data;
if (wilc->close) {
- netdev_err(dev, "Can't handle UH interrupt\n");
+ pr_err("Can't handle UH interrupt");
return IRQ_HANDLED;
}
return IRQ_WAKE_THREAD;
@@ -37,12 +35,10 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data)
static irqreturn_t isr_bh_routine(int irq, void *userdata)
{
- struct net_device *dev = userdata;
- struct wilc_vif *vif = netdev_priv(userdata);
- struct wilc *wilc = vif->wilc;
+ struct wilc *wilc = userdata;
if (wilc->close) {
- netdev_err(dev, "Can't handle BH interrupt\n");
+ pr_err("Can't handle BH interrupt\n");
return IRQ_HANDLED;
}
@@ -60,7 +56,7 @@ static int init_irq(struct net_device *dev)
ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine,
isr_bh_routine,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "WILC_IRQ", dev);
+ "WILC_IRQ", wl);
if (ret) {
netdev_err(dev, "Failed to request IRQ [%d]\n", ret);
return ret;
@@ -575,7 +571,6 @@ static int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif = netdev_priv(ndev);
struct wilc *wl = vif->wilc;
- unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
struct mgmt_frame_regs mgmt_regs = {};
@@ -598,9 +593,12 @@ static int wilc_mac_open(struct net_device *ndev)
wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
vif->idx);
- wilc_get_mac_address(vif, mac_add);
- netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
- ether_addr_copy(ndev->dev_addr, mac_add);
+
+ if (is_valid_ether_addr(ndev->dev_addr))
+ wilc_set_mac_address(vif, ndev->dev_addr);
+ else
+ wilc_get_mac_address(vif, ndev->dev_addr);
+ netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
netdev_err(ndev, "Wrong MAC address\n");
@@ -639,7 +637,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
int srcu_idx;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
+
+ if (!vif->mac_opened) {
+ eth_commit_mac_addr_change(dev, p);
+ return 0;
+ }
+
+ /* Verify the MAC address is not already in use: */
srcu_idx = srcu_read_lock(&wilc->srcu);
list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
@@ -647,7 +652,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
srcu_read_unlock(&wilc->srcu, srcu_idx);
- return -EINVAL;
+ return -EADDRNOTAVAIL;
}
srcu_read_unlock(&wilc->srcu, srcu_idx);
return 0;
@@ -659,9 +664,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
if (result)
return result;
- ether_addr_copy(vif->bssid, addr->sa_data);
- ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
-
+ eth_commit_mac_addr_change(dev, p);
return result;
}
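Editor's note: the conversion above converges on the stock .ndo_set_mac_address shape, where eth_commit_mac_addr_change() is the single place that copies sa_data into dev->dev_addr. A minimal sketch of that shape (the hardware-programming step is a placeholder):

        static int example_set_mac_address(struct net_device *dev, void *p)
        {
                struct sockaddr *addr = p;

                if (!is_valid_ether_addr(addr->sa_data))
                        return -EADDRNOTAVAIL;

                /* ... program addr->sa_data into the device if it is up ... */

                eth_commit_mac_addr_change(dev, p);
                return 0;
        }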
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 351ff909ab1c..e14b9fc2c67a 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
- ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
+ ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
if (ret) {
dev_err(&func->dev,
"Failed write reg (%08x)...\n",
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index be732929322c..1472e9843896 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -7,12 +7,41 @@
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/crc7.h>
+#include <linux/crc-itu-t.h>
#include "netdev.h"
#include "cfg80211.h"
+static bool enable_crc7; /* protect SPI commands with CRC7 */
+module_param(enable_crc7, bool, 0644);
+MODULE_PARM_DESC(enable_crc7,
+ "Enable CRC7 checksum to protect command transfers\n"
+ "\t\t\tagainst corruption during the SPI transfer.\n"
+ "\t\t\tCommand transfers are short and the CPU-cycle cost\n"
+ "\t\t\tof enabling this is small.");
+
+static bool enable_crc16; /* protect SPI data with CRC16 */
+module_param(enable_crc16, bool, 0644);
+MODULE_PARM_DESC(enable_crc16,
+ "Enable CRC16 checksum to protect data transfers\n"
+ "\t\t\tagainst corruption during the SPI transfer.\n"
+ "\t\t\tData transfers can be large and the CPU-cycle cost\n"
+ "\t\t\tof enabling this may be substantial.");
+
+/*
+ * For CMD_SINGLE_READ and CMD_INTERNAL_READ, WILC may insert one or
+ * more zero bytes between the command response and the DATA Start tag
+ * (0xf3). This behavior appears to be undocumented in "ATWILC1000
+ * USER GUIDE" (https://tinyurl.com/4hhshdts) but we have observed 1-4
+ * zero bytes when the SPI bus operates at 48 MHz and none when it
+ * operates at 1 MHz.
+ */
+#define WILC_SPI_RSP_HDR_EXTRA_DATA 8
+
struct wilc_spi {
- int crc_off;
+ bool probing_crc; /* true if we're probing chip's CRC config */
+ bool crc7_enabled; /* true if crc7 is currently enabled */
+ bool crc16_enabled; /* true if crc16 is currently enabled */
};
static const struct wilc_hif_func wilc_hif_spi;
@@ -36,12 +65,36 @@ static const struct wilc_hif_func wilc_hif_spi;
#define CMD_RESET 0xcf
#define SPI_ENABLE_VMM_RETRY_LIMIT 2
-#define DATA_PKT_SZ_256 256
-#define DATA_PKT_SZ_512 512
-#define DATA_PKT_SZ_1K 1024
-#define DATA_PKT_SZ_4K (4 * 1024)
-#define DATA_PKT_SZ_8K (8 * 1024)
-#define DATA_PKT_SZ DATA_PKT_SZ_8K
+
+/* SPI response fields (section 11.1.2 in ATWILC1000 User Guide): */
+#define RSP_START_FIELD GENMASK(7, 4)
+#define RSP_TYPE_FIELD GENMASK(3, 0)
+
+/* SPI response values for the response fields: */
+#define RSP_START_TAG 0xc
+#define RSP_TYPE_FIRST_PACKET 0x1
+#define RSP_TYPE_INNER_PACKET 0x2
+#define RSP_TYPE_LAST_PACKET 0x3
+#define RSP_STATE_NO_ERROR 0x00
+
+#define PROTOCOL_REG_PKT_SZ_MASK GENMASK(6, 4)
+#define PROTOCOL_REG_CRC16_MASK GENMASK(3, 3)
+#define PROTOCOL_REG_CRC7_MASK GENMASK(2, 2)
+
+/*
+ * The SPI data packet size may be any integer power of two in the
+ * range from 256 to 8192 bytes.
+ */
+#define DATA_PKT_LOG_SZ_MIN 8 /* 256 B */
+#define DATA_PKT_LOG_SZ_MAX 13 /* 8 KiB */
+
+/*
+ * Select the data packet size (log2 of number of bytes): Use the
+ * maximum data packet size. We only retransmit complete packets, so
+ * there is no benefit from using smaller data packets.
+ */
+#define DATA_PKT_LOG_SZ DATA_PKT_LOG_SZ_MAX
+#define DATA_PKT_SZ (1 << DATA_PKT_LOG_SZ)
#define USE_SPI_DMA 0
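Editor's note: a worked example of the packet-size encoding these macros feed into later in wilc_spi_init(). The 3-bit PROTOCOL_REG_PKT_SZ field holds log2(bytes) - 8, so 256 B encodes as 0 and 8 KiB as 5:

        static u32 example_pkt_sz_field(void)
        {
                /* with DATA_PKT_LOG_SZ == 13 this is 5 << 4 == 0x50 */
                return FIELD_PREP(PROTOCOL_REG_PKT_SZ_MASK,
                                  DATA_PKT_LOG_SZ - DATA_PKT_LOG_SZ_MIN);
        }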
@@ -79,16 +132,15 @@ struct wilc_spi_cmd {
} __packed;
struct wilc_spi_read_rsp_data {
- u8 rsp_cmd_type;
- u8 status;
- u8 resp_header;
- u8 resp_data[4];
+ u8 header;
+ u8 data[4];
u8 crc[];
} __packed;
struct wilc_spi_rsp_data {
u8 rsp_cmd_type;
u8 status;
+ u8 data[];
} __packed;
static int wilc_bus_probe(struct spi_device *spi)
@@ -281,7 +333,8 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
struct wilc_spi *spi_priv = wilc->bus_data;
int ix, nbytes;
int result = 0;
- u8 cmd, order, crc[2] = {0};
+ u8 cmd, order, crc[2];
+ u16 crc_calc;
/*
* Data
@@ -323,9 +376,12 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
}
/*
- * Write Crc
+ * Write CRC
*/
- if (!spi_priv->crc_off) {
+ if (spi_priv->crc16_enabled) {
+ crc_calc = crc_itu_t(0xffff, &b[ix], nbytes);
+ crc[0] = crc_calc >> 8;
+ crc[1] = crc_calc;
if (wilc_spi_tx(wilc, crc, 2)) {
dev_err(&spi->dev, "Failed data block crc write, bus error...\n");
result = -EINVAL;
@@ -359,10 +415,11 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
u8 wb[32], rb[32];
- int cmd_len, resp_len;
- u8 crc[2];
+ int cmd_len, resp_len, i;
+ u16 crc_calc, crc_recv;
struct wilc_spi_cmd *c;
- struct wilc_spi_read_rsp_data *r;
+ struct wilc_spi_rsp_data *r;
+ struct wilc_spi_read_rsp_data *r_data;
memset(wb, 0x0, sizeof(wb));
memset(rb, 0x0, sizeof(rb));
@@ -384,8 +441,9 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
}
cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc);
- resp_len = sizeof(*r);
- if (!spi_priv->crc_off) {
+ resp_len = sizeof(*r) + sizeof(*r_data) + WILC_SPI_RSP_HDR_EXTRA_DATA;
+
+ if (spi_priv->crc7_enabled) {
c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
cmd_len += 1;
resp_len += 2;
@@ -403,11 +461,12 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
return -EINVAL;
}
- r = (struct wilc_spi_read_rsp_data *)&rb[cmd_len];
+ r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
if (r->rsp_cmd_type != cmd) {
- dev_err(&spi->dev,
- "Failed cmd response, cmd (%02x), resp (%02x)\n",
- cmd, r->rsp_cmd_type);
+ if (!spi_priv->probing_crc)
+ dev_err(&spi->dev,
+ "Failed cmd, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
return -EINVAL;
}
@@ -417,17 +476,30 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
return -EINVAL;
}
- if (WILC_GET_RESP_HDR_START(r->resp_header) != 0xf) {
- dev_err(&spi->dev, "Error, data read response (%02x)\n",
- r->resp_header);
+ for (i = 0; i < WILC_SPI_RSP_HDR_EXTRA_DATA; ++i)
+ if (WILC_GET_RESP_HDR_START(r->data[i]) == 0xf)
+ break;
+
+ if (i >= WILC_SPI_RSP_HDR_EXTRA_DATA) {
+ dev_err(&spi->dev, "Error, data start missing\n");
return -EINVAL;
}
- if (b)
- memcpy(b, r->resp_data, 4);
+ r_data = (struct wilc_spi_read_rsp_data *)&r->data[i];
- if (!spi_priv->crc_off)
- memcpy(crc, r->crc, 2);
+ if (b)
+ memcpy(b, r_data->data, 4);
+
+ if (!clockless && spi_priv->crc16_enabled) {
+ crc_recv = (r_data->crc[0] << 8) | r_data->crc[1];
+ crc_calc = crc_itu_t(0xffff, r_data->data, 4);
+ if (crc_recv != crc_calc) {
+ dev_err(&spi->dev, "%s: bad CRC 0x%04x "
+ "(calculated 0x%04x)\n", __func__,
+ crc_recv, crc_calc);
+ return -EINVAL;
+ }
+ }
return 0;
}
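Editor's note: an illustrative response layout for the scan added above. The byte values are invented for the example, not captured bus data; the point is that 0-4 zero pad bytes may precede the 0xf* data-start header, which is why the loop probes up to WILC_SPI_RSP_HDR_EXTRA_DATA bytes:

        static const u8 example_rsp[] = {
                0xc4,                   /* rsp_cmd_type: echoes the command byte */
                0x00,                   /* status: no error                      */
                0x00, 0x00,             /* zero padding, count is clock-dependent */
                0xf3,                   /* data start tag (top nibble == 0xf)    */
                0x11, 0x22, 0x33, 0x44, /* 4 bytes of register data              */
                0xab, 0xcd,             /* CRC16, present only when enabled      */
        };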
@@ -454,7 +526,7 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data,
c->u.internal_w_cmd.addr[1] = adr;
c->u.internal_w_cmd.data = cpu_to_be32(data);
cmd_len = offsetof(struct wilc_spi_cmd, u.internal_w_cmd.crc);
- if (!spi_priv->crc_off)
+ if (spi_priv->crc7_enabled)
c->u.internal_w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
} else if (cmd == CMD_SINGLE_WRITE) {
c->u.w_cmd.addr[0] = adr >> 16;
@@ -462,14 +534,14 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data,
c->u.w_cmd.addr[2] = adr;
c->u.w_cmd.data = cpu_to_be32(data);
cmd_len = offsetof(struct wilc_spi_cmd, u.w_cmd.crc);
- if (!spi_priv->crc_off)
+ if (spi_priv->crc7_enabled)
c->u.w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
} else {
dev_err(&spi->dev, "write cmd [%x] not supported\n", cmd);
return -EINVAL;
}
- if (!spi_priv->crc_off)
+ if (spi_priv->crc7_enabled)
cmd_len += 1;
resp_len = sizeof(*r);
@@ -507,6 +579,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
{
struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
+ u16 crc_recv, crc_calc;
u8 wb[32], rb[32];
int cmd_len, resp_len;
int retry, ix = 0;
@@ -525,7 +598,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
c->u.dma_cmd.size[0] = sz >> 8;
c->u.dma_cmd.size[1] = sz;
cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd.crc);
- if (!spi_priv->crc_off)
+ if (spi_priv->crc7_enabled)
c->u.dma_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
} else if (cmd == CMD_DMA_EXT_WRITE || cmd == CMD_DMA_EXT_READ) {
c->u.dma_cmd_ext.addr[0] = adr >> 16;
@@ -535,14 +608,14 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
c->u.dma_cmd_ext.size[1] = sz >> 8;
c->u.dma_cmd_ext.size[2] = sz;
cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd_ext.crc);
- if (!spi_priv->crc_off)
+ if (spi_priv->crc7_enabled)
c->u.dma_cmd_ext.crc[0] = wilc_get_crc7(wb, cmd_len);
} else {
dev_err(&spi->dev, "dma read write cmd [%x] not supported\n",
cmd);
return -EINVAL;
}
- if (!spi_priv->crc_off)
+ if (spi_priv->crc7_enabled)
cmd_len += 1;
resp_len = sizeof(*r);
@@ -608,12 +681,22 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
}
/*
- * Read Crc
+ * Read CRC
*/
- if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
- dev_err(&spi->dev,
- "Failed block crc read, bus err\n");
- return -EINVAL;
+ if (spi_priv->crc16_enabled) {
+ if (wilc_spi_rx(wilc, crc, 2)) {
+ dev_err(&spi->dev,
+ "Failed block CRC read, bus err\n");
+ return -EINVAL;
+ }
+ crc_recv = (crc[0] << 8) | crc[1];
+ crc_calc = crc_itu_t(0xffff, &b[ix], nbytes);
+ if (crc_recv != crc_calc) {
+ dev_err(&spi->dev, "%s: bad CRC 0x%04x "
+ "(calculated 0x%04x)\n", __func__,
+ crc_recv, crc_calc);
+ return -EINVAL;
+ }
}
ix += nbytes;
@@ -680,11 +763,13 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
int result;
result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0);
if (result) {
- dev_err(&spi->dev, "Failed internal read cmd...\n");
+ if (!spi_priv->probing_crc)
+ dev_err(&spi->dev, "Failed internal read cmd...\n");
return result;
}
@@ -721,6 +806,52 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
return 0;
}
+static int spi_data_rsp(struct wilc *wilc, u8 cmd)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ int result, i;
+ u8 rsp[4];
+
+ /*
+ * The response to data packets is two bytes long. For
+ * efficiency's sake, wilc_spi_write() wisely ignores the
+ * responses for all packets but the final one. The downside
+ * of that optimization is that when the final data packet is
+ * short, we may receive (part of) the response to the
+ * second-to-last packet before the one for the final packet.
+ * To handle this, we always read 4 bytes and then search for
+ * the last byte that contains the "Response Start" code (0xc
+ * in the top 4 bits). We then know that this byte is the
+ * first response byte of the final data packet.
+ */
+ result = wilc_spi_rx(wilc, rsp, sizeof(rsp));
+ if (result) {
+ dev_err(&spi->dev, "Failed bus error...\n");
+ return result;
+ }
+
+ for (i = sizeof(rsp) - 2; i >= 0; --i)
+ if (FIELD_GET(RSP_START_FIELD, rsp[i]) == RSP_START_TAG)
+ break;
+
+ if (i < 0) {
+ dev_err(&spi->dev,
+ "Data packet response missing (%02x %02x %02x %02x)\n",
+ rsp[0], rsp[1], rsp[2], rsp[3]);
+ return -EINVAL;
+ }
+
+ /* rsp[i] is the last response start byte */
+
+ if (FIELD_GET(RSP_TYPE_FIELD, rsp[i]) != RSP_TYPE_LAST_PACKET ||
+     rsp[i + 1] != RSP_STATE_NO_ERROR) {
+ dev_err(&spi->dev, "Data response error (%02x %02x)\n",
+ rsp[i], rsp[i + 1]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
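Editor's note: a small sketch of how the new field macros decode a single response byte, as used in the scan above (the helper name is hypothetical):

        static bool example_is_final_rsp(u8 b)
        {
                /* 0xc3 -> start nibble 0xc, type 0x3 (last packet) -> true */
                return FIELD_GET(RSP_START_FIELD, b) == RSP_START_TAG &&
                       FIELD_GET(RSP_TYPE_FIELD, b) == RSP_TYPE_LAST_PACKET;
        }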
static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct spi_device *spi = to_spi_device(wilc->dev);
@@ -748,7 +879,10 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
return result;
}
- return 0;
+ /*
+ * Data response
+ */
+ return spi_data_rsp(wilc, CMD_DMA_EXT_WRITE);
}
/********************************************
@@ -772,7 +906,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
u32 reg;
u32 chipid;
static int isinit;
- int ret;
+ int ret, i;
if (isinit) {
ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
@@ -787,42 +921,54 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
*/
/*
- * TODO: We can remove the CRC trials if there is a definite
- * way to reset
+ * Infer the CRC settings that are currently in effect. This
+ * is necessary because we can't be sure that the chip has
+ * been RESET (e.g., after module unload and reload).
*/
- /* the SPI to it's initial value. */
- ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
- if (ret) {
- /*
- * Read failed. Try with CRC off. This might happen when module
- * is removed but chip isn't reset
- */
- spi_priv->crc_off = 1;
- dev_err(&spi->dev,
- "Failed read with CRC on, retrying with CRC off\n");
+ spi_priv->probing_crc = true;
+ spi_priv->crc7_enabled = enable_crc7;
+ spi_priv->crc16_enabled = false; /* don't check CRC16 during probing */
+ for (i = 0; i < 2; ++i) {
ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
- if (ret) {
- /*
- * Read failed with both CRC on and off,
- * something went bad
- */
- dev_err(&spi->dev, "Failed internal read protocol\n");
- return ret;
- }
+ if (ret == 0)
+ break;
+ spi_priv->crc7_enabled = !enable_crc7;
}
- if (spi_priv->crc_off == 0) {
- reg &= ~0xc; /* disable crc checking */
- reg &= ~0x70;
- reg |= (0x5 << 4);
- ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg);
- if (ret) {
- dev_err(&spi->dev,
- "[wilc spi %d]: Failed internal write reg\n",
- __LINE__);
- return ret;
- }
- spi_priv->crc_off = 1;
+ if (ret) {
+ dev_err(&spi->dev, "Failed with CRC7 on and off.\n");
+ return ret;
+ }
+
+ /* set up the desired CRC configuration: */
+ reg &= ~(PROTOCOL_REG_CRC7_MASK | PROTOCOL_REG_CRC16_MASK);
+ if (enable_crc7)
+ reg |= PROTOCOL_REG_CRC7_MASK;
+ if (enable_crc16)
+ reg |= PROTOCOL_REG_CRC16_MASK;
+
+ /* set up the data packet size: */
+ BUILD_BUG_ON(DATA_PKT_LOG_SZ < DATA_PKT_LOG_SZ_MIN ||
+ DATA_PKT_LOG_SZ > DATA_PKT_LOG_SZ_MAX);
+ reg &= ~PROTOCOL_REG_PKT_SZ_MASK;
+ reg |= FIELD_PREP(PROTOCOL_REG_PKT_SZ_MASK,
+ DATA_PKT_LOG_SZ - DATA_PKT_LOG_SZ_MIN);
+
+ /* establish the new setup: */
+ ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg);
+ if (ret) {
+ dev_err(&spi->dev,
+ "[wilc spi %d]: Failed internal write reg\n",
+ __LINE__);
+ return ret;
}
+ /* update our state to match new protocol settings: */
+ spi_priv->crc7_enabled = enable_crc7;
+ spi_priv->crc16_enabled = enable_crc16;
+
+ /* re-read to make sure new settings are in effect: */
+ spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+
+ spi_priv->probing_crc = false;
/*
* make sure can read back chip id correctly
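Editor's note: the CRC-probing logic above, condensed into a standalone sketch for readability. It restates the loop in the hunk under the same assumptions (enable_crc7 module parameter, spi_internal_read() as introduced in this patch); the helper name is illustrative:

        static int example_probe_crc7(struct wilc *wilc,
                                      struct wilc_spi *spi_priv, u32 *reg)
        {
                int i, ret = -EINVAL;

                spi_priv->probing_crc = true;
                spi_priv->crc7_enabled = enable_crc7;
                spi_priv->crc16_enabled = false; /* never checked while probing */

                /* try the configured CRC7 setting, then the opposite one */
                for (i = 0; i < 2; ++i) {
                        ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET,
                                                reg);
                        if (!ret)
                                break;
                        spi_priv->crc7_enabled = !spi_priv->crc7_enabled;
                }
                return ret;
        }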
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 31d51385ba93..2030fc7f53ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -552,12 +552,60 @@ static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc)
void chip_allow_sleep(struct wilc *wilc)
{
u32 reg = 0;
+ const struct wilc_hif_func *hif_func = wilc->hif_func;
+ u32 wakeup_reg, wakeup_bit;
+ u32 to_host_from_fw_reg, to_host_from_fw_bit;
+ u32 from_host_to_fw_reg, from_host_to_fw_bit;
+ u32 trials = 100;
+ int ret;
+
+ if (wilc->io_type == WILC_HIF_SDIO) {
+ wakeup_reg = WILC_SDIO_WAKEUP_REG;
+ wakeup_bit = WILC_SDIO_WAKEUP_BIT;
+ from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT;
+ } else {
+ wakeup_reg = WILC_SPI_WAKEUP_REG;
+ wakeup_bit = WILC_SPI_WAKEUP_BIT;
+ from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT;
+ }
+
+ while (--trials) {
+ ret = hif_func->hif_read_reg(wilc, to_host_from_fw_reg, &reg);
+ if (ret)
+ return;
+ if ((reg & to_host_from_fw_bit) == 0)
+ break;
+ }
+ if (!trials)
+ pr_warn("FW not responding\n");
- wilc->hif_func->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg);
+ /* Clear the wakeup bit, if it is set */
+ ret = hif_func->hif_read_reg(wilc, wakeup_reg, &reg);
+ if (ret)
+ return;
+ if (reg & wakeup_bit) {
+ reg &= ~wakeup_bit;
+ ret = hif_func->hif_write_reg(wilc, wakeup_reg, reg);
+ if (ret)
+ return;
+ }
- wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg & ~WILC_SDIO_WAKEUP_BIT);
- wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, 0);
+ ret = hif_func->hif_read_reg(wilc, from_host_to_fw_reg, &reg);
+ if (ret)
+ return;
+ if (reg & from_host_to_fw_bit) {
+ reg &= ~from_host_to_fw_bit;
+ ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg, reg);
+ if (ret)
+ return;
+ }
}
EXPORT_SYMBOL_GPL(chip_allow_sleep);
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index d55eb6b3a12a..771c25fa849b 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -97,6 +97,12 @@
#define WILC_SPI_WAKEUP_REG 0x1
#define WILC_SPI_WAKEUP_BIT BIT(1)
+#define WILC_SPI_HOST_TO_FW_REG 0x0b
+#define WILC_SPI_HOST_TO_FW_BIT BIT(0)
+
+#define WILC_SPI_FW_TO_HOST_REG 0x10
+#define WILC_SPI_FW_TO_HOST_BIT BIT(0)
+
#define WILC_SPI_PROTOCOL_OFFSET (WILC_SPI_PROTOCOL_CONFIG - \
WILC_SPI_REG_BASE)
@@ -392,7 +398,6 @@ struct wilc_cfg_rsp {
u8 seq_no;
};
-struct wilc;
struct wilc_vif;
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 504b4d0b98c4..84b15a655eab 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -680,13 +680,10 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
eth_zero_addr(vif->bssid);
ret = qtnf_cmd_send_connect(vif, sme);
- if (ret) {
+ if (ret)
pr_err("VIF%u.%u: failed to connect\n",
vif->mac->macid, vif->vifid);
- goto out;
- }
-out:
return ret;
}
@@ -702,13 +699,10 @@ qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev,
pr_warn("unexpected bssid: %pM", auth->bssid);
ret = qtnf_cmd_send_external_auth(vif, auth);
- if (ret) {
+ if (ret)
pr_err("VIF%u.%u: failed to report external auth\n",
vif->mac->macid, vif->vifid);
- goto out;
- }
-out:
return ret;
}
@@ -727,8 +721,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
}
if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
- ret = -EOPNOTSUPP;
- goto out;
+ return -EOPNOTSUPP;
}
ret = qtnf_cmd_send_disconnect(vif, reason_code);
@@ -742,7 +735,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
NULL, 0, true, GFP_KERNEL);
}
-out:
return ret;
}
@@ -935,13 +927,10 @@ static int qtnf_update_owe_info(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
ret = qtnf_cmd_send_update_owe(vif, owe_info);
- if (ret) {
+ if (ret)
pr_err("VIF%u.%u: failed to update owe info\n",
vif->mac->macid, vif->vifid);
- goto out;
- }
-out:
return ret;
}
@@ -987,18 +976,14 @@ static int qtnf_resume(struct wiphy *wiphy)
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
- ret = -EFAULT;
- goto exit;
+ return -EFAULT;
}
ret = qtnf_cmd_send_wowlan_set(vif, NULL);
- if (ret) {
+ if (ret)
pr_err("MAC%u: failed to reset WoWLAN triggers\n",
mac->macid);
- goto exit;
- }
-exit:
return ret;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index f3ccbd2b1084..c68563c83098 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -379,10 +379,6 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -407,10 +403,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
cmd->do_register = reg;
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -446,10 +439,7 @@ int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -477,10 +467,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -1677,10 +1663,7 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
wiphy->retry_short);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(mac->bus);
return ret;
@@ -1772,10 +1755,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
params->seq_len);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -1807,10 +1787,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd->pairwise = pairwise;
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -1837,10 +1814,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
cmd->multicast = multicast;
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -1864,10 +1838,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
cmd->key_index = key_index;
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -1931,8 +1902,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
}
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -1966,10 +1935,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
cmd->reason_code = cpu_to_le16(params->reason_code);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -2189,10 +2155,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -2218,10 +2180,6 @@ int qtnf_cmd_send_external_auth(struct qtnf_vif *vif,
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -2245,10 +2203,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
cmd->reason = cpu_to_le16(reason_code);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -2271,10 +2226,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
@@ -2580,10 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
qtnf_bus_lock(bus);
ret = qtnf_cmd_send(bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(bus);
return ret;
@@ -2611,10 +2558,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
qtnf_bus_lock(bus);
ret = qtnf_cmd_send(bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(bus);
return ret;
@@ -2639,10 +2582,7 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
qtnf_bus_lock(bus);
ret = qtnf_cmd_send(bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(bus);
return ret;
@@ -2754,10 +2694,7 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
cmd->triggers = cpu_to_le32(triggers);
ret = qtnf_cmd_send(bus, cmd_skb);
- if (ret)
- goto out;
-out:
qtnf_bus_unlock(bus);
return ret;
}
@@ -2821,10 +2758,6 @@ int qtnf_cmd_send_update_owe(struct qtnf_vif *vif,
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(vif->mac->bus);
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index c775c177933b..8dc80574d08d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
return 0;
if (ev->ssid_len) {
- memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
- auth.ssid.ssid_len = ev->ssid_len;
+ int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+ memcpy(auth.ssid.ssid, ev->ssid, len);
+ auth.ssid.ssid_len = len;
}
auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 61a4f1ad31e2..e95c101c2711 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -989,11 +989,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr)
{
- const char *mac_addr;
-
- mac_addr = of_get_mac_address(rt2x00dev->dev->of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(eeprom_mac_addr, mac_addr);
+ of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr);
if (!is_valid_ether_addr(eeprom_mac_addr)) {
eth_random_addr(eeprom_mac_addr);
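Editor's note: this hunk tracks the of_get_mac_address() API change, which now fills a caller-supplied buffer instead of returning a pointer. The resulting pattern, in isolation:

        static void example_fetch_mac(struct device_node *np, u8 *mac)
        {
                /* writes ETH_ALEN bytes into mac; errors leave it untouched */
                of_get_mac_address(np, mac);
                if (!is_valid_ether_addr(mac))
                        eth_random_addr(mac);
        }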
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index d6d1be4169e5..d1a566cc0c9e 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1391,7 +1391,6 @@ struct rtl8xxxu_priv {
struct delayed_work ra_watchdog;
struct work_struct c2hcmd_work;
struct sk_buff_head c2hcmd_queue;
- spinlock_t c2hcmd_lock;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 5cd7ef3625c5..9ff09cf7eb62 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1145,7 +1145,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
ht = false;
- /* fall through */
+ fallthrough;
case NL80211_CHAN_WIDTH_20:
opmode |= BW_OPMODE_20MHZ;
rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
@@ -1272,7 +1272,7 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
ht = false;
- /* fall through */
+ fallthrough;
case NL80211_CHAN_WIDTH_20:
rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20;
subchannel = 0;
@@ -1741,11 +1741,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
case 3:
priv->ep_tx_low_queue = 1;
priv->ep_tx_count++;
- /* fall through */
+ fallthrough;
case 2:
priv->ep_tx_normal_queue = 1;
priv->ep_tx_count++;
- /* fall through */
+ fallthrough;
case 1:
priv->ep_tx_high_queue = 1;
priv->ep_tx_count++;
@@ -5423,7 +5423,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
struct rtl8xxxu_priv *priv;
struct rtl8723bu_c2h *c2h;
struct sk_buff *skb = NULL;
- unsigned long flags;
u8 bt_info = 0;
struct rtl8xxxu_btcoex *btcoex;
struct rtl8xxxu_ra_report *rarpt;
@@ -5439,9 +5438,7 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
goto out;
while (!skb_queue_empty(&priv->c2hcmd_queue)) {
- spin_lock_irqsave(&priv->c2hcmd_lock, flags);
- skb = __skb_dequeue(&priv->c2hcmd_queue);
- spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+ skb = skb_dequeue(&priv->c2hcmd_queue);
c2h = (struct rtl8723bu_c2h *)skb->data;
@@ -5499,7 +5496,6 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
struct device *dev = &priv->udev->dev;
int len;
- unsigned long flags;
len = skb->len - 2;
@@ -5538,9 +5534,7 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
break;
}
- spin_lock_irqsave(&priv->c2hcmd_lock, flags);
- __skb_queue_tail(&priv->c2hcmd_queue, skb);
- spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+ skb_queue_tail(&priv->c2hcmd_queue, skb);
schedule_work(&priv->c2hcmd_work);
}
@@ -6606,7 +6600,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- spin_lock_init(&priv->c2hcmd_lock);
INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
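Editor's note: the private c2hcmd_lock can go away because struct sk_buff_head embeds its own spinlock, and the non-__ queue helpers take it. Producer/consumer shape using only the locked variants:

        static void example_c2h_enqueue(struct sk_buff_head *q,
                                        struct sk_buff *skb)
        {
                skb_queue_tail(q, skb);         /* takes q->lock internally */
        }

        static void example_c2h_drain(struct sk_buff_head *q)
        {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(q)) != NULL) {   /* locked dequeue */
                        /* ... parse the C2H event ... */
                        dev_kfree_skb(skb);
                }
        }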
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 6e8bd99e8911..2a7ee90a3f54 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -551,7 +551,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.rf_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.entry_list_lock);
- spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
spin_lock_init(&rtlpriv->locks.scan_list_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
spin_lock_init(&rtlpriv->locks.fw_ps_lock);
@@ -2269,7 +2268,6 @@ static bool rtl_c2h_fast_cmd(struct ieee80211_hw *hw, struct sk_buff *skb)
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flags;
if (rtl_c2h_fast_cmd(hw, skb)) {
rtl_c2h_content_parsing(hw, skb);
@@ -2278,11 +2276,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* enqueue */
- spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
-
- __skb_queue_tail(&rtlpriv->c2hcmd_queue, skb);
-
- spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
+ skb_queue_tail(&rtlpriv->c2hcmd_queue, skb);
/* wake up wq */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0);
@@ -2340,16 +2334,11 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb;
- unsigned long flags;
int i;
for (i = 0; i < 200; i++) {
/* dequeue a task */
- spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
-
- skb = __skb_dequeue(&rtlpriv->c2hcmd_queue);
-
- spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
+ skb = skb_dequeue(&rtlpriv->c2hcmd_queue);
/* do it */
if (!skb)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 965bd9589045..8efe2f5e5b9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -564,7 +564,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
rtlhal->enter_pnp_sleep = false;
rtlhal->wake_from_pnp_sleep = true;
- /* to resovle s4 can not wake up*/
+ /* to resolve the issue that S4 cannot wake up */
now = ktime_get_real_seconds();
if (now - rtlhal->last_suspend_sec < 5)
return -1;
@@ -806,7 +806,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (0 == changed_flags)
return;
- /*TODO: we disable broadcase now, so enable here */
+ /*TODO: we disable broadcast now, so enable here */
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
@@ -1018,6 +1018,25 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
}
}
+void rtl_update_beacon_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, update_beacon_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_vif *vif = rtlpriv->mac80211.vif;
+
+ if (!vif) {
+ WARN_ONCE(true, "no vif to update beacon\n");
+ return;
+ }
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ send_beacon_frame(hw, vif);
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+EXPORT_SYMBOL_GPL(rtl_update_beacon_work_callback);
+
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1747,6 +1766,18 @@ static void rtl_op_flush(struct ieee80211_hw *hw,
rtlpriv->intf_ops->flush(hw, queues, drop);
}
+static int rtl_op_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
+ schedule_work(&rtlpriv->works.update_beacon_work);
+
+ return 0;
+}
+
/* Description:
* This routine deals with the Power Configuration CMD
* parsing for RTL8723/RTL8188E Series IC.
@@ -1796,7 +1827,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
GET_PWR_CFG_MASK(cfg_cmd));
- /*Write the value back to sytem register*/
+ /*Write the value back to system register*/
rtl_write_byte(rtlpriv, offset, value);
break;
case PWR_CMD_POLLING:
@@ -1903,6 +1934,7 @@ const struct ieee80211_ops rtl_ops = {
.sta_add = rtl_op_sta_add,
.sta_remove = rtl_op_sta_remove,
.flush = rtl_op_flush,
+ .set_tim = rtl_op_set_tim,
};
EXPORT_SYMBOL_GPL(rtl_ops);
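Editor's note: .set_tim can be called from atomic context, so the new handler only schedules update_beacon_work and the beacon is rebuilt under conf_mutex in the callback above. The sketch below shows the INIT_WORK wiring this relies on; the placement in the driver's init path is an assumption, as that hunk is not shown here:

        static void example_init_beacon_work(struct rtl_priv *rtlpriv)
        {
                /* assumed to run once at probe/init time */
                INIT_WORK(&rtlpriv->works.update_beacon_work,
                          rtl_update_beacon_work_callback);
        }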
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
index 7447ff456710..345161b47442 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.h
+++ b/drivers/net/wireless/realtek/rtlwifi/core.h
@@ -60,5 +60,6 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
bool rtl_btc_status_false(void);
void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
+void rtl_update_beacon_work_callback(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 861cc663ca93..bf686a916acb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -2466,8 +2466,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw)
/* 0:Low, 1:High, 2:From Efuse. */
rtlpriv->btcoexist.reg_bt_iso = 2;
- /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
- rtlpriv->btcoexist.reg_bt_sco = 3;
/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
rtlpriv->btcoexist.reg_bt_sco = 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 1dbdddce0823..a74724c971b9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -372,18 +372,14 @@ static struct pci_driver rtl92de_driver = {
/* add global spin lock to solve the problem that
* Dul mac register operation on the same time */
-spinlock_t globalmutex_power;
-spinlock_t globalmutex_for_fwdownload;
-spinlock_t globalmutex_for_power_and_efuse;
+DEFINE_SPINLOCK(globalmutex_power);
+DEFINE_SPINLOCK(globalmutex_for_fwdownload);
+DEFINE_SPINLOCK(globalmutex_for_power_and_efuse);
static int __init rtl92de_module_init(void)
{
int ret = 0;
- spin_lock_init(&globalmutex_power);
- spin_lock_init(&globalmutex_for_fwdownload);
- spin_lock_init(&globalmutex_for_power_and_efuse);
-
ret = pci_register_driver(&rtl92de_driver);
if (ret)
WARN_ONCE(true, "rtl8192de: No device found\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index 27c8a5d96520..fcaaf664cbec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x824, 0x00030FE0,
0x828, 0x00000000,
0x82C, 0x002081DD,
- 0x830, 0x2AAA8E24,
+ 0x830, 0x2AAAEEC8,
0x834, 0x0037A706,
0x838, 0x06489B44,
0x83C, 0x0000095B,
@@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x9D8, 0x00000000,
0x9DC, 0x00000000,
0x9E0, 0x00005D00,
- 0x9E4, 0x00000002,
+ 0x9E4, 0x00000003,
0x9E8, 0x00000001,
0xA00, 0x00D047C8,
- 0xA04, 0x01FF000C,
+ 0xA04, 0x01FF800C,
0xA08, 0x8C8A8300,
0xA0C, 0x2E68000F,
0xA10, 0x9500BB78,
@@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x083, 0x00021800,
0x084, 0x00028000,
0x085, 0x00048000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x0009483A,
+ 0xA0000000, 0x00000000,
0x086, 0x00094838,
+ 0xB0000000, 0x00000000,
0x087, 0x00044980,
0x088, 0x00048000,
0x089, 0x0000D480,
@@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x03C, 0x000CA000,
0x0EF, 0x00000000,
0x0EF, 0x00001100,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004ADF3,
0x034, 0x00049DF0,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004ADF3,
0x034, 0x00049DF0,
- 0xFF0F0404, 0xCDEF,
- 0x034, 0x0004ADF3,
- 0x034, 0x00049DF0,
- 0xFF0F0200, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004ADF5,
0x034, 0x00049DF2,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A0F3,
+ 0x034, 0x000490B1,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004A0F3,
0x034, 0x000490B1,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xA0000000, 0x00000000,
0x034, 0x0004ADF7,
0x034, 0x00049DF3,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00048DED,
- 0x034, 0x00047DEA,
- 0x034, 0x00046DE7,
- 0x034, 0x00045CE9,
- 0x034, 0x00044CE6,
- 0x034, 0x000438C6,
- 0x034, 0x00042886,
- 0x034, 0x00041486,
- 0x034, 0x00040447,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00048DED,
0x034, 0x00047DEA,
0x034, 0x00046DE7,
@@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00042886,
0x034, 0x00041486,
0x034, 0x00040447,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00048DED,
0x034, 0x00047DEA,
0x034, 0x00046DE7,
@@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00042886,
0x034, 0x00041486,
0x034, 0x00040447,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000480AE,
+ 0x034, 0x000470AB,
+ 0x034, 0x0004608B,
+ 0x034, 0x00045069,
+ 0x034, 0x00044048,
+ 0x034, 0x00043045,
+ 0x034, 0x00042026,
+ 0x034, 0x00041023,
+ 0x034, 0x00040002,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x000480AE,
0x034, 0x000470AB,
0x034, 0x0004608B,
@@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00042026,
0x034, 0x00041023,
0x034, 0x00040002,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xA0000000, 0x00000000,
0x034, 0x00048DEF,
0x034, 0x00047DEC,
0x034, 0x00046DE9,
@@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x0004248A,
0x034, 0x0004108D,
0x034, 0x0004008A,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0200, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000210, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002ADF4,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A0F3,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002A0F3,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002ADF4,
+ 0xA0000000, 0x00000000,
0x034, 0x0002ADF7,
- 0xFF0F0200, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00029DF4,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00029DF4,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00029DF4,
- 0xFF0F0200, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00029DF1,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000290F0,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x000290F0,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00029DF1,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00029DF4,
+ 0xA0000000, 0x00000000,
0x034, 0x00029DF2,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00028DF1,
- 0x034, 0x00027DEE,
- 0x034, 0x00026DEB,
- 0x034, 0x00025CEC,
- 0x034, 0x00024CE9,
- 0x034, 0x000238CA,
- 0x034, 0x00022889,
- 0x034, 0x00021489,
- 0x034, 0x0002044A,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00028DF1,
0x034, 0x00027DEE,
0x034, 0x00026DEB,
@@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022889,
0x034, 0x00021489,
0x034, 0x0002044A,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00028DF1,
0x034, 0x00027DEE,
0x034, 0x00026DEB,
@@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022889,
0x034, 0x00021489,
0x034, 0x0002044A,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x000280AF,
0x034, 0x000270AC,
0x034, 0x0002608B,
@@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022026,
0x034, 0x00021023,
0x034, 0x00020002,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000280AF,
+ 0x034, 0x000270AC,
+ 0x034, 0x0002608B,
+ 0x034, 0x00025069,
+ 0x034, 0x00024048,
+ 0x034, 0x00023045,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020002,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xA0000000, 0x00000000,
0x034, 0x00028DEE,
0x034, 0x00027DEB,
0x034, 0x00026CCD,
@@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022849,
0x034, 0x00021449,
0x034, 0x0002004D,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F02C0, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x8000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D7,
+ 0x034, 0x000090D3,
+ 0x034, 0x000080B1,
+ 0x034, 0x000070AE,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A0D7,
0x034, 0x000090D3,
0x034, 0x000080B1,
0x034, 0x000070AE,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x034, 0x0000ADF7,
0x034, 0x00009DF4,
0x034, 0x00008DF1,
0x034, 0x00007DEE,
- 0xFF0F02C0, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00006DEB,
- 0x034, 0x00005CEC,
- 0x034, 0x00004CE9,
- 0x034, 0x000038CA,
- 0x034, 0x00002889,
- 0x034, 0x00001489,
- 0x034, 0x0000044A,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00006DEB,
0x034, 0x00005CEC,
0x034, 0x00004CE9,
@@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002889,
0x034, 0x00001489,
0x034, 0x0000044A,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00006DEB,
0x034, 0x00005CEC,
0x034, 0x00004CE9,
@@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002889,
0x034, 0x00001489,
0x034, 0x0000044A,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000608D,
0x034, 0x0000506B,
0x034, 0x0000404A,
@@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002044,
0x034, 0x00001025,
0x034, 0x00000004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000608D,
+ 0x034, 0x0000506B,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x00002044,
+ 0x034, 0x00001025,
+ 0x034, 0x00000004,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xA0000000, 0x00000000,
0x034, 0x00006DCD,
0x034, 0x00005CCD,
0x034, 0x00004CCA,
@@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002888,
0x034, 0x00001488,
0x034, 0x00000486,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x035, 0x00000187,
0x035, 0x00008187,
0x035, 0x00010187,
@@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x00040188,
0x035, 0x00048188,
0x035, 0x00050188,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x035, 0x00000187,
0x035, 0x00008187,
0x035, 0x00010187,
@@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x00040188,
0x035, 0x00048188,
0x035, 0x00050188,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x00000128,
+ 0x035, 0x00008128,
+ 0x035, 0x00010128,
+ 0x035, 0x000201C8,
+ 0x035, 0x000281C8,
+ 0x035, 0x000301C8,
+ 0x035, 0x000401C8,
+ 0x035, 0x000481C8,
+ 0x035, 0x000501C8,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x00000145,
+ 0x035, 0x00008145,
+ 0x035, 0x00010145,
+ 0x035, 0x00020196,
+ 0x035, 0x00028196,
+ 0x035, 0x00030196,
+ 0x035, 0x000401C7,
+ 0x035, 0x000481C7,
+ 0x035, 0x000501C7,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x00000128,
+ 0x035, 0x00008128,
+ 0x035, 0x00010128,
+ 0x035, 0x000201C8,
+ 0x035, 0x000281C8,
+ 0x035, 0x000301C8,
+ 0x035, 0x000401C8,
+ 0x035, 0x000481C8,
+ 0x035, 0x000501C8,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x035, 0x00000187,
0x035, 0x00008187,
0x035, 0x00010187,
@@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x00040188,
0x035, 0x00048188,
0x035, 0x00050188,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x035, 0x00000145,
0x035, 0x00008145,
0x035, 0x00010145,
@@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x000401C7,
0x035, 0x000481C7,
0x035, 0x000501C7,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x036, 0x00085733,
0x036, 0x0008D733,
0x036, 0x00095733,
@@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x000CE4B4,
0x036, 0x000D64B4,
0x036, 0x000DE4B4,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x036, 0x00085733,
0x036, 0x0008D733,
0x036, 0x00095733,
@@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x000CE4B4,
0x036, 0x000D64B4,
0x036, 0x000DE4B4,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x000063B5,
+ 0x036, 0x0000E3B5,
+ 0x036, 0x000163B5,
+ 0x036, 0x0001E3B5,
+ 0x036, 0x000263B5,
+ 0x036, 0x0002E3B5,
+ 0x036, 0x000363B5,
+ 0x036, 0x0003E3B5,
+ 0x036, 0x000463B5,
+ 0x036, 0x0004E3B5,
+ 0x036, 0x000563B5,
+ 0x036, 0x0005E3B5,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x000056B3,
+ 0x036, 0x0000D6B3,
+ 0x036, 0x000156B3,
+ 0x036, 0x0001D6B3,
+ 0x036, 0x00026634,
+ 0x036, 0x0002E634,
+ 0x036, 0x00036634,
+ 0x036, 0x0003E634,
+ 0x036, 0x000467B4,
+ 0x036, 0x0004E7B4,
+ 0x036, 0x000567B4,
+ 0x036, 0x0005E7B4,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x000063B5,
+ 0x036, 0x0000E3B5,
+ 0x036, 0x000163B5,
+ 0x036, 0x0001E3B5,
+ 0x036, 0x000263B5,
+ 0x036, 0x0002E3B5,
+ 0x036, 0x000363B5,
+ 0x036, 0x0003E3B5,
+ 0x036, 0x000463B5,
+ 0x036, 0x0004E3B5,
+ 0x036, 0x000563B5,
+ 0x036, 0x0005E3B5,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x036, 0x00085733,
0x036, 0x0008D733,
0x036, 0x00095733,
@@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x000CE4B4,
0x036, 0x000D64B4,
0x036, 0x000DE4B4,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x036, 0x000056B3,
0x036, 0x0000D6B3,
0x036, 0x000156B3,
@@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x0004E7B4,
0x036, 0x000567B4,
0x036, 0x0005E7B4,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000008,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000001C8,
0x03C, 0x00000492,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000001C8,
0x03C, 0x00000492,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000001B6,
+ 0x03C, 0x00000492,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000022A,
+ 0x03C, 0x00000594,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000001B6,
+ 0x03C, 0x00000492,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000001C8,
0x03C, 0x00000492,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x03C, 0x0000022A,
0x03C, 0x00000594,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000800,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000800,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000800,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000820,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000820,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000800,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000800,
+ 0xA0000000, 0x00000000,
0x03C, 0x00000900,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000002,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x008, 0x0004E400,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x008, 0x0004E400,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x008, 0x0004E400,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x008, 0x00002000,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0DF, 0x000000C0,
- 0x01F, 0x00040064,
- 0xFF0F0104, 0xABCD,
+ 0x01F, 0x00000064,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x058, 0x000A7284,
0x059, 0x000600EC,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x058, 0x000A7284,
0x059, 0x000600EC,
- 0xFF0F0404, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x058, 0x000A7284,
0x059, 0x000600EC,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x058, 0x00081184,
0x059, 0x0006016C,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000E8D73,
0x062, 0x00093FC5,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000E8D73,
0x062, 0x00093FC5,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x000EFD83,
+ 0x062, 0x00093FCC,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x000EAD53,
+ 0x062, 0x00093BC4,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x000EFD83,
+ 0x062, 0x00093FCC,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000E8D73,
0x062, 0x00093FC5,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x061, 0x000EAD53,
0x062, 0x00093BC4,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000110EB,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xFF0F0200, 0xCDEF,
- 0x063, 0x000710E9,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000110EB,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000110E9,
+ 0xA0000000, 0x00000000,
0x063, 0x000714E9,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
+ 0x064, 0x0001C27C,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
+ 0x064, 0x0001C27C,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x064, 0x0001C27C,
- 0xFF0F0204, 0xCDEF,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x064, 0x0001C67C,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
0x064, 0x0001C27C,
- 0xFF0F0404, 0xCDEF,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x064, 0x0001C27C,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x064, 0x0001C67C,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0200, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00091016,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00091016,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x065, 0x00093016,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x065, 0x00093015,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00093015,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00093016,
+ 0xA0000000, 0x00000000,
0x065, 0x00091016,
- 0xFF0F0200, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x018, 0x00000006,
0x0EF, 0x00002000,
0x03B, 0x0003824B,
@@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x0B4, 0x0001214C,
0x0B7, 0x0003000C,
0x01C, 0x000539D2,
+ 0x0C4, 0x000AFE00,
0x018, 0x0001F12A,
- 0x0FE, 0x00000000,
- 0x0FE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
0x018, 0x0001712A,
};
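
The radio-table hunks above replace the old chip-condition markers (0xFF0Fxxxx headers with 0xABCD/0xCDEF payloads, 0xCDCDCDCD for the default branch, 0xDEAD to close) with the v2 conditional layout: a word of the form 0x8xxxxxxx opens an IF branch and 0x9xxxxxxx an ELSE IF branch, each carrying a four-word header (condition, second condition word, and two masks); 0xA0000000 opens the default branch and 0xB0000000 ends the block. A minimal sketch of how a driver might walk such a table — the names and the exact condition-matching rule are illustrative assumptions, not the in-tree parser:

#include <linux/types.h>

/* Sketch only: match() stands in for the driver's comparison of the
 * header words against the board's cut/package/RFE identity.
 */
static void walk_cond_table(const u32 *tbl, u32 len,
			    bool (*match)(u32 cond1, u32 cond2),
			    void (*write_reg)(u32 addr, u32 data))
{
	bool taken = false;	/* a branch already matched in this block */
	bool skip = false;	/* currently inside a non-matching branch */
	u32 i = 0;

	while (i + 1 < len) {
		u32 v1 = tbl[i], v2 = tbl[i + 1];

		if ((v1 & 0xF0000000) == 0x80000000 ||
		    (v1 & 0xF0000000) == 0x90000000) {
			/* IF / ELSE IF: four-word header */
			skip = taken || !match(v1, v2);
			if (!skip)
				taken = true;
			i += 4;
		} else if (v1 == 0xA0000000) {
			/* ELSE: taken only if nothing matched yet */
			skip = taken;
			i += 2;
		} else if (v1 == 0xB0000000) {
			/* ENDIF: leave the conditional block */
			taken = false;
			skip = false;
			i += 2;
		} else {
			/* plain (addr, data) pair */
			if (!skip)
				write_reg(v1, v2);
			i += 2;
		}
	}
}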
@@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
u32 RTL8821AE_MAC_REG_ARRAY[] = {
+ 0x421, 0x0000000F,
0x428, 0x0000000A,
0x429, 0x00000010,
0x430, 0x00000000,
@@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0xA6360001,
0x81C, 0xA5380001,
0x81C, 0xA43A0001,
- 0x81C, 0xA33C0001,
+ 0x81C, 0x683C0001,
0x81C, 0x673E0001,
0x81C, 0x66400001,
0x81C, 0x65420001,
@@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x017A0001,
0x81C, 0x017C0001,
0x81C, 0x017E0001,
- 0xFF0F02C0, 0xABCD,
+ 0x8000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000101,
+ 0x81C, 0xFA020101,
+ 0x81C, 0xF9040101,
+ 0x81C, 0xF8060101,
+ 0x81C, 0xF7080101,
+ 0x81C, 0xF60A0101,
+ 0x81C, 0xF50C0101,
+ 0x81C, 0xF40E0101,
+ 0x81C, 0xF3100101,
+ 0x81C, 0xF2120101,
+ 0x81C, 0xF1140101,
+ 0x81C, 0xF0160101,
+ 0x81C, 0xEF180101,
+ 0x81C, 0xEE1A0101,
+ 0x81C, 0xED1C0101,
+ 0x81C, 0xEC1E0101,
+ 0x81C, 0xEB200101,
+ 0x81C, 0xEA220101,
+ 0x81C, 0xE9240101,
+ 0x81C, 0xE8260101,
+ 0x81C, 0xE7280101,
+ 0x81C, 0xE62A0101,
+ 0x81C, 0xE52C0101,
+ 0x81C, 0xE42E0101,
+ 0x81C, 0xE3300101,
+ 0x81C, 0xA5320101,
+ 0x81C, 0xA4340101,
+ 0x81C, 0xA3360101,
+ 0x81C, 0x87380101,
+ 0x81C, 0x863A0101,
+ 0x81C, 0x853C0101,
+ 0x81C, 0x843E0101,
+ 0x81C, 0x69400101,
+ 0x81C, 0x68420101,
+ 0x81C, 0x67440101,
+ 0x81C, 0x66460101,
+ 0x81C, 0x49480101,
+ 0x81C, 0x484A0101,
+ 0x81C, 0x474C0101,
+ 0x81C, 0x2A4E0101,
+ 0x81C, 0x29500101,
+ 0x81C, 0x28520101,
+ 0x81C, 0x27540101,
+ 0x81C, 0x26560101,
+ 0x81C, 0x25580101,
+ 0x81C, 0x245A0101,
+ 0x81C, 0x235C0101,
+ 0x81C, 0x055E0101,
+ 0x81C, 0x04600101,
+ 0x81C, 0x03620101,
+ 0x81C, 0x02640101,
+ 0x81C, 0x01660101,
+ 0x81C, 0x01680101,
+ 0x81C, 0x016A0101,
+ 0x81C, 0x016C0101,
+ 0x81C, 0x016E0101,
+ 0x81C, 0x01700101,
+ 0x81C, 0x01720101,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x81C, 0xFB000101,
0x81C, 0xFA020101,
0x81C, 0xF9040101,
@@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x016E0101,
0x81C, 0x01700101,
0x81C, 0x01720101,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x81C, 0xFF000101,
0x81C, 0xFF020101,
0x81C, 0xFE040101,
@@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x046E0101,
0x81C, 0x03700101,
0x81C, 0x02720101,
- 0xFF0F02C0, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x81C, 0x01740101,
0x81C, 0x01760101,
0x81C, 0x01780101,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 6c5e242b1bc5..86a236873254 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -805,6 +805,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
tasklet_kill(&rtlusb->rx_work_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
+ cancel_work_sync(&rtlpriv->works.update_beacon_work);
flush_workqueue(rtlpriv->works.rtl_wq);
@@ -1031,6 +1032,8 @@ int rtl_usb_probe(struct usb_interface *intf,
rtl_fill_h2c_cmd_work_callback);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
+ INIT_WORK(&rtlpriv->works.update_beacon_work,
+ rtl_update_beacon_work_callback);
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
@@ -1070,7 +1073,6 @@ int rtl_usb_probe(struct usb_interface *intf,
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
- err = -ENODEV;
goto error_out;
}
rtlpriv->mac80211.mac80211_registered = 1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index fdccfd29fd61..aa07856411b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2450,7 +2450,6 @@ struct rtl_locks {
spinlock_t waitq_lock;
spinlock_t entry_list_lock;
spinlock_t usb_lock;
- spinlock_t c2hcmd_lock;
spinlock_t scan_list_lock; /* lock for the scan list */
/*FW clock change */
@@ -2487,6 +2486,7 @@ struct rtl_works {
struct work_struct lps_change_work;
struct work_struct fill_h2c_cmd;
+ struct work_struct update_beacon_work;
};
struct rtl_debug {
@@ -3086,14 +3086,9 @@ static inline __le16 rtl_get_fc(struct sk_buff *skb)
return rtl_get_hdr(skb)->frame_control;
}
-static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
static inline u16 rtl_get_tid(struct sk_buff *skb)
{
- return rtl_get_tid_h(rtl_get_hdr(skb));
+ return ieee80211_get_tid(rtl_get_hdr(skb));
}
static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index ea2be1e25065..cedbf3825848 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -787,7 +787,6 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
- struct rtw_efuse *efuse = &rtwdev->efuse;
u8 link = 0;
u8 center_chan = 0;
u8 bw;
@@ -798,7 +797,7 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
if (type != COEX_MEDIA_DISCONNECT)
center_chan = rtwdev->hal.current_channel;
- if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) {
+ if (center_chan == 0) {
link = 0;
center_chan = 0;
bw = 0;
@@ -2325,8 +2324,11 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
if (efuse->share_ant) { /* Shared-Ant */
if (coex_stat->bt_a2dp_exist) {
slot_type = TDMA_4SLOT;
- table_case = 9;
tdma_case = 11;
+ if (coex_stat->wl_gl_busy)
+ table_case = 26;
+ else
+ table_case = 9;
} else {
table_case = 9;
tdma_case = 7;
@@ -2646,6 +2648,11 @@ void rtw_coex_power_on_setting(struct rtw_dev *rtwdev)
rtw_coex_set_gnt_debug(rtwdev);
}
+void rtw_coex_power_off_setting(struct rtw_dev *rtwdev)
+{
+ rtw_write16(rtwdev, REG_WIFI_BT_INFO, BIT_BT_INT_EN);
+}
+
void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only)
{
__rtw_coex_init_hw_config(rtwdev, wifi_only);
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index 8ab9852ec9ed..fc61a0cab3e4 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -393,6 +393,7 @@ void rtw_coex_bt_multi_link_remain_work(struct work_struct *work);
void rtw_coex_wl_ccklock_work(struct work_struct *work);
void rtw_coex_power_on_setting(struct rtw_dev *rtwdev);
+void rtw_coex_power_off_setting(struct rtw_dev *rtwdev);
void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only);
void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type);
@@ -405,4 +406,12 @@ void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type);
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
+static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ return coex_stat->bt_disabled;
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 948cb79050ea..18ab472ea46c 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -10,6 +10,7 @@
#include "fw.h"
#include "debug.h"
#include "phy.h"
+#include "reg.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -34,9 +35,17 @@ struct rtw_debugfs_priv {
u32 addr;
u32 len;
} read_reg;
+ struct {
+ u8 bit;
+ } dm_cap;
};
};
+static const char * const rtw_dm_cap_strs[] = {
+ [RTW_DM_CAP_NA] = "NA",
+ [RTW_DM_CAP_TXGAPK] = "TXGAPK",
+};
+
static int rtw_debugfs_single_show(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
@@ -270,7 +279,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
if (num != 2) {
rtw_warn(rtwdev, "invalid arguments\n");
- return num;
+ return -EINVAL;
}
debugfs_priv->rsvd_page.page_offset = offset;
@@ -818,6 +827,117 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v)
return 0;
}
+static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ bool input;
+ int ret;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+
+ ret = kstrtobool(tmp, &input);
+ if (ret)
+ return -EINVAL;
+
+ if (!input)
+ return -EINVAL;
+
+ rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+
+ return count;
+}
+
+static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+
+ seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
+ return 0;
+}
+
+static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ int bit;
+ bool en;
+
+ if (kstrtoint_from_user(buffer, count, 10, &bit))
+ return -EINVAL;
+
+ en = bit > 0;
+ bit = abs(bit);
+
+ if (bit >= RTW_DM_CAP_NUM) {
+ rtw_warn(rtwdev, "unknown DM CAP %d\n", bit);
+ return -EINVAL;
+ }
+
+ if (en)
+ dm_info->dm_flags &= ~BIT(bit);
+ else
+ dm_info->dm_flags |= BIT(bit);
+
+ debugfs_priv->dm_cap.bit = bit;
+
+ return count;
+}
+
+static void dump_gapk_status(struct rtw_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ int i, path;
+ u32 val;
+
+ seq_printf(m, "\n(%2d) %c%s\n\n", RTW_DM_CAP_TXGAPK,
+ dm_info->dm_flags & BIT(RTW_DM_CAP_TXGAPK) ? '-' : '+',
+ rtw_dm_cap_strs[RTW_DM_CAP_TXGAPK]);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ val = rtw_read_rf(rtwdev, path, RF_GAINTX, RFREG_MASK);
+ seq_printf(m, "path %d:\n0x%x = 0x%x\n", path, RF_GAINTX, val);
+
+ for (i = 0; i < RF_HW_OFFSET_NUM; i++)
+ seq_printf(m, "[TXGAPK] offset %d %d\n",
+ txgapk->rf3f_fs[path][i], i);
+ seq_puts(m, "\n");
+ }
+}
+
+static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ int i;
+
+ switch (debugfs_priv->dm_cap.bit) {
+ case RTW_DM_CAP_TXGAPK:
+ dump_gapk_status(rtwdev, m);
+ break;
+ default:
+ for (i = 1; i < RTW_DM_CAP_NUM; i++) {
+ seq_printf(m, "(%2d) %c%s\n", i,
+ dm_info->dm_flags & BIT(i) ? '-' : '+',
+ rtw_dm_cap_strs[i]);
+ }
+ break;
+ }
+ debugfs_priv->dm_cap.bit = RTW_DM_CAP_NA;
+ return 0;
+}
+
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
@@ -921,6 +1041,16 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
.cb_read = rtw_debugfs_get_coex_info,
};
+static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
+ .cb_write = rtw_debugfs_set_fw_crash,
+ .cb_read = rtw_debugfs_get_fw_crash,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
+ .cb_write = rtw_debugfs_set_dm_cap,
+ .cb_read = rtw_debugfs_get_dm_cap,
+};
+
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -994,6 +1124,8 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
}
rtw_debugfs_add_r(rf_dump);
rtw_debugfs_add_r(tx_pwr_tbl);
+ rtw_debugfs_add_rw(fw_crash);
+ rtw_debugfs_add_rw(dm_cap);
}
#endif /* CONFIG_RTW88_DEBUGFS */
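
The two new entries are plain read/write debugfs files. A hypothetical userspace snippet exercising them — the path assumes phy0 and the default debugfs mount point, and the sign convention follows rtw_debugfs_set_dm_cap() above (positive enables a capability bit, negative disables it), while any truthy write to fw_crash asks the firmware to simulate a crash:

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/kernel/debug/ieee80211/phy0/rtw88";
	char path[256];

	snprintf(path, sizeof(path), "%s/dm_cap", base);
	write_str(path, "1");	/* enable RTW_DM_CAP_TXGAPK; "-1" disables */

	snprintf(path, sizeof(path), "%s/fw_crash", base);
	write_str(path, "1");	/* trigger a simulated firmware crash */
	return 0;
}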
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index e16e0da26e77..c8efd1900a34 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -19,6 +19,7 @@ enum rtw_debug_mask {
RTW_DBG_PS = 0x00000400,
RTW_DBG_BF = 0x00000800,
RTW_DBG_WOW = 0x00001000,
+ RTW_DBG_CFO = 0x00002000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 6649b84f6b1e..ea2cd4db1d3c 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -350,6 +350,18 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
}
EXPORT_SYMBOL(rtw_fw_do_iqk);
+void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);
+
+ RFK_SET_INFORM_START(h2c_pkt, start);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+EXPORT_SYMBOL(rtw_fw_inform_rfk_status);
+
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -500,6 +512,21 @@ void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
+ SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
+ SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
+ SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
+ SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
+ SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 39c905c1b1d8..7c5b1d75e26f 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -345,6 +345,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_LPS_PG_INFO 0x2b
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_WL_PHY_INFO 0x58
#define H2C_CMD_COEX_TDMA_TYPE 0x60
#define H2C_CMD_QUERY_BT_INFO 0x61
@@ -353,6 +354,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_WL_CH_INFO 0x66
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
+#define H2C_CMD_WIFI_CALIBRATION 0x6d
#define H2C_CMD_KEEP_ALIVE 0x03
#define H2C_CMD_DISCONNECT_DECISION 0x04
@@ -369,6 +371,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_WL_PHY_INFO_TX_TP(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(17, 8))
+#define SET_WL_PHY_INFO_RX_TP(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(27, 18))
+#define SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+
#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
@@ -530,6 +543,9 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16))
#define GET_FW_DUMP_TLV_VAL(_header) \
le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0))
+
+#define RFK_SET_INFORM_START(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
@@ -545,6 +561,7 @@ void rtw_fw_send_general_info(struct rtw_dev *rtwdev);
void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
+void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev);
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
@@ -559,6 +576,7 @@ void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
+void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 2cba327e6218..4c6fc6fb3f83 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -11,6 +11,7 @@ struct rtw_hci_ops {
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
void (*tx_kick_off)(struct rtw_dev *rtwdev);
+ void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop);
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
@@ -258,4 +259,19 @@ static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
return rtwdev->hci.type;
}
+static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues,
+ bool drop)
+{
+ if (rtwdev->hci.ops->flush_queues)
+ rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
+}
+
+static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
+{
+ if (rtwdev->hci.ops->flush_queues)
+ rtwdev->hci.ops->flush_queues(rtwdev,
+ BIT(rtwdev->hw->queues) - 1,
+ drop);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 59028b121b00..d1678aed9d9c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -530,6 +530,25 @@ static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
return 0;
}
+int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
+{
+ u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;
+
+ if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
+		rtw_dbg(rtwdev, RTW_DBG_FW, "failed to start ddma, channel busy\n");
+ return -EBUSY;
+ }
+
+ ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;
+
+ if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
+		rtw_dbg(rtwdev, RTW_DBG_FW, "failed to complete ddma, channel busy\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index ce64cdf7a565..3172aa5ac4de 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -15,7 +15,10 @@
#define ILLEGAL_KEY_GROUP 0xFAAAAA00
/* HW memory address */
+#define OCPBASE_RXBUF_FW_88XX 0x18680000
#define OCPBASE_TXBUF_88XX 0x18780000
+#define OCPBASE_ROM_88XX 0x00000000
+#define OCPBASE_IMEM_88XX 0x00030000
#define OCPBASE_DMEM_88XX 0x00200000
#define OCPBASE_EMEM_88XX 0x00100000
@@ -33,6 +36,7 @@ void rtw_mac_power_off(struct rtw_dev *rtwdev);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
+int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size);
static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 2351dfb0d2e2..333df6b38113 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -520,6 +520,7 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
hw_key_type, hw_key_idx);
break;
case DISABLE_KEY:
+ rtw_hci_flush_all_queues(rtwdev, false);
rtw_mac_flush_all_queues(rtwdev, false);
rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
break;
@@ -670,6 +671,7 @@ static void rtw_ops_flush(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
+ rtw_hci_flush_queues(rtwdev, queues, drop);
rtw_mac_flush_queues(rtwdev, queues, drop);
mutex_unlock(&rtwdev->mutex);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index e6989c0525cc..f3a3a86fa9b5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -345,15 +345,9 @@ static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
"fw crash dump's seq is wrong: %d\n", seq);
goto free_buf;
}
- if (seq == 0 &&
- (GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE ||
- GET_FW_DUMP_TLV_LEN(buf) != FW_CD_LEN ||
- GET_FW_DUMP_TLV_VAL(buf) != FW_CD_VAL)) {
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's tlv is wrong\n");
- goto free_buf;
- }
- print_hex_dump_bytes("rtw88 fw dump: ", DUMP_PREFIX_OFFSET, buf, size);
+ print_hex_dump(KERN_ERR, "rtw88 fw dump: ", DUMP_PREFIX_OFFSET, 16, 1,
+ buf, size, true);
if (GET_FW_DUMP_MORE(buf) == 1) {
rtwdev->fw.prev_dump_seq = seq;
@@ -368,6 +362,78 @@ exit:
return ret;
}
+int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
+ const char *prefix_str)
+{
+ u32 rxff = rtwdev->chip->fw_rxff_size;
+ u32 dump_size, done_size = 0;
+ u8 *buf;
+ int ret;
+
+ buf = vzalloc(size);
+ if (!buf)
+ return -ENOMEM;
+
+ while (size) {
+ dump_size = size > rxff ? rxff : size;
+
+ ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size,
+ dump_size);
+ if (ret) {
+ rtw_err(rtwdev,
+ "ddma fw 0x%x [+0x%x] to fw fifo fail\n",
+ ocp_src, done_size);
+ goto exit;
+ }
+
+ ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0,
+ dump_size, (u32 *)(buf + done_size));
+ if (ret) {
+ rtw_err(rtwdev,
+ "dump fw 0x%x [+0x%x] from fw fifo fail\n",
+ ocp_src, done_size);
+ goto exit;
+ }
+
+ size -= dump_size;
+ done_size += dump_size;
+ }
+
+ print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 1,
+ buf, done_size, true);
+
+exit:
+ vfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(rtw_dump_fw);
+
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
+ const char *prefix_str)
+{
+ u8 *buf;
+ u32 i;
+
+ if (addr & 0x3) {
+ WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr);
+ return -EINVAL;
+ }
+
+ buf = vzalloc(size);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i += 4)
+ *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i);
+
+ print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf,
+ size, true);
+
+ vfree(buf);
+ return 0;
+}
+EXPORT_SYMBOL(rtw_dump_reg);
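
These two helpers give chips the building blocks for the new dump_fw_crash() op added to rtw_chip_ops in this series. A hedged sketch of such a hook — the dump size and register window below are illustrative assumptions, not the actual 8822c layout:

/* Sketch only; sizes and the register window are assumptions. */
static void example_dump_fw_crash(struct rtw_dev *rtwdev)
{
	/* firmware data memory, then a low MAC register window */
	rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw88 fw dmem: ");
	rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw88 mac reg: ");
}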
+
void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
struct ieee80211_bss_conf *conf)
{
@@ -419,10 +485,8 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev)
ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work);
}
-static void rtw_fw_recovery_work(struct work_struct *work)
+static void __fw_recovery_work(struct rtw_dev *rtwdev)
{
- struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
- fw_recovery_work);
	/* rtw_fw_dump_crash_log() returning false indicates that there is
	 * still more log to dump. The driver sets 0x1cf[7:0] = 0x1 to tell firmware
@@ -435,18 +499,26 @@ static void rtw_fw_recovery_work(struct work_struct *work)
}
rtwdev->fw.prev_dump_seq = 0;
- WARN(1, "firmware crash, start reset and recover\n");
+ set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ rtw_chip_dump_fw_crash(rtwdev);
- mutex_lock(&rtwdev->mutex);
+ WARN(1, "firmware crash, start reset and recover\n");
- set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
rcu_read_lock();
rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev);
rcu_read_unlock();
rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev);
rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev);
rtw_enter_ips(rtwdev);
+}
+static void rtw_fw_recovery_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ fw_recovery_work);
+
+ mutex_lock(&rtwdev->mutex);
+ __fw_recovery_work(rtwdev);
mutex_unlock(&rtwdev->mutex);
ieee80211_restart_hw(rtwdev->hw);
@@ -1138,6 +1210,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
static void rtw_power_off(struct rtw_dev *rtwdev)
{
rtw_hci_stop(rtwdev);
+ rtw_coex_power_off_setting(rtwdev);
rtw_mac_power_off(rtwdev);
}
@@ -1393,7 +1466,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
- int ret = 0;
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
@@ -1431,7 +1503,7 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
hal->bfee_sts_cap = 3;
- return ret;
+ return 0;
}
static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 35afea91fd29..dc3744847ba9 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -625,6 +625,7 @@ struct rtw_rx_pkt_stat {
struct rtw_sta_info *si;
struct ieee80211_vif *vif;
+ struct ieee80211_hdr *hdr;
};
DECLARE_EWMA(tp, 10, 2);
@@ -805,6 +806,7 @@ struct rtw_regulatory {
struct rtw_chip_ops {
int (*mac_init)(struct rtw_dev *rtwdev);
+ void (*dump_fw_crash)(struct rtw_dev *rtwdev);
void (*shutdown)(struct rtw_dev *rtwdev);
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
void (*phy_set_param)(struct rtw_dev *rtwdev);
@@ -837,6 +839,8 @@ struct rtw_chip_ops {
struct ieee80211_bss_conf *conf);
void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate);
+ void (*cfo_init)(struct rtw_dev *rtwdev);
+ void (*cfo_track)(struct rtw_dev *rtwdev);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1166,6 +1170,7 @@ struct rtw_chip_info {
bool en_dis_dpd;
u16 dpd_ratemask;
u8 iqk_threshold;
+ u8 lck_threshold;
const struct rtw_pwr_track_tbl *pwr_track_tbl;
u8 bfer_su_max_num;
@@ -1497,9 +1502,46 @@ struct rtw_iqk_info {
} result;
};
+enum rtw_rf_band {
+ RF_BAND_2G_CCK,
+ RF_BAND_2G_OFDM,
+ RF_BAND_5G_L,
+ RF_BAND_5G_M,
+ RF_BAND_5G_H,
+ RF_BAND_MAX
+};
+
+#define RF_GAIN_NUM 11
+#define RF_HW_OFFSET_NUM 10
+
+struct rtw_gapk_info {
+ u32 rf3f_bp[RF_BAND_MAX][RF_GAIN_NUM][RTW_RF_PATH_MAX];
+ u32 rf3f_fs[RTW_RF_PATH_MAX][RF_GAIN_NUM];
+ bool txgapk_bp_done;
+ s8 offset[RF_GAIN_NUM][RTW_RF_PATH_MAX];
+ s8 fianl_offset[RF_GAIN_NUM][RTW_RF_PATH_MAX];
+ u8 read_txgain;
+ u8 channel;
+};
+
+struct rtw_cfo_track {
+ bool is_adjust;
+ u8 crystal_cap;
+ s32 cfo_tail[RTW_RF_PATH_MAX];
+ s32 cfo_cnt[RTW_RF_PATH_MAX];
+ u32 packet_count;
+ u32 packet_count_pre;
+};
+
#define RRSR_INIT_2G 0x15f
#define RRSR_INIT_5G 0x150
+enum rtw_dm_cap {
+ RTW_DM_CAP_NA,
+ RTW_DM_CAP_TXGAPK,
+ RTW_DM_CAP_NUM
+};
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
@@ -1534,6 +1576,7 @@ struct rtw_dm_info {
u32 rrsr_mask_min;
u8 thermal_avg[RTW_RF_PATH_MAX];
u8 thermal_meter_k;
+ u8 thermal_meter_lck;
s8 delta_power_index[RTW_RF_PATH_MAX];
s8 delta_power_index_last[RTW_RF_PATH_MAX];
u8 default_ofdm_index;
@@ -1549,6 +1592,7 @@ struct rtw_dm_info {
u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM];
struct rtw_dpk_info dpk_info;
+ struct rtw_cfo_track cfo_track;
/* [bandwidth 0:20M/1:40M][number of path] */
u8 cck_pd_lv[2][RTW_RF_PATH_MAX];
@@ -1566,7 +1610,10 @@ struct rtw_dm_info {
struct ewma_evm ewma_evm[RTW_EVM_NUM];
struct ewma_snr ewma_snr[RTW_SNR_NUM];
+ u32 dm_flags; /* enum rtw_dm_cap */
struct rtw_iqk_info iqk;
+ struct rtw_gapk_info gapk;
+ bool is_bt_iqk_timeout;
};
struct rtw_efuse {
@@ -1876,6 +1923,12 @@ static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
clear_bit(mac_id, rtwdev->mac_id_map);
}
+static inline void rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->dump_fw_crash)
+ rtwdev->chip->ops->dump_fw_crash(rtwdev);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
@@ -1905,5 +1958,9 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
bool fw_exist);
void rtw_fw_recovery(struct rtw_dev *rtwdev);
+int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
+ const char *prefix_str);
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
+ const char *prefix_str);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 786a48649946..f59a4c462e3b 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ rtw_pci_napi_start(rtwdev);
+
spin_lock_bh(&rtwpci->irq_lock);
+ rtwpci->running = true;
rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
spin_unlock_bh(&rtwpci->irq_lock);
- rtw_pci_napi_start(rtwdev);
-
return 0;
}
static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ spin_lock_bh(&rtwpci->irq_lock);
+ rtwpci->running = false;
+ rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ spin_unlock_bh(&rtwpci->irq_lock);
+
+ synchronize_irq(pdev->irq);
rtw_pci_napi_stop(rtwdev);
spin_lock_bh(&rtwpci->irq_lock);
- rtw_pci_disable_interrupt(rtwdev, rtwpci);
rtw_pci_dma_release(rtwdev, rtwpci);
spin_unlock_bh(&rtwpci->irq_lock);
}
@@ -671,6 +678,8 @@ static u8 ac_to_hwq[] = {
[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};
+static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
+
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -727,6 +736,72 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
+static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
+{
+ u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
+ u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
+
+ return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
+}
+
+static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
+ u32 cur_rp;
+ u8 i;
+
+	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is
+	 * somewhat variable, it's hard to define a reasonable fixed total
+	 * timeout for the read_poll_timeout* helpers. Instead, we bound the
+	 * number of polling attempts, so we just use a for loop with udelay
+	 * here.
+	 */
+ for (i = 0; i < 30; i++) {
+ cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
+ if (cur_rp == ring->r.wp)
+ return;
+
+ udelay(1);
+ }
+
+ if (!drop)
+		rtw_warn(rtwdev, "timed out flushing pci tx ring[%d]\n", pci_q);
+}
+
+static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
+ bool drop)
+{
+ u8 q;
+
+ for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
+		/* It may not be necessary to flush the BCN and H2C tx queues. */
+ if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C)
+ continue;
+
+ if (pci_queues & BIT(q))
+ __pci_flush_queue(rtwdev, q, drop);
+ }
+}
+
+static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+ u32 pci_queues = 0;
+ u8 i;
+
+	/* If all of the hardware queues are requested to be flushed,
+	 * flush all of the pci queues.
+	 */
+ if (queues == BIT(rtwdev->hw->queues) - 1) {
+ pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
+ } else {
+ for (i = 0; i < rtwdev->hw->queues; i++)
+ if (queues & BIT(i))
+ pci_queues |= BIT(ac_to_hwq[i]);
+ }
+
+ __rtw_pci_flush_queues(rtwdev, pci_queues, drop);
+}
+
static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
@@ -882,10 +957,12 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
return ret;
ring = &rtwpci->tx_rings[queue];
+ spin_lock_bh(&rtwpci->irq_lock);
if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
ring->queue_stopped = true;
}
+ spin_unlock_bh(&rtwpci->irq_lock);
return 0;
}
@@ -900,7 +977,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
struct sk_buff *skb;
u32 count;
u32 bd_idx_addr;
- u32 bd_idx, cur_rp;
+ u32 bd_idx, cur_rp, rp_idx;
u16 q_map;
ring = &rtwpci->tx_rings[hw_queue];
@@ -909,6 +986,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
bd_idx = rtw_read32(rtwdev, bd_idx_addr);
cur_rp = bd_idx >> 16;
cur_rp &= TRX_BD_IDX_MASK;
+ rp_idx = ring->r.rp;
if (cur_rp >= ring->r.rp)
count = cur_rp - ring->r.rp;
else
@@ -932,12 +1010,15 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
}
if (ring->queue_stopped &&
- avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
+ avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
q_map = skb_get_queue_mapping(skb);
ieee80211_wake_queue(hw, q_map);
ring->queue_stopped = false;
}
+ if (++rp_idx >= ring->r.len)
+ rp_idx = 0;
+
skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
info = IEEE80211_SKB_CB(skb);
@@ -1138,7 +1219,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
rtw_fw_c2h_cmd_isr(rtwdev);
/* all of the jobs for this interrupt have been done */
- rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
+ if (rtwpci->running)
+ rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
spin_unlock_bh(&rtwpci->irq_lock);
return IRQ_HANDLED;
@@ -1490,6 +1572,7 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
static struct rtw_hci_ops rtw_pci_ops = {
.tx_write = rtw_pci_tx_write,
.tx_kick_off = rtw_pci_tx_kick_off,
+ .flush_queues = rtw_pci_flush_queues,
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
@@ -1558,7 +1641,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
spin_lock_bh(&rtwpci->irq_lock);
- rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
+ if (rtwpci->running)
+ rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
spin_unlock_bh(&rtwpci->irq_lock);
		/* When an ISR happens during polling and before napi_complete(),
		 * while no further data is received, data on the dma_ring will
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index e76fc549a788..0ffae887527a 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -211,6 +211,7 @@ struct rtw_pci {
spinlock_t irq_lock;
u32 irq_mask[4];
bool irq_enabled;
+ bool running;
/* napi structure */
struct net_device netdev;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index e114ddecac09..8146acaf1893 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -119,6 +119,14 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
}
+static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->cfo_init)
+ chip->ops->cfo_init(rtwdev);
+}
+
void rtw_phy_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -140,6 +148,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
rtw_phy_cck_pd_init(rtwdev);
dm_info->iqk.done = false;
+ rtw_phy_cfo_init(rtwdev);
}
EXPORT_SYMBOL(rtw_phy_init);
@@ -316,7 +325,8 @@ rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
return damping;
}
-static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
+static void rtw_phy_dig_get_boundary(struct rtw_dev *rtwdev,
+ struct rtw_dm_info *dm_info,
u8 *upper, u8 *lower, bool linked)
{
u8 dig_max, dig_min, dig_mid;
@@ -325,8 +335,7 @@ static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
if (linked) {
dig_max = DIG_PERF_MAX;
dig_mid = DIG_PERF_MID;
- /* 22B=0x1c, 22C=0x20 */
- dig_min = 0x1c;
+ dig_min = rtwdev->chip->dig_min;
min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
} else {
dig_max = DIG_CVRG_MAX;
@@ -437,7 +446,8 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev)
	 * the peers connected with us; meanwhile make sure the igi value does
	 * not go beyond the hardware limitation
*/
- rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
+ rtw_phy_dig_get_boundary(rtwdev, dm_info, &upper_bound, &lower_bound,
+ linked);
cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);
/* record current igi value and false alarm statistics for further
@@ -527,6 +537,62 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
chip->ops->dpk_track(rtwdev);
}
+struct rtw_rx_addr_match_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_hdr *hdr;
+ struct rtw_rx_pkt_stat *pkt_stat;
+ u8 *bssid;
+};
+
+static void rtw_phy_parsing_cfo_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_rx_addr_match_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+ struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+ u8 *bssid = iter_data->bssid;
+ u8 i;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+ return;
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++) {
+ cfo->cfo_tail[i] += pkt_stat->cfo_tail[i];
+ cfo->cfo_cnt[i]++;
+ }
+
+ cfo->packet_count++;
+}
+
+void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct ieee80211_hdr *hdr = pkt_stat->hdr;
+ struct rtw_rx_addr_match_data data = {};
+
+ if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status ||
+ ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.rtwdev = rtwdev;
+ data.hdr = hdr;
+ data.pkt_stat = pkt_stat;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_phy_parsing_cfo_iter, &data);
+}
+EXPORT_SYMBOL(rtw_phy_parsing_cfo);
+
+static void rtw_phy_cfo_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->cfo_track)
+ chip->ops->cfo_track(rtwdev);
+}
+
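
rtw_phy_parsing_cfo() accumulates per-path CFO tails into the new rtw_cfo_track state, and the periodic cfo_track op is expected to consume them. A minimal sketch of what a chip implementation might do, assuming the main.h definitions added in this series — the threshold, step, and cap bound are hypothetical tuning values, not the 8822c ones:

/* Sketch only: average the accumulated CFO and nudge the crystal cap. */
static void example_cfo_track(struct rtw_dev *rtwdev)
{
	struct rtw_cfo_track *cfo = &rtwdev->dm_info.cfo_track;
	s32 avg = 0;
	u8 i;

	if (!cfo->packet_count)
		return;

	for (i = 0; i < rtwdev->hal.rf_path_num; i++)
		if (cfo->cfo_cnt[i])
			avg += cfo->cfo_tail[i] / cfo->cfo_cnt[i];
	avg /= rtwdev->hal.rf_path_num;

	/* hypothetical threshold/step/bound; real values are chip tuning */
	if (avg > 100 && cfo->crystal_cap < 0x3f)
		cfo->crystal_cap++;
	else if (avg < -100 && cfo->crystal_cap > 0)
		cfo->crystal_cap--;

	memset(cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
	memset(cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
	cfo->packet_count_pre = cfo->packet_count;
	cfo->packet_count = 0;
}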
#define CCK_PD_FA_LV1_MIN 1000
#define CCK_PD_FA_LV0_MAX 500
@@ -617,6 +683,7 @@ static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
static void rtw_phy_ra_track(struct rtw_dev *rtwdev)
{
+ rtw_fw_update_wl_phy_info(rtwdev);
rtw_phy_ra_info_update(rtwdev);
rtw_phy_rrsr_update(rtwdev);
}
@@ -628,6 +695,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_dig(rtwdev);
rtw_phy_cck_pd(rtwdev);
rtw_phy_ra_track(rtwdev);
+ rtw_phy_cfo_track(rtwdev);
rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
}
@@ -1584,7 +1652,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
}
EXPORT_SYMBOL(rtw_phy_load_tables);
-static u8 rtw_get_channel_group(u8 channel)
+static u8 rtw_get_channel_group(u8 channel, u8 rate)
{
switch (channel) {
default:
@@ -1628,6 +1696,7 @@ static u8 rtw_get_channel_group(u8 channel)
case 106:
return 4;
case 14:
+ return rate <= DESC_RATE11M ? 5 : 4;
case 108:
case 110:
case 112:
@@ -1879,7 +1948,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
s8 *remnant = &pwr_param->pwr_remnant;
pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
- group = rtw_get_channel_group(ch);
+ group = rtw_get_channel_group(ch, rate);
/* base power index for 2.4G/5G */
if (IS_CH_2G_BAND(ch)) {
@@ -2219,6 +2288,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
}
EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
+bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 delta_lck;
+
+ delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
+ if (delta_lck >= rtwdev->chip->lck_threshold) {
+ dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
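
rtw_phy_pwrtrack_need_lck() mirrors the existing IQK thermal check below it, keyed off the new per-chip lck_threshold field. A worked example, assuming lck_threshold = 8 (illustrative; the real value is chip tuning):

/* Assuming chip->lck_threshold == 8:
 *   thermal_meter_lck = 0x20, thermal_avg[0] = 0x29
 *     -> delta_lck = 9 >= 8: refresh thermal_meter_lck, return true
 *   thermal_meter_lck = 0x20, thermal_avg[0] = 0x24
 *     -> delta_lck = 4 <  8: return false (no LCK needed)
 */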
+
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index a4fcfb878550..0b6f2fc8193c 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -55,9 +55,12 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table,
u8 tbl_path, u8 therm_path, u8 delta);
+bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table);
+void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat);
struct rtw_txpwr_lmt_cfg_pair {
u8 regd;
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index ea518aa78552..f5ce75095e90 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -129,6 +129,9 @@
#define REG_MCU_TST_CFG 0x84
#define VAL_FW_TRIGGER 0x1
+#define REG_PMC_DBG_CTRL1 0xa8
+#define BITS_PMC_BT_IQK_STS GENMASK(22, 21)
+
#define REG_EFUSE_ACCESS 0x00CF
#define EFUSE_ACCESS_ON 0x69
#define EFUSE_ACCESS_OFF 0x00
@@ -360,6 +363,7 @@
#define REG_TX_PTCL_CTRL 0x0520
#define BIT_SIFS_BK_EN BIT(12)
#define REG_TXPAUSE 0x0522
+#define BIT_AC_QUEUE GENMASK(7, 0)
#define REG_RD_CTRL 0x0524
#define BIT_DIS_TXOP_CFE BIT(10)
#define BIT_DIS_LSIG_CFE BIT(9)
@@ -516,6 +520,7 @@
#define BIT_RFE_BUF_EN BIT(3)
#define REG_ANAPAR_XTAL_0 0x1040
+#define BIT_XCAP_0 GENMASK(23, 10)
#define REG_CPU_DMEM_CON 0x1080
#define BIT_WL_PLATFORM_RST BIT(16)
#define BIT_WL_SECURITY_CLK BIT(15)
@@ -534,6 +539,7 @@
#define BIT_DDMACH0_OWN BIT(31)
#define BIT_DDMACH0_CHKSUM_EN BIT(29)
#define BIT_DDMACH0_CHKSUM_STS BIT(27)
+#define BIT_DDMACH0_DDMA_MODE BIT(26)
#define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25)
#define BIT_DDMACH0_CHKSUM_CONT BIT(24)
#define BIT_MASK_DDMACH0_DLEN 0x3ffff
@@ -642,21 +648,30 @@
#define RF_WLSEL 0x02
#define RF_DTXLOK 0x08
#define RF_CFGCH 0x18
+#define BIT_BAND GENMASK(18, 16)
#define RF_RCK 0x1d
#define RF_LUTWA 0x33
#define RF_LUTWD1 0x3e
#define RF_LUTWD0 0x3f
+#define BIT_GAIN_EXT BIT(12)
+#define BIT_DATA_L GENMASK(11, 0)
#define RF_T_METER 0x42
#define RF_BSPAD 0x54
#define RF_GAINTX 0x56
#define RF_TXATANK 0x64
#define RF_TRXIQ 0x66
#define RF_RXIQGEN 0x8d
+#define RF_SYN_PFD 0xb0
#define RF_XTALX2 0xb8
+#define RF_SYN_CTRL 0xbb
#define RF_MALSEL 0xbe
+#define RF_SYN_AAC 0xc9
+#define RF_AAC_CTRL 0xca
+#define RF_FAST_LCK 0xcc
#define RF_RCKD 0xde
#define RF_TXADBG 0xde
#define RF_LUTDBG 0xdf
+#define BIT_TXA_TANK BIT(4)
#define RF_LUTWE2 0xee
#define RF_LUTWE 0xef
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 33c6cf1206c8..785b8181513f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -581,7 +581,8 @@ static void rtw8821c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index dd560c28abb2..6cb593cc33c2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -17,6 +17,7 @@
#include "util.h"
#include "bf.h"
#include "efuse.h"
+#include "coex.h"
#define IQK_DONE_8822C 0xaa
@@ -39,7 +40,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->rfe_option = map->rfe_option;
efuse->rf_board_option = map->rf_board_option;
- efuse->crystal_cap = map->xtal_k;
+ efuse->crystal_cap = map->xtal_k & XCAP_MASK;
efuse->channel_plan = map->channel_plan;
efuse->country_code[0] = map->country_code[0];
efuse->country_code[1] = map->country_code[1];
@@ -1094,14 +1095,719 @@ static void rtw8822c_pa_bias(struct rtw_dev *rtwdev)
if (pg_pa_bias == EFUSE_READ_FAIL)
return;
pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias);
- rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_2G_MASK, pg_pa_bias);
+ rtw_write_rf(rtwdev, path, RF_PA, RF_PABIAS_2G_MASK, pg_pa_bias);
}
for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path],
&pg_pa_bias);
pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias);
- rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_5G_MASK, pg_pa_bias);
+ rtw_write_rf(rtwdev, path, RF_PA, RF_PABIAS_5G_MASK, pg_pa_bias);
+ }
+}
+
+static void rtw8822c_rfk_handshake(struct rtw_dev *rtwdev, bool is_before_k)
+{
+ struct rtw_dm_info *dm = &rtwdev->dm_info;
+ u8 u1b_tmp;
+ u8 u4b_tmp;
+ int ret;
+
+ if (is_before_k) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[RFK] WiFi / BT RFK handshake start!!\n");
+
+ if (!dm->is_bt_iqk_timeout) {
+ ret = read_poll_timeout(rtw_read32_mask, u4b_tmp,
+ u4b_tmp == 0, 20, 600000, false,
+ rtwdev, REG_PMC_DBG_CTRL1,
+ BITS_PMC_BT_IQK_STS);
+ if (ret) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[RFK] Wait BT IQK finish timeout!!\n");
+ dm->is_bt_iqk_timeout = true;
+ }
+ }
+
+ rtw_fw_inform_rfk_status(rtwdev, true);
+
+ ret = read_poll_timeout(rtw_read8_mask, u1b_tmp,
+ u1b_tmp == 1, 20, 100000, false,
+ rtwdev, REG_ARFR4, BIT_WL_RFK);
+ if (ret)
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[RFK] Send WiFi RFK start H2C cmd FAIL!!\n");
+ } else {
+ rtw_fw_inform_rfk_status(rtwdev, false);
+ ret = read_poll_timeout(rtw_read8_mask, u1b_tmp,
+ u1b_tmp == 1, 20, 100000, false,
+ rtwdev, REG_ARFR4,
+ BIT_WL_RFK);
+ if (ret)
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[RFK] Send WiFi RFK finish H2C cmd FAIL!!\n");
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[RFK] WiFi / BT RFK handshake finish!!\n");
+ }
+}
+
+static void rtw8822c_rfk_power_save(struct rtw_dev *rtwdev,
+ bool is_power_save)
+{
+ u8 path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path);
+ rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0, BIT_PS_EN,
+ is_power_save ? 0 : 1);
+ }
+}
+
+static void rtw8822c_txgapk_backup_bb_reg(struct rtw_dev *rtwdev, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ reg_backup[i] = rtw_read32(rtwdev, reg[i]);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Backup BB 0x%x = 0x%x\n",
+ reg[i], reg_backup[i]);
+ }
+}
+
+static void rtw8822c_txgapk_reload_bb_reg(struct rtw_dev *rtwdev,
+ const u32 reg[], u32 reg_backup[],
+ u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ rtw_write32(rtwdev, reg[i], reg_backup[i]);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Reload BB 0x%x = 0x%x\n",
+ reg[i], reg_backup[i]);
+ }
+}
+
+static bool check_rf_status(struct rtw_dev *rtwdev, u8 status)
+{
+ u8 reg_rf0_a, reg_rf0_b;
+
+ reg_rf0_a = (u8)rtw_read_rf(rtwdev, RF_PATH_A,
+ RF_MODE_TRXAGC, BIT_RF_MODE);
+ reg_rf0_b = (u8)rtw_read_rf(rtwdev, RF_PATH_B,
+ RF_MODE_TRXAGC, BIT_RF_MODE);
+
+ if (reg_rf0_a == status || reg_rf0_b == status)
+ return false;
+
+ return true;
+}
+
+static void rtw8822c_txgapk_tx_pause(struct rtw_dev *rtwdev)
+{
+ bool status;
+ int ret;
+
+ rtw_write8(rtwdev, REG_TXPAUSE, BIT_AC_QUEUE);
+ rtw_write32_mask(rtwdev, REG_TX_FIFO, BIT_STOP_TX, 0x2);
+
+ ret = read_poll_timeout_atomic(check_rf_status, status, status,
+ 2, 5000, false, rtwdev, 2);
+ if (ret)
+ rtw_warn(rtwdev, "failed to pause TX\n");
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Tx pause!!\n");
+}
+
+static void rtw8822c_txgapk_bb_dpk(struct rtw_dev *rtwdev, u8 path)
+{
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ rtw_write32_mask(rtwdev, REG_ENFN, BIT_IQK_DPK_EN, 0x1);
+ rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2,
+ BIT_IQK_DPK_CLOCK_SRC, 0x1);
+ rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2,
+ BIT_IQK_DPK_RESET_SRC, 0x1);
+ rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, BIT_EN_IOQ_IQK_DPK, 0x1);
+ rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, BIT_TST_IQK2SET_SRC, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCA_OFF, BIT_CCA_ON_BY_PW, 0x1ff);
+
+ if (path == RF_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_A,
+ BIT_RFTXEN_GCK_FORCE_ON, 0x1);
+ rtw_write32_mask(rtwdev, REG_3WIRE, BIT_DIS_SHARERX_TXGAT, 0x1);
+ rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_A,
+ BIT_TX_SCALE_0DB, 0x1);
+ rtw_write32_mask(rtwdev, REG_3WIRE, BIT_3WIRE_EN, 0x0);
+ } else if (path == RF_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_B,
+ BIT_RFTXEN_GCK_FORCE_ON, 0x1);
+ rtw_write32_mask(rtwdev, REG_3WIRE2,
+ BIT_DIS_SHARERX_TXGAT, 0x1);
+ rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_B,
+ BIT_TX_SCALE_0DB, 0x1);
+ rtw_write32_mask(rtwdev, REG_3WIRE2, BIT_3WIRE_EN, 0x0);
+ }
+ rtw_write32_mask(rtwdev, REG_CCKSB, BIT_BBMODE, 0x2);
+}
+
+static void rtw8822c_txgapk_afe_dpk(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 reg;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ if (path == RF_PATH_A) {
+ reg = REG_ANAPAR_A;
+ } else if (path == RF_PATH_B) {
+ reg = REG_ANAPAR_B;
+ } else {
+ rtw_err(rtwdev, "[TXGAPK] unknown path %d!!\n", path);
+ return;
+ }
+
+ rtw_write32_mask(rtwdev, REG_IQK_CTRL, MASKDWORD, MASKDWORD);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x701f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x702f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x703f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x704f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x705f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x706f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x707f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x708f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x709f0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70af0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70bf0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70cf0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70df0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ef0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ff0001);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ff0001);
+}
+
+static void rtw8822c_txgapk_afe_dpk_restore(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 reg;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ if (path == RF_PATH_A) {
+ reg = REG_ANAPAR_A;
+ } else if (path == RF_PATH_B) {
+ reg = REG_ANAPAR_B;
+ } else {
+ rtw_err(rtwdev, "[TXGAPK] unknown path %d!!\n", path);
+ return;
+ }
+ rtw_write32_mask(rtwdev, REG_IQK_CTRL, MASKDWORD, 0xffa1005e);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700b8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70144041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70244041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70344041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70444041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x705b8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70644041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x707b8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x708b8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x709b8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ab8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70bb8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70cb8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70db8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70eb8041);
+ rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70fb8041);
+}
+
+static void rtw8822c_txgapk_bb_dpk_restore(struct rtw_dev *rtwdev, u8 path)
+{
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x0);
+ rtw_write_rf(rtwdev, path, RF_DIS_BYPASS_TXBB, BIT_TIA_BYPASS, 0x0);
+ rtw_write_rf(rtwdev, path, RF_DIS_BYPASS_TXBB, BIT_TXBB, 0x0);
+
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x0);
+ rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0);
+ rtw_write32_mask(rtwdev, REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0);
+ rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, 0x00);
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x1);
+ rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0);
+ rtw_write32_mask(rtwdev, REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0);
+ rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, 0x00);
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCA_OFF, BIT_CCA_ON_BY_PW, 0x0);
+
+ if (path == RF_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_A,
+ BIT_RFTXEN_GCK_FORCE_ON, 0x0);
+ rtw_write32_mask(rtwdev, REG_3WIRE, BIT_DIS_SHARERX_TXGAT, 0x0);
+ rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_A,
+ BIT_TX_SCALE_0DB, 0x0);
+ rtw_write32_mask(rtwdev, REG_3WIRE, BIT_3WIRE_EN, 0x3);
+ } else if (path == RF_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_B,
+ BIT_RFTXEN_GCK_FORCE_ON, 0x0);
+ rtw_write32_mask(rtwdev, REG_3WIRE2,
+ BIT_DIS_SHARERX_TXGAT, 0x0);
+ rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_B,
+ BIT_TX_SCALE_0DB, 0x0);
+ rtw_write32_mask(rtwdev, REG_3WIRE2, BIT_3WIRE_EN, 0x3);
+ }
+
+ rtw_write32_mask(rtwdev, REG_CCKSB, BIT_BBMODE, 0x0);
+ rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_CFIR_EN, 0x5);
+}
+
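+/* A TX gain word whose PAD fields are already at the top of the range
+ * (high field >= 0xc, low field >= 0xe) is left untouched by the
+ * calibration and treated as valid as-is.
+ */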
+static bool _rtw8822c_txgapk_gain_valid(struct rtw_dev *rtwdev, u32 gain)
+{
+ if ((FIELD_GET(BIT_GAIN_TX_PAD_H, gain) >= 0xc) &&
+ (FIELD_GET(BIT_GAIN_TX_PAD_L, gain) >= 0xe))
+ return true;
+
+ return false;
+}
+
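+/* Load the backed-up 0x3f gain codes into the BB gain table for one
+ * band/path. Entries already at the top of the PAD range all reuse the
+ * first such code found; the remaining entries are written through
+ * unchanged.
+ */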
+static void _rtw8822c_txgapk_write_gain_bb_table(struct rtw_dev *rtwdev,
+ u8 band, u8 path)
+{
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ u32 v, tmp_3f = 0;
+ u8 gain, check_txgain;
+
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path);
+
+ switch (band) {
+ case RF_BAND_2G_OFDM:
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x0);
+ break;
+ case RF_BAND_5G_L:
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x2);
+ break;
+ case RF_BAND_5G_M:
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x3);
+ break;
+ case RF_BAND_5G_H:
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x4);
+ break;
+ default:
+ break;
+ }
+
+ rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, MASKBYTE0, 0x88);
+
+ check_txgain = 0;
+ for (gain = 0; gain < RF_GAIN_NUM; gain++) {
+ v = txgapk->rf3f_bp[band][gain][path];
+ if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) {
+ if (!check_txgain) {
+ tmp_3f = txgapk->rf3f_bp[band][gain][path];
+ check_txgain = 1;
+ }
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] tx_gain=0x%03X >= 0xCEX\n",
+ txgapk->rf3f_bp[band][gain][path]);
+ } else {
+ tmp_3f = txgapk->rf3f_bp[band][gain][path];
+ }
+
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN, tmp_3f);
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_I_GAIN, gain);
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_GAIN_RST, 0x1);
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_GAIN_RST, 0x0);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] Band=%d 0x1b98[11:0]=0x%03X path=%d\n",
+ band, tmp_3f, path);
+ }
+}
+
+static void rtw8822c_txgapk_write_gain_bb_table(struct rtw_dev *rtwdev)
+{
+ u8 path, band;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s channel=%d\n",
+ __func__, rtwdev->dm_info.gapk.channel);
+
+ for (band = 0; band < RF_BAND_MAX; band++) {
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ _rtw8822c_txgapk_write_gain_bb_table(rtwdev,
+ band, path);
+ }
+ }
+}
+
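+/* Run one gap-K measurement on @path: select the path, transmit a
+ * single tone, kick the calibration engine via the two REG_NCTL0
+ * commands and poll REG_RPT_CIP for completion, then read ten 4-bit
+ * signed gap offsets back from the report registers.
+ */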
+static void rtw8822c_txgapk_read_offset(struct rtw_dev *rtwdev, u8 path)
+{
+ static const u32 cfg1_1b00[2] = {0x00000d18, 0x00000d2a};
+ static const u32 cfg2_1b00[2] = {0x00000d19, 0x00000d2b};
+ static const u32 set_pi[2] = {REG_RSV_CTRL, REG_WLRF1};
+ static const u32 path_setting[2] = {REG_ORITXCODE, REG_ORITXCODE2};
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ u8 channel = txgapk->channel;
+ u32 val;
+ int i;
+
+ if (path >= ARRAY_SIZE(cfg1_1b00) ||
+ path >= ARRAY_SIZE(cfg2_1b00) ||
+ path >= ARRAY_SIZE(set_pi) ||
+ path >= ARRAY_SIZE(path_setting)) {
+ rtw_warn(rtwdev, "[TXGAPK] wrong path %d\n", path);
+ return;
+ }
+
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, BIT_ANT_PATH, path + 1);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, MASKDWORD, 0xe4e40000);
+ rtw_write32_mask(rtwdev, REG_TXANTSEG, BIT_ANTSEG, 0x3);
+ rtw_write32_mask(rtwdev, path_setting[path], MASK20BITS, 0x33312);
+ rtw_write32_mask(rtwdev, path_setting[path], BIT_PATH_EN, 0x1);
+ rtw_write32_mask(rtwdev, set_pi[path], BITS_RFC_DIRECT, 0x0);
+ rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT_TXA_TANK, 0x1);
+ rtw_write_rf(rtwdev, path, RF_IDAC, BIT_TX_MODE, 0x820);
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path);
+ rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x0);
+
+ rtw_write32_mask(rtwdev, REG_TX_TONE_IDX, MASKBYTE0, 0x018);
+ fsleep(1000);
+ if (channel >= 1 && channel <= 14)
+ rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, BIT_2G_SWING);
+ else
+ rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, BIT_5G_SWING);
+ fsleep(1000);
+
+ rtw_write32_mask(rtwdev, REG_NCTL0, MASKDWORD, cfg1_1b00[path]);
+ rtw_write32_mask(rtwdev, REG_NCTL0, MASKDWORD, cfg2_1b00[path]);
+
+ read_poll_timeout(rtw_read32_mask, val,
+ val == 0x55, 1000, 100000, false,
+ rtwdev, REG_RPT_CIP, BIT_RPT_CIP_STATUS);
+
+ rtw_write32_mask(rtwdev, set_pi[path], BITS_RFC_DIRECT, 0x2);
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path);
+ rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_EN, 0x1);
+ rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_SEL, 0x12);
+ rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, BIT_GAPK_RPT_IDX, 0x3);
+ val = rtw_read32(rtwdev, REG_STAT_RPT);
+
+ txgapk->offset[0][path] = (s8)FIELD_GET(BIT_GAPK_RPT0, val);
+ txgapk->offset[1][path] = (s8)FIELD_GET(BIT_GAPK_RPT1, val);
+ txgapk->offset[2][path] = (s8)FIELD_GET(BIT_GAPK_RPT2, val);
+ txgapk->offset[3][path] = (s8)FIELD_GET(BIT_GAPK_RPT3, val);
+ txgapk->offset[4][path] = (s8)FIELD_GET(BIT_GAPK_RPT4, val);
+ txgapk->offset[5][path] = (s8)FIELD_GET(BIT_GAPK_RPT5, val);
+ txgapk->offset[6][path] = (s8)FIELD_GET(BIT_GAPK_RPT6, val);
+ txgapk->offset[7][path] = (s8)FIELD_GET(BIT_GAPK_RPT7, val);
+
+ rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, BIT_GAPK_RPT_IDX, 0x4);
+ val = rtw_read32(rtwdev, REG_STAT_RPT);
+
+ txgapk->offset[8][path] = (s8)FIELD_GET(BIT_GAPK_RPT0, val);
+ txgapk->offset[9][path] = (s8)FIELD_GET(BIT_GAPK_RPT1, val);
+
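+	/* The reports are 4-bit two's complement; sign-extend into s8. */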
+ for (i = 0; i < RF_HW_OFFSET_NUM; i++)
+ if (txgapk->offset[i][path] & BIT(3))
+ txgapk->offset[i][path] = txgapk->offset[i][path] |
+ 0xf0;
+ for (i = 0; i < RF_HW_OFFSET_NUM; i++)
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] offset %d %d path=%d\n",
+ txgapk->offset[i][path], i, path);
+}
+
+static void rtw8822c_txgapk_calculate_offset(struct rtw_dev *rtwdev, u8 path)
+{
+ static const u32 bb_reg[] = {REG_ANTMAP0, REG_TXLGMAP, REG_TXANTSEG,
+ REG_ORITXCODE, REG_ORITXCODE2};
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ u8 channel = txgapk->channel;
+ u32 reg_backup[ARRAY_SIZE(bb_reg)] = {0};
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s channel=%d\n",
+ __func__, channel);
+
+ rtw8822c_txgapk_backup_bb_reg(rtwdev, bb_reg,
+ reg_backup, ARRAY_SIZE(bb_reg));
+
+ if (channel >= 1 && channel <= 14) {
+ rtw_write32_mask(rtwdev,
+ REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0);
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path);
+ rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x3f);
+ rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0);
+ rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1);
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x5000f);
+ rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_RF_GAIN, 0x0);
+ rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x1);
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RXAGC, 0x0f);
+ rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1);
+ rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_TXBB, 0x1);
+ rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0);
+ rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x1);
+
+ rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x00);
+ rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x0);
+
+ rtw8822c_txgapk_read_offset(rtwdev, path);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "=============================\n");
+
+ } else {
+ rtw_write32_mask(rtwdev,
+ REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0);
+ rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path);
+ rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x3f);
+ rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0);
+ rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1);
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x50011);
+ rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x3);
+ rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_ATT, 0x3);
+ rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_SW, 0x1);
+ rtw_write_rf(rtwdev, path,
+ RF_RXA_MIX_GAIN, BIT_RXA_MIX_GAIN, 0x2);
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RXAGC, 0x12);
+ rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1);
+ rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0);
+ rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x1);
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RF_MODE, 0x5);
+
+ rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x0);
+
+ if (channel >= 36 && channel <= 64)
+ rtw_write32_mask(rtwdev,
+ REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x2);
+ else if (channel >= 100 && channel <= 144)
+ rtw_write32_mask(rtwdev,
+ REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x3);
+ else if (channel >= 149 && channel <= 177)
+ rtw_write32_mask(rtwdev,
+ REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x4);
+
+ rtw8822c_txgapk_read_offset(rtwdev, path);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "=============================\n");
+	}
+ rtw8822c_txgapk_reload_bb_reg(rtwdev, bb_reg,
+ reg_backup, ARRAY_SIZE(bb_reg));
+}
+
+static void rtw8822c_txgapk_rf_restore(struct rtw_dev *rtwdev, u8 path)
+{
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ if (path >= rtwdev->hal.rf_path_num)
+ return;
+
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RF_MODE, 0x3);
+ rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x0);
+ rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x0);
+}
+
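+/* Apply a measured gap offset to a gain code. The gain is doubled to
+ * half-step resolution, the signed half-step offset is added, and the
+ * result is folded back with BIT_GAIN_EXT carrying the leftover half
+ * step: e.g. gain 0x10 with offset -3 gives gain_x2 0x1d, i.e.
+ * 0x0e | BIT_GAIN_EXT. Gains already at the top of the PAD range are
+ * returned unchanged.
+ */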
+static u32 rtw8822c_txgapk_cal_gain(struct rtw_dev *rtwdev, u32 gain, s8 offset)
+{
+ u32 gain_x2, new_gain;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ if (_rtw8822c_txgapk_gain_valid(rtwdev, gain)) {
+ new_gain = gain;
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] gain=0x%03X(>=0xCEX) offset=%d new_gain=0x%03X\n",
+ gain, offset, new_gain);
+ return new_gain;
+ }
+
+ gain_x2 = (gain << 1) + offset;
+ new_gain = (gain_x2 >> 1) | (gain_x2 & BIT(0) ? BIT_GAIN_EXT : 0);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] gain=0x%X offset=%d new_gain=0x%X\n",
+ gain, offset, new_gain);
+
+ return new_gain;
+}
+
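+/* Turn the per-step offsets into cumulative corrections: for each gain
+ * index i, sum the offsets of all indexes >= i (skipping entries
+ * already at the top of the PAD range), then write the corrected codes
+ * into the RF gain LUT through the 0x33/0x3f indirect access.
+ */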
+static void rtw8822c_txgapk_write_tx_gain(struct rtw_dev *rtwdev)
+{
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ u32 i, j, tmp = 0x20, tmp_3f, v;
+ s8 offset_tmp[RF_GAIN_NUM] = {0};
+ u8 path, band = RF_BAND_2G_OFDM, channel = txgapk->channel;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ if (channel >= 1 && channel <= 14) {
+ tmp = 0x20;
+ band = RF_BAND_2G_OFDM;
+ } else if (channel >= 36 && channel <= 64) {
+ tmp = 0x200;
+ band = RF_BAND_5G_L;
+ } else if (channel >= 100 && channel <= 144) {
+ tmp = 0x280;
+ band = RF_BAND_5G_M;
+ } else if (channel >= 149 && channel <= 177) {
+ tmp = 0x300;
+ band = RF_BAND_5G_H;
+ } else {
+ rtw_err(rtwdev, "[TXGAPK] unknown channel %d!!\n", channel);
+ return;
+ }
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ for (i = 0; i < RF_GAIN_NUM; i++) {
+ offset_tmp[i] = 0;
+ for (j = i; j < RF_GAIN_NUM; j++) {
+ v = txgapk->rf3f_bp[band][j][path];
+ if (_rtw8822c_txgapk_gain_valid(rtwdev, v))
+ continue;
+
+ offset_tmp[i] += txgapk->offset[j][path];
+ txgapk->fianl_offset[i][path] = offset_tmp[i];
+ }
+
+ v = txgapk->rf3f_bp[band][i][path];
+ if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] tx_gain=0x%03X >= 0xCEX\n",
+ txgapk->rf3f_bp[band][i][path]);
+ } else {
+ txgapk->rf3f_fs[path][i] = offset_tmp[i];
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] offset %d %d\n",
+ offset_tmp[i], i);
+ }
+ }
+
+ rtw_write_rf(rtwdev, path, RF_LUTWE2, RFREG_MASK, 0x10000);
+ for (i = 0; i < RF_GAIN_NUM; i++) {
+ rtw_write_rf(rtwdev, path,
+ RF_LUTWA, RFREG_MASK, tmp + i);
+
+ tmp_3f = rtw8822c_txgapk_cal_gain(rtwdev,
+ txgapk->rf3f_bp[band][i][path],
+ offset_tmp[i]);
+ rtw_write_rf(rtwdev, path, RF_LUTWD0,
+ BIT_GAIN_EXT | BIT_DATA_L, tmp_3f);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] 0x33=0x%05X 0x3f=0x%04X\n",
+ tmp + i, tmp_3f);
+ }
+ rtw_write_rf(rtwdev, path, RF_LUTWE2, RFREG_MASK, 0x0);
+ }
+}
+
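+/* Snapshot the RF 0x3f TX gain code for every band/path once: park the
+ * channel per band with the 3-wire interface disabled, step the RF0
+ * gain index and read the resulting code back from RF 0x5f, then
+ * restore the original channel.
+ */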
+static void rtw8822c_txgapk_save_all_tx_gain_table(struct rtw_dev *rtwdev)
+{
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ static const u32 three_wire[2] = {REG_3WIRE, REG_3WIRE2};
+ static const u8 ch_num[RF_BAND_MAX] = {1, 1, 36, 100, 149};
+ static const u8 band_num[RF_BAND_MAX] = {0x0, 0x0, 0x1, 0x3, 0x5};
+ static const u8 cck[RF_BAND_MAX] = {0x1, 0x0, 0x0, 0x0, 0x0};
+ u8 path, band, gain, rf0_idx;
+ u32 rf18, v;
+
+ if (rtwdev->dm_info.dm_flags & BIT(RTW_DM_CAP_TXGAPK))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ if (txgapk->read_txgain == 1) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] Already Read txgapk->read_txgain return!!!\n");
+ rtw8822c_txgapk_write_gain_bb_table(rtwdev);
+ return;
+ }
+
+ for (band = 0; band < RF_BAND_MAX; band++) {
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rf18 = rtw_read_rf(rtwdev, path, RF_CFGCH, RFREG_MASK);
+
+ rtw_write32_mask(rtwdev,
+ three_wire[path], BIT_3WIRE_EN, 0x0);
+ rtw_write_rf(rtwdev, path,
+ RF_CFGCH, MASKBYTE0, ch_num[band]);
+ rtw_write_rf(rtwdev, path,
+ RF_CFGCH, BIT_BAND, band_num[band]);
+ rtw_write_rf(rtwdev, path,
+ RF_BW_TRXBB, BIT_DBG_CCK_CCA, cck[band]);
+ rtw_write_rf(rtwdev, path,
+ RF_BW_TRXBB, BIT_TX_CCK_IND, cck[band]);
+ gain = 0;
+ for (rf0_idx = 1; rf0_idx < 32; rf0_idx += 3) {
+ rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC,
+ MASKBYTE0, rf0_idx);
+ v = rtw_read_rf(rtwdev, path,
+ RF_TX_RESULT, RFREG_MASK);
+ txgapk->rf3f_bp[band][gain][path] = v & BIT_DATA_L;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] 0x5f=0x%03X band=%d path=%d\n",
+ txgapk->rf3f_bp[band][gain][path],
+ band, path);
+ gain++;
+ }
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RFREG_MASK, rf18);
+ rtw_write32_mask(rtwdev,
+ three_wire[path], BIT_3WIRE_EN, 0x3);
+ }
+ }
+ rtw8822c_txgapk_write_gain_bb_table(rtwdev);
+ txgapk->read_txgain = 1;
+}
+
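+/* Top-level TX gap calibration: make sure the gain table snapshot
+ * exists, skip TSSI parts (power_track_type 4-7), pause TX, and for
+ * each path run the BB/AFE setup, measure the offsets, restore the
+ * RF/AFE/BB state, and finally commit the corrected gains.
+ */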
+static void rtw8822c_txgapk(struct rtw_dev *rtwdev)
+{
+ static const u32 bb_reg[2] = {REG_TX_PTCL_CTRL, REG_TX_FIFO};
+ struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk;
+ u32 bb_reg_backup[2];
+ u8 path;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__);
+
+ rtw8822c_txgapk_save_all_tx_gain_table(rtwdev);
+
+ if (txgapk->read_txgain == 0) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] txgapk->read_txgain == 0 return!!!\n");
+ return;
+ }
+
+ if (rtwdev->efuse.power_track_type >= 4 &&
+ rtwdev->efuse.power_track_type <= 7) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[TXGAPK] Normal Mode in TSSI mode. return!!!\n");
+ return;
+ }
+
+ rtw8822c_txgapk_backup_bb_reg(rtwdev, bb_reg,
+ bb_reg_backup, ARRAY_SIZE(bb_reg));
+ rtw8822c_txgapk_tx_pause(rtwdev);
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ txgapk->channel = rtw_read_rf(rtwdev, path,
+ RF_CFGCH, RFREG_MASK) & MASKBYTE0;
+ rtw8822c_txgapk_bb_dpk(rtwdev, path);
+ rtw8822c_txgapk_afe_dpk(rtwdev, path);
+ rtw8822c_txgapk_calculate_offset(rtwdev, path);
+ rtw8822c_txgapk_rf_restore(rtwdev, path);
+ rtw8822c_txgapk_afe_dpk_restore(rtwdev, path);
+ rtw8822c_txgapk_bb_dpk_restore(rtwdev, path);
+ }
+ rtw8822c_txgapk_write_tx_gain(rtwdev);
+ rtw8822c_txgapk_reload_bb_reg(rtwdev, bb_reg,
+ bb_reg_backup, ARRAY_SIZE(bb_reg));
+}
+
+static void rtw8822c_do_gapk(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm = &rtwdev->dm_info;
+
+ if (dm->dm_flags & BIT(RTW_DM_CAP_TXGAPK)) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] feature disable!!!\n");
+ return;
+ }
+ rtw8822c_rfk_handshake(rtwdev, true);
+ rtw8822c_txgapk(rtwdev);
+ rtw8822c_rfk_handshake(rtwdev, false);
+}

static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
@@ -1126,6 +1832,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+ dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
}

static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
@@ -1396,6 +2103,15 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
return 0;
}
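
+/* Dump MAC registers and the firmware DMEM/IMEM/EMEM/ROM regions for
+ * post-mortem analysis after a firmware crash.
+ */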
+static void rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
+{
+ rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw8822c reg_");
+ rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw8822c DMEM_");
+ rtw_dump_fw(rtwdev, OCPBASE_IMEM_88XX, 0x10000, "rtw8822c IMEM_");
+ rtw_dump_fw(rtwdev, OCPBASE_EMEM_88XX, 0x20000, "rtw8822c EMEM_");
+ rtw_dump_fw(rtwdev, OCPBASE_ROM_88XX, 0x10000, "rtw8822c ROM_");
+}
+
static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
{
if (enable) {
@@ -1856,6 +2572,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
}
dm_info->rx_evm_dbm[path] = evm_dbm;
}
+ rtw_phy_parsing_cfo(rtwdev, pkt_stat);
}

static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -1911,6 +2628,7 @@ static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
pkt_stat->drv_info_sz);
+ pkt_stat->hdr = hdr;
if (pkt_stat->phy_status) {
phy_status = rx_desc + desc_sz + pkt_stat->shift;
query_phy_status(rtwdev, phy_status, pkt_stat);
@@ -2108,6 +2826,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
}
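
+/* Re-run the synthesizer LC calibration. The register names suggest:
+ * kick the AAC engine through RF_SYN_CTRL/RF_SYN_AAC, poll RF_AAC_CTRL
+ * for completion, then pulse RF_FAST_LCK to latch the result.
+ */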
+static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
+{
+ u32 val;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
+ fsleep(1);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
+ read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
+ true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
+ fsleep(1);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
+}
+
static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
{
struct rtw_iqk_para para = {0};
@@ -2513,9 +3251,9 @@ static void rtw8822c_dpk_pre_setting(struct rtw_dev *rtwdev)
rtw_write_rf(rtwdev, path, RF_RXAGC_OFFSET, RFREG_MASK, 0x0);
rtw_write32(rtwdev, REG_NCTL0, 0x8 | (path << 1));
if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G)
- rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000);
+ rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f100000);
else
- rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000);
+ rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f0d0000);
rtw_write32_mask(rtwdev, REG_DPD_LUT0, BIT_GLOSS_DB, 0x4);
rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x3);
}
@@ -2533,11 +3271,11 @@ static u32 rtw8822c_dpk_rf_setting(struct rtw_dev *rtwdev, u8 path)
rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1);
rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_PWR_TRIM, 0x1);
- rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_TX_OFFSET_VAL, 0x0);
+ rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_BB_GAIN, 0x0);
rtw_write_rf(rtwdev, path, RF_TX_GAIN, RFREG_MASK, ori_txbb);
if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) {
- rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_LB_ATT, 0x1);
+ rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_RF_GAIN, 0x1);
rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x0);
} else {
rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x0);
@@ -3284,9 +4022,9 @@ static void rtw8822c_dpk_reload_data(struct rtw_dev *rtwdev)
rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE,
0x8 | (path << 1));
if (dpk_info->dpk_band == RTW_BAND_2G)
- rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000);
+ rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f100000);
else
- rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000);
+ rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f0d0000);
rtw_write8(rtwdev, REG_DPD_AGC, dpk_info->dpk_txagc[path]);
@@ -3370,8 +4108,11 @@ static void rtw8822c_do_dpk(struct rtw_dev *rtwdev)
static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev)
{
+ rtw8822c_rfk_power_save(rtwdev, false);
+ rtw8822c_do_gapk(rtwdev);
rtw8822c_do_iqk(rtwdev);
rtw8822c_do_dpk(rtwdev);
+ rtw8822c_rfk_power_save(rtwdev, true);
}

static void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
@@ -3406,6 +4147,128 @@ static void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
}
}
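
+/* The 7-bit crystal cap code is duplicated into two adjacent fields
+ * (presumably the XI and XO load-capacitor banks), e.g.
+ * XCAP_EXTEND(0x10) = 0x10 | 0x10 << 7 = 0x0810.
+ */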
+#define XCAP_EXTEND(val) ({typeof(val) _v = (val); _v | _v << 7; })
+static void rtw8822c_set_crystal_cap_reg(struct rtw_dev *rtwdev, u8 crystal_cap)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+ u32 val = 0;
+
+ val = XCAP_EXTEND(crystal_cap);
+ cfo->crystal_cap = crystal_cap;
+ rtw_write32_mask(rtwdev, REG_ANAPAR_XTAL_0, BIT_XCAP_0, val);
+}
+
+static void rtw8822c_set_crystal_cap(struct rtw_dev *rtwdev, u8 crystal_cap)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+
+ if (cfo->crystal_cap == crystal_cap)
+ return;
+
+ rtw8822c_set_crystal_cap_reg(rtwdev, crystal_cap);
+}
+
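+/* On reset, walk the crystal cap one step back toward the efuse
+ * default per invocation rather than jumping straight to it.
+ */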
+static void rtw8822c_cfo_tracking_reset(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+
+ cfo->is_adjust = true;
+
+ if (cfo->crystal_cap > rtwdev->efuse.crystal_cap)
+ rtw8822c_set_crystal_cap(rtwdev, cfo->crystal_cap - 1);
+ else if (cfo->crystal_cap < rtwdev->efuse.crystal_cap)
+ rtw8822c_set_crystal_cap(rtwdev, cfo->crystal_cap + 1);
+}
+
+static void rtw8822c_cfo_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+
+ cfo->crystal_cap = rtwdev->efuse.crystal_cap;
+ cfo->is_adjust = true;
+}
+
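+/* (v << 1) + (v >> 1) is v * 2.5 with truncation, e.g.
+ * REPORT_TO_KHZ(4) = 8 + 2 = 10, so one hardware CFO report unit
+ * corresponds to 2.5 kHz.
+ */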
+#define REPORT_TO_KHZ(val) ({typeof(val) _v = (val); (_v << 1) + (_v >> 1); })
+static s32 rtw8822c_cfo_calc_avg(struct rtw_dev *rtwdev, u8 path_num)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+ s32 cfo_avg, cfo_path_sum = 0, cfo_rpt_sum;
+ u8 i;
+
+ for (i = 0; i < path_num; i++) {
+ cfo_rpt_sum = REPORT_TO_KHZ(cfo->cfo_tail[i]);
+
+ if (cfo->cfo_cnt[i])
+ cfo_avg = cfo_rpt_sum / cfo->cfo_cnt[i];
+ else
+ cfo_avg = 0;
+
+ cfo_path_sum += cfo_avg;
+ }
+
+ for (i = 0; i < path_num; i++) {
+ cfo->cfo_tail[i] = 0;
+ cfo->cfo_cnt[i] = 0;
+ }
+
+ return cfo_path_sum / path_num;
+}
+
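+/* Hysteresis for the crystal-cap tracker: start adjusting once the
+ * averaged CFO exceeds CFO_TRK_ENABLE_TH and stop again at or below
+ * CFO_TRK_STOP_TH. While BT coexistence is active, tracking is
+ * disabled and the efuse default capacitance is restored.
+ */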
+static void rtw8822c_cfo_need_adjust(struct rtw_dev *rtwdev, s32 cfo_avg)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+
+ if (!cfo->is_adjust) {
+ if (abs(cfo_avg) > CFO_TRK_ENABLE_TH)
+ cfo->is_adjust = true;
+ } else {
+ if (abs(cfo_avg) <= CFO_TRK_STOP_TH)
+ cfo->is_adjust = false;
+ }
+
+ if (!rtw_coex_disabled(rtwdev)) {
+ cfo->is_adjust = false;
+ rtw8822c_set_crystal_cap(rtwdev, rtwdev->efuse.crystal_cap);
+ }
+}
+
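+/* Per-watchdog CFO tracking: only meaningful with exactly one
+ * associated station; skip if no new packets were counted, otherwise
+ * average the per-path reports and nudge the crystal cap by one code
+ * when the error exceeds CFO_TRK_ADJ_TH, clamped to XCAP_MASK.
+ */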
+static void rtw8822c_cfo_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_cfo_track *cfo = &dm_info->cfo_track;
+ u8 path_num = rtwdev->hal.rf_path_num;
+ s8 crystal_cap = cfo->crystal_cap;
+ s32 cfo_avg = 0;
+
+ if (rtwdev->sta_cnt != 1) {
+ rtw8822c_cfo_tracking_reset(rtwdev);
+ return;
+ }
+
+ if (cfo->packet_count == cfo->packet_count_pre)
+ return;
+
+ cfo->packet_count_pre = cfo->packet_count;
+ cfo_avg = rtw8822c_cfo_calc_avg(rtwdev, path_num);
+ rtw8822c_cfo_need_adjust(rtwdev, cfo_avg);
+
+ if (cfo->is_adjust) {
+ if (cfo_avg > CFO_TRK_ADJ_TH)
+ crystal_cap++;
+ else if (cfo_avg < -CFO_TRK_ADJ_TH)
+ crystal_cap--;
+
+ crystal_cap = clamp_t(s8, crystal_cap, 0, XCAP_MASK);
+ rtw8822c_set_crystal_cap(rtwdev, (u8)crystal_cap);
+ }
+}
+
static const struct rtw_phy_cck_pd_reg
rtw8822c_cck_pd_reg[RTW_CHANNEL_WIDTH_40 + 1][RTW_RF_PATH_MAX] = {
{
@@ -3538,11 +4401,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
rtw_phy_config_swing_table(rtwdev, &swing_table);
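
+	/* Re-lock the synthesizer when thermal drift crosses lck_threshold. */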
+ if (rtw_phy_pwrtrack_need_lck(rtwdev))
+ rtw8822c_do_lck(rtwdev);
+
for (i = 0; i < rtwdev->hal.rf_path_num; i++)
rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
- if (rtw_phy_pwrtrack_need_iqk(rtwdev))
- rtw8822c_do_iqk(rtwdev);
}

static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
@@ -3971,6 +4835,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.query_rx_desc = rtw8822c_query_rx_desc,
.set_channel = rtw8822c_set_channel,
.mac_init = rtw8822c_mac_init,
+ .dump_fw_crash = rtw8822c_dump_fw_crash,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_mix,
.set_tx_power_index = rtw8822c_set_tx_power_index,
@@ -3984,6 +4849,8 @@ static struct rtw_chip_ops rtw8822c_ops = {
.config_bfee = rtw8822c_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .cfo_init = rtw8822c_cfo_init,
+ .cfo_track = rtw8822c_cfo_track,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -4351,6 +5218,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.dpd_ratemask = DIS_DPD_RATEALL,
.pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
.iqk_threshold = 8,
+ .lck_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
.rx_ldpc = true,
@@ -4360,7 +5228,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.wowlan_stub = &rtw_wowlan_stub_8822c,
.max_sched_scan_ssids = 4,
#endif
- .coex_para_ver = 0x201029,
+ .coex_para_ver = 0x2103181c,
.bt_desired_ver = 0x1c,
.scbd_support = true,
.new_scbd10_def = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index bb2495b8609e..364afc6d851b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -164,175 +164,248 @@ const struct rtw_table name ## _tbl = { \
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
+#define XCAP_MASK GENMASK(6, 0)
+#define CFO_TRK_ENABLE_TH 20
+#define CFO_TRK_STOP_TH 10
+#define CFO_TRK_ADJ_TH 10
-#define REG_TXDFIR0 0x808
-#define REG_DFIRBW 0x810
-#define REG_ANTMAP0 0x820
-#define REG_ANTMAP 0x824
-#define REG_DYMPRITH 0x86c
-#define REG_DYMENTH0 0x870
-#define REG_DYMENTH 0x874
-#define REG_SBD 0x88c
+#define REG_TXDFIR0 0x808
+#define REG_DFIRBW 0x810
+#define REG_ANTMAP0 0x820
+#define BIT_ANT_PATH GENMASK(1, 0)
+#define REG_ANTMAP 0x824
+#define REG_DYMPRITH 0x86c
+#define REG_DYMENTH0 0x870
+#define REG_DYMENTH 0x874
+#define REG_SBD 0x88c
#define BITS_SUBTUNE GENMASK(15, 12)
-#define REG_DYMTHMIN 0x8a4
-#define REG_TXBWCTL 0x9b0
-#define REG_TXCLK 0x9b4
-#define REG_SCOTRK 0xc30
-#define REG_MRCM 0xc38
-#define REG_AGCSWSH 0xc44
-#define REG_ANTWTPD 0xc54
-#define REG_PT_CHSMO 0xcbc
+#define REG_DYMTHMIN 0x8a4
+
+#define REG_TXBWCTL 0x9b0
+#define REG_TXCLK 0x9b4
+
+#define REG_SCOTRK 0xc30
+#define REG_MRCM 0xc38
+#define REG_AGCSWSH 0xc44
+#define REG_ANTWTPD 0xc54
+#define REG_PT_CHSMO 0xcbc
#define BIT_PT_OPT BIT(21)
-#define REG_ORITXCODE 0x1800
-#define REG_3WIRE 0x180c
+
+#define REG_ORITXCODE 0x1800
+#define BIT_PATH_EN BIT(31)
+#define REG_3WIRE 0x180c
+#define BIT_DIS_SHARERX_TXGAT BIT(27)
#define BIT_3WIRE_TX_EN BIT(0)
#define BIT_3WIRE_RX_EN BIT(1)
+#define BIT_3WIRE_EN GENMASK(1, 0)
#define BIT_3WIRE_PI_ON BIT(28)
-#define REG_ANAPAR_A 0x1830
+#define REG_ANAPAR_A 0x1830
#define BIT_ANAPAR_UPDATE BIT(29)
-#define REG_RXAGCCTL0 0x18ac
+#define REG_RFTXEN_GCK_A 0x1864
+#define BIT_RFTXEN_GCK_FORCE_ON BIT(31)
+#define REG_DIS_SHARE_RX_A 0x186c
+#define BIT_TX_SCALE_0DB BIT(7)
+#define REG_RXAGCCTL0 0x18ac
#define BITS_RXAGC_CCK GENMASK(15, 12)
#define BITS_RXAGC_OFDM GENMASK(8, 4)
-#define REG_DCKA_I_0 0x18bc
-#define REG_DCKA_I_1 0x18c0
-#define REG_DCKA_Q_0 0x18d8
-#define REG_DCKA_Q_1 0x18dc
-#define REG_CCKSB 0x1a00
-#define REG_RXCCKSEL 0x1a04
-#define REG_BGCTRL 0x1a14
+#define REG_DCKA_I_0 0x18bc
+#define REG_DCKA_I_1 0x18c0
+#define REG_DCKA_Q_0 0x18d8
+#define REG_DCKA_Q_1 0x18dc
+
+#define REG_CCKSB 0x1a00
+#define BIT_BBMODE GENMASK(2, 1)
+#define REG_RXCCKSEL 0x1a04
+#define REG_BGCTRL 0x1a14
#define BITS_RX_IQ_WEIGHT (BIT(8) | BIT(9))
-#define REG_TXF0 0x1a20
-#define REG_TXF1 0x1a24
-#define REG_TXF2 0x1a28
-#define REG_CCANRX 0x1a2c
+#define REG_TXF0 0x1a20
+#define REG_TXF1 0x1a24
+#define REG_TXF2 0x1a28
+#define REG_CCANRX 0x1a2c
#define BIT_CCK_FA_RST (BIT(14) | BIT(15))
#define BIT_OFDM_FA_RST (BIT(12) | BIT(13))
-#define REG_CCK_FACNT 0x1a5c
-#define REG_CCKTXONLY 0x1a80
+#define REG_CCK_FACNT 0x1a5c
+#define REG_CCKTXONLY 0x1a80
#define BIT_BB_CCK_CHECK_EN BIT(18)
-#define REG_TXF3 0x1a98
-#define REG_TXF4 0x1a9c
-#define REG_TXF5 0x1aa0
-#define REG_TXF6 0x1aac
-#define REG_TXF7 0x1ab0
-#define REG_CCK_SOURCE 0x1abc
+#define REG_TXF3 0x1a98
+#define REG_TXF4 0x1a9c
+#define REG_TXF5 0x1aa0
+#define REG_TXF6 0x1aac
+#define REG_TXF7 0x1ab0
+#define REG_CCK_SOURCE 0x1abc
#define BIT_NBI_EN BIT(30)
-#define REG_IQKSTAT 0x1b10
-#define REG_TXANT 0x1c28
-#define REG_ENCCK 0x1c3c
-#define BIT_CCK_BLK_EN BIT(1)
-#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
-#define REG_CCAMSK 0x1c80
-#define REG_RSTB 0x1c90
-#define BIT_RSTB_3WIRE BIT(8)
-#define REG_RX_BREAK 0x1d2c
-#define BIT_COM_RX_GCK_EN BIT(31)
-#define REG_RXFNCTL 0x1d30
-#define REG_RXIGI 0x1d70
-#define REG_ENFN 0x1e24
-#define REG_TXANTSEG 0x1e28
-#define REG_TXLGMAP 0x1e2c
-#define REG_CCKPATH 0x1e5c
-#define REG_CNT_CTRL 0x1eb4
-#define BIT_ALL_CNT_RST BIT(25)
-#define REG_OFDM_FACNT 0x2d00
-#define REG_OFDM_FACNT1 0x2d04
-#define REG_OFDM_FACNT2 0x2d08
-#define REG_OFDM_FACNT3 0x2d0c
-#define REG_OFDM_FACNT4 0x2d10
-#define REG_OFDM_FACNT5 0x2d20
-#define REG_RPT_CIP 0x2d9c
-#define REG_OFDM_TXCNT 0x2de0
-#define REG_ORITXCODE2 0x4100
-#define REG_3WIRE2 0x410c
-#define REG_ANAPAR_B 0x4130
-#define REG_RXAGCCTL 0x41ac
-#define REG_DCKB_I_0 0x41bc
-#define REG_DCKB_I_1 0x41c0
-#define REG_DCKB_Q_0 0x41d8
-#define REG_DCKB_Q_1 0x41dc
-
-#define RF_MODE_TRXAGC 0x00
-#define RF_RXAGC_OFFSET 0x19
-#define RF_BW_TRXBB 0x1a
-#define RF_TX_GAIN_OFFSET 0x55
-#define RF_TX_GAIN 0x56
-#define RF_TXA_LB_SW 0x63
-#define RF_RXG_GAIN 0x87
-#define RF_RXA_MIX_GAIN 0x8a
-#define RF_EXT_TIA_BW 0x8f
-#define RF_DEBUG 0xde
#define REG_NCTL0 0x1b00
+#define BIT_SEL_PATH GENMASK(2, 1)
+#define BIT_SUBPAGE GENMASK(3, 0)
#define REG_DPD_CTL0_S0 0x1b04
+#define BIT_GS_PWSF GENMASK(27, 0)
#define REG_DPD_CTL1_S0 0x1b08
+#define BIT_DPD_EN BIT(31)
+#define BIT_PS_EN BIT(7)
+#define REG_IQKSTAT 0x1b10
#define REG_IQK_CTL1 0x1b20
+#define BIT_TX_CFIR GENMASK(31, 30)
+#define BIT_CFIR_EN GENMASK(26, 24)
+#define BIT_BYPASS_DPD BIT(25)
+
+#define REG_TX_TONE_IDX 0x1b2c
#define REG_DPD_LUT0 0x1b44
+#define BIT_GLOSS_DB GENMASK(14, 12)
#define REG_DPD_CTL0_S1 0x1b5c
-#define REG_DPD_LUT3 0x1b60
#define REG_DPD_CTL1_S1 0x1b60
#define REG_DPD_AGC 0x1b67
+#define REG_TABLE_SEL 0x1b98
+#define BIT_I_GAIN GENMASK(19, 16)
+#define BIT_GAIN_RST BIT(15)
+#define BIT_Q_GAIN_SEL GENMASK(14, 12)
+#define BIT_Q_GAIN GENMASK(11, 0)
+#define REG_TX_GAIN_SET 0x1b9c
+#define BIT_GAPK_RPT_IDX GENMASK(11, 8)
#define REG_DPD_CTL0 0x1bb4
+#define REG_SINGLE_TONE_SW 0x1bb8
+#define BIT_IRQ_TEST_MODE BIT(20)
#define REG_R_CONFIG 0x1bcc
+#define BIT_INNER_LB BIT(21)
+#define BIT_IQ_SWITCH GENMASK(5, 0)
+#define BIT_2G_SWING 0x2d
+#define BIT_5G_SWING 0x36
#define REG_RXSRAM_CTL 0x1bd4
+#define BIT_RPT_EN BIT(21)
+#define BIT_RPT_SEL GENMASK(20, 16)
+#define BIT_DPD_CLK GENMASK(7, 4)
#define REG_DPD_CTL11 0x1be4
#define REG_DPD_CTL12 0x1be8
#define REG_DPD_CTL15 0x1bf4
#define REG_DPD_CTL16 0x1bf8
#define REG_STAT_RPT 0x1bfc
+#define BIT_RPT_DGAIN GENMASK(27, 16)
+#define BIT_GAPK_RPT0 GENMASK(3, 0)
+#define BIT_GAPK_RPT1 GENMASK(7, 4)
+#define BIT_GAPK_RPT2 GENMASK(11, 8)
+#define BIT_GAPK_RPT3 GENMASK(15, 12)
+#define BIT_GAPK_RPT4 GENMASK(19, 16)
+#define BIT_GAPK_RPT5 GENMASK(23, 20)
+#define BIT_GAPK_RPT6 GENMASK(27, 24)
+#define BIT_GAPK_RPT7 GENMASK(31, 28)
+
+#define REG_TXANT 0x1c28
+#define REG_IQK_CTRL 0x1c38
+#define REG_ENCCK 0x1c3c
+#define BIT_CCK_BLK_EN BIT(1)
+#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
+#define REG_CCAMSK 0x1c80
+#define REG_RSTB 0x1c90
+#define BIT_RSTB_3WIRE BIT(8)
+#define REG_CH_DELAY_EXTR2 0x1cd0
+#define BIT_TST_IQK2SET_SRC BIT(31)
+#define BIT_EN_IOQ_IQK_DPK BIT(30)
+#define BIT_IQK_DPK_RESET_SRC BIT(29)
+#define BIT_IQK_DPK_CLOCK_SRC BIT(28)
+
+#define REG_RX_BREAK 0x1d2c
+#define BIT_COM_RX_GCK_EN BIT(31)
+#define REG_RXFNCTL 0x1d30
+#define REG_CCA_OFF 0x1d58
+#define BIT_CCA_ON_BY_PW GENMASK(11, 3)
+#define REG_RXIGI 0x1d70
+
+#define REG_ENFN 0x1e24
+#define BIT_IQK_DPK_EN BIT(17)
+#define REG_TXANTSEG 0x1e28
+#define BIT_ANTSEG GENMASK(3, 0)
+#define REG_TXLGMAP 0x1e2c
+#define REG_CCKPATH 0x1e5c
+#define REG_TX_FIFO 0x1e70
+#define BIT_STOP_TX GENMASK(3, 0)
+#define REG_CNT_CTRL 0x1eb4
+#define BIT_ALL_CNT_RST BIT(25)
+
+#define REG_OFDM_FACNT 0x2d00
+#define REG_OFDM_FACNT1 0x2d04
+#define REG_OFDM_FACNT2 0x2d08
+#define REG_OFDM_FACNT3 0x2d0c
+#define REG_OFDM_FACNT4 0x2d10
+#define REG_OFDM_FACNT5 0x2d20
+#define REG_RPT_CIP 0x2d9c
+#define BIT_RPT_CIP_STATUS GENMASK(7, 0)
+#define REG_OFDM_TXCNT 0x2de0
+#define REG_ORITXCODE2 0x4100
+#define REG_3WIRE2 0x410c
+#define REG_ANAPAR_B 0x4130
+#define REG_RFTXEN_GCK_B 0x4164
+#define REG_DIS_SHARE_RX_B 0x416c
#define BIT_EXT_TIA_BW BIT(1)
-#define BIT_DE_TRXBW BIT(2)
-#define BIT_DE_TX_GAIN BIT(16)
-#define BIT_RXG_GAIN BIT(18)
-#define BIT_DE_PWR_TRIM BIT(19)
-#define BIT_INNER_LB BIT(21)
-#define BIT_BYPASS_DPD BIT(25)
-#define BIT_DPD_EN BIT(31)
-#define BIT_SUBPAGE GENMASK(3, 0)
+#define REG_RXAGCCTL 0x41ac
+#define REG_DCKB_I_0 0x41bc
+#define REG_DCKB_I_1 0x41c0
+#define REG_DCKB_Q_0 0x41d8
+#define REG_DCKB_Q_1 0x41dc
+
+#define RF_MODE_TRXAGC 0x00
+#define BIT_RF_MODE GENMASK(19, 16)
+#define BIT_RXAGC GENMASK(9, 5)
#define BIT_TXAGC GENMASK(4, 0)
+#define RF_RXAGC_OFFSET 0x19
+#define RF_BW_TRXBB 0x1a
+#define BIT_TX_CCK_IND BIT(16)
+#define BIT_BW_TXBB GENMASK(14, 12)
+#define BIT_BW_RXBB GENMASK(11, 10)
+#define BIT_DBG_CCK_CCA BIT(1)
+#define RF_TX_GAIN_OFFSET 0x55
+#define BIT_BB_GAIN GENMASK(18, 14)
+#define BIT_RF_GAIN GENMASK(4, 2)
+#define RF_TX_GAIN 0x56
#define BIT_GAIN_TXBB GENMASK(4, 0)
+#define RF_IDAC 0x58
+#define BIT_TX_MODE GENMASK(19, 8)
+#define RF_TX_RESULT 0x5f
+#define BIT_GAIN_TX_PAD_H GENMASK(11, 8)
+#define BIT_GAIN_TX_PAD_L GENMASK(7, 4)
+#define RF_PA 0x60
+#define RF_PABIAS_2G_MASK GENMASK(15, 12)
+#define RF_PABIAS_5G_MASK GENMASK(19, 16)
+#define RF_TXA_LB_SW 0x63
+#define BIT_TXA_LB_ATT GENMASK(15, 14)
+#define BIT_LB_SW GENMASK(13, 12)
#define BIT_LB_ATT GENMASK(4, 2)
+#define RF_RXG_GAIN 0x87
+#define BIT_RXG_GAIN BIT(18)
+#define RF_RXA_MIX_GAIN 0x8a
#define BIT_RXA_MIX_GAIN GENMASK(4, 3)
-#define BIT_IQ_SWITCH GENMASK(5, 0)
-#define BIT_DPD_CLK GENMASK(7, 4)
-#define BIT_RXAGC GENMASK(9, 5)
-#define BIT_BW_RXBB GENMASK(11, 10)
-#define BIT_LB_SW GENMASK(13, 12)
-#define BIT_BW_TXBB GENMASK(14, 12)
-#define BIT_GLOSS_DB GENMASK(14, 12)
-#define BIT_TXA_LB_ATT GENMASK(15, 14)
-#define BIT_TX_OFFSET_VAL GENMASK(18, 14)
-#define BIT_RPT_SEL GENMASK(20, 16)
-#define BIT_GS_PWSF GENMASK(27, 0)
-#define BIT_RPT_DGAIN GENMASK(27, 16)
-#define BIT_TX_CFIR GENMASK(31, 30)
-
-#define PPG_THERMAL_A 0x1ef
-#define PPG_THERMAL_B 0x1b0
-#define RF_THEMAL_MASK GENMASK(19, 16)
-#define PPG_2GL_TXAB 0x1d4
-#define PPG_2GM_TXAB 0x1ee
-#define PPG_2GH_TXAB 0x1d2
-#define PPG_2G_A_MASK GENMASK(3, 0)
-#define PPG_2G_B_MASK GENMASK(7, 4)
-#define PPG_5GL1_TXA 0x1ec
-#define PPG_5GL2_TXA 0x1e8
-#define PPG_5GM1_TXA 0x1e4
-#define PPG_5GM2_TXA 0x1e0
-#define PPG_5GH1_TXA 0x1dc
-#define PPG_5GL1_TXB 0x1eb
-#define PPG_5GL2_TXB 0x1e7
-#define PPG_5GM1_TXB 0x1e3
-#define PPG_5GM2_TXB 0x1df
-#define PPG_5GH1_TXB 0x1db
-#define PPG_5G_MASK GENMASK(4, 0)
-#define PPG_PABIAS_2GA 0x1d6
-#define PPG_PABIAS_2GB 0x1d5
-#define PPG_PABIAS_5GA 0x1d8
-#define PPG_PABIAS_5GB 0x1d7
-#define PPG_PABIAS_MASK GENMASK(3, 0)
-#define RF_PABIAS_2G_MASK GENMASK(15, 12)
-#define RF_PABIAS_5G_MASK GENMASK(19, 16)
+#define RF_EXT_TIA_BW 0x8f
+#define BIT_PW_EXT_TIA BIT(1)
+#define RF_DIS_BYPASS_TXBB 0x9e
+#define BIT_TXBB BIT(10)
+#define BIT_TIA_BYPASS BIT(5)
+#define RF_DEBUG 0xde
+#define BIT_DE_PWR_TRIM BIT(19)
+#define BIT_DE_TX_GAIN BIT(16)
+#define BIT_DE_TRXBW BIT(2)
+#define PPG_THERMAL_B 0x1b0
+#define RF_THEMAL_MASK GENMASK(19, 16)
+#define PPG_2GH_TXAB 0x1d2
+#define PPG_2G_A_MASK GENMASK(3, 0)
+#define PPG_2G_B_MASK GENMASK(7, 4)
+#define PPG_2GL_TXAB 0x1d4
+#define PPG_PABIAS_2GB 0x1d5
+#define PPG_PABIAS_2GA 0x1d6
+#define PPG_PABIAS_MASK GENMASK(3, 0)
+#define PPG_PABIAS_5GB 0x1d7
+#define PPG_PABIAS_5GA 0x1d8
+#define PPG_5G_MASK GENMASK(4, 0)
+#define PPG_5GH1_TXB 0x1db
+#define PPG_5GH1_TXA 0x1dc
+#define PPG_5GM2_TXB 0x1df
+#define PPG_5GM2_TXA 0x1e0
+#define PPG_5GM1_TXB 0x1e3
+#define PPG_5GM1_TXA 0x1e4
+#define PPG_5GL2_TXB 0x1e7
+#define PPG_5GL2_TXA 0x1e8
+#define PPG_5GL1_TXB 0x1eb
+#define PPG_5GL1_TXA 0x1ec
+#define PPG_2GM_TXAB 0x1ee
+#define PPG_THERMAL_A 0x1ef
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index ad5715c65de3..822f3da91f1b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -40863,7 +40863,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 0, 1, 144, 76, },
{ 9, 1, 0, 1, 144, 127, },
{ 0, 1, 0, 1, 149, 76, },
- { 2, 1, 0, 1, 149, -128, },
+ { 2, 1, 0, 1, 149, 54, },
{ 1, 1, 0, 1, 149, 127, },
{ 3, 1, 0, 1, 149, 76, },
{ 4, 1, 0, 1, 149, 74, },
@@ -40871,9 +40871,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 149, 76, },
{ 7, 1, 0, 1, 149, 54, },
{ 8, 1, 0, 1, 149, 76, },
- { 9, 1, 0, 1, 149, -128, },
+ { 9, 1, 0, 1, 149, 54, },
{ 0, 1, 0, 1, 153, 76, },
- { 2, 1, 0, 1, 153, -128, },
+ { 2, 1, 0, 1, 153, 54, },
{ 1, 1, 0, 1, 153, 127, },
{ 3, 1, 0, 1, 153, 76, },
{ 4, 1, 0, 1, 153, 74, },
@@ -40881,9 +40881,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 153, 76, },
{ 7, 1, 0, 1, 153, 54, },
{ 8, 1, 0, 1, 153, 76, },
- { 9, 1, 0, 1, 153, -128, },
+ { 9, 1, 0, 1, 153, 54, },
{ 0, 1, 0, 1, 157, 76, },
- { 2, 1, 0, 1, 157, -128, },
+ { 2, 1, 0, 1, 157, 54, },
{ 1, 1, 0, 1, 157, 127, },
{ 3, 1, 0, 1, 157, 76, },
{ 4, 1, 0, 1, 157, 74, },
@@ -40891,9 +40891,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 157, 76, },
{ 7, 1, 0, 1, 157, 54, },
{ 8, 1, 0, 1, 157, 76, },
- { 9, 1, 0, 1, 157, -128, },
+ { 9, 1, 0, 1, 157, 54, },
{ 0, 1, 0, 1, 161, 76, },
- { 2, 1, 0, 1, 161, -128, },
+ { 2, 1, 0, 1, 161, 54, },
{ 1, 1, 0, 1, 161, 127, },
{ 3, 1, 0, 1, 161, 76, },
{ 4, 1, 0, 1, 161, 74, },
@@ -40901,9 +40901,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 161, 76, },
{ 7, 1, 0, 1, 161, 54, },
{ 8, 1, 0, 1, 161, 76, },
- { 9, 1, 0, 1, 161, -128, },
+ { 9, 1, 0, 1, 161, 54, },
{ 0, 1, 0, 1, 165, 76, },
- { 2, 1, 0, 1, 165, -128, },
+ { 2, 1, 0, 1, 165, 54, },
{ 1, 1, 0, 1, 165, 127, },
{ 3, 1, 0, 1, 165, 76, },
{ 4, 1, 0, 1, 165, 74, },
@@ -40911,7 +40911,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 165, 76, },
{ 7, 1, 0, 1, 165, 54, },
{ 8, 1, 0, 1, 165, 76, },
- { 9, 1, 0, 1, 165, -128, },
+ { 9, 1, 0, 1, 165, 54, },
{ 0, 1, 0, 2, 36, 72, },
{ 2, 1, 0, 2, 36, 62, },
{ 1, 1, 0, 2, 36, 62, },
@@ -41113,7 +41113,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 0, 2, 144, 76, },
{ 9, 1, 0, 2, 144, 127, },
{ 0, 1, 0, 2, 149, 76, },
- { 2, 1, 0, 2, 149, -128, },
+ { 2, 1, 0, 2, 149, 54, },
{ 1, 1, 0, 2, 149, 127, },
{ 3, 1, 0, 2, 149, 76, },
{ 4, 1, 0, 2, 149, 74, },
@@ -41121,9 +41121,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 149, 76, },
{ 7, 1, 0, 2, 149, 54, },
{ 8, 1, 0, 2, 149, 76, },
- { 9, 1, 0, 2, 149, -128, },
+ { 9, 1, 0, 2, 149, 54, },
{ 0, 1, 0, 2, 153, 76, },
- { 2, 1, 0, 2, 153, -128, },
+ { 2, 1, 0, 2, 153, 54, },
{ 1, 1, 0, 2, 153, 127, },
{ 3, 1, 0, 2, 153, 76, },
{ 4, 1, 0, 2, 153, 74, },
@@ -41131,9 +41131,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 153, 76, },
{ 7, 1, 0, 2, 153, 54, },
{ 8, 1, 0, 2, 153, 76, },
- { 9, 1, 0, 2, 153, -128, },
+ { 9, 1, 0, 2, 153, 54, },
{ 0, 1, 0, 2, 157, 76, },
- { 2, 1, 0, 2, 157, -128, },
+ { 2, 1, 0, 2, 157, 54, },
{ 1, 1, 0, 2, 157, 127, },
{ 3, 1, 0, 2, 157, 76, },
{ 4, 1, 0, 2, 157, 74, },
@@ -41141,9 +41141,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 157, 76, },
{ 7, 1, 0, 2, 157, 54, },
{ 8, 1, 0, 2, 157, 76, },
- { 9, 1, 0, 2, 157, -128, },
+ { 9, 1, 0, 2, 157, 54, },
{ 0, 1, 0, 2, 161, 76, },
- { 2, 1, 0, 2, 161, -128, },
+ { 2, 1, 0, 2, 161, 54, },
{ 1, 1, 0, 2, 161, 127, },
{ 3, 1, 0, 2, 161, 76, },
{ 4, 1, 0, 2, 161, 74, },
@@ -41151,9 +41151,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 161, 76, },
{ 7, 1, 0, 2, 161, 54, },
{ 8, 1, 0, 2, 161, 76, },
- { 9, 1, 0, 2, 161, -128, },
+ { 9, 1, 0, 2, 161, 54, },
{ 0, 1, 0, 2, 165, 76, },
- { 2, 1, 0, 2, 165, -128, },
+ { 2, 1, 0, 2, 165, 54, },
{ 1, 1, 0, 2, 165, 127, },
{ 3, 1, 0, 2, 165, 76, },
{ 4, 1, 0, 2, 165, 74, },
@@ -41161,7 +41161,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 165, 76, },
{ 7, 1, 0, 2, 165, 54, },
{ 8, 1, 0, 2, 165, 76, },
- { 9, 1, 0, 2, 165, -128, },
+ { 9, 1, 0, 2, 165, 54, },
{ 0, 1, 0, 3, 36, 68, },
{ 2, 1, 0, 3, 36, 38, },
{ 1, 1, 0, 3, 36, 50, },
@@ -41363,7 +41363,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 0, 3, 144, 68, },
{ 9, 1, 0, 3, 144, 127, },
{ 0, 1, 0, 3, 149, 76, },
- { 2, 1, 0, 3, 149, -128, },
+ { 2, 1, 0, 3, 149, 30, },
{ 1, 1, 0, 3, 149, 127, },
{ 3, 1, 0, 3, 149, 76, },
{ 4, 1, 0, 3, 149, 60, },
@@ -41371,9 +41371,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 149, 76, },
{ 7, 1, 0, 3, 149, 30, },
{ 8, 1, 0, 3, 149, 72, },
- { 9, 1, 0, 3, 149, -128, },
+ { 9, 1, 0, 3, 149, 30, },
{ 0, 1, 0, 3, 153, 76, },
- { 2, 1, 0, 3, 153, -128, },
+ { 2, 1, 0, 3, 153, 30, },
{ 1, 1, 0, 3, 153, 127, },
{ 3, 1, 0, 3, 153, 76, },
{ 4, 1, 0, 3, 153, 60, },
@@ -41381,9 +41381,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 153, 76, },
{ 7, 1, 0, 3, 153, 30, },
{ 8, 1, 0, 3, 153, 76, },
- { 9, 1, 0, 3, 153, -128, },
+ { 9, 1, 0, 3, 153, 30, },
{ 0, 1, 0, 3, 157, 76, },
- { 2, 1, 0, 3, 157, -128, },
+ { 2, 1, 0, 3, 157, 30, },
{ 1, 1, 0, 3, 157, 127, },
{ 3, 1, 0, 3, 157, 76, },
{ 4, 1, 0, 3, 157, 60, },
@@ -41391,9 +41391,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 157, 76, },
{ 7, 1, 0, 3, 157, 30, },
{ 8, 1, 0, 3, 157, 76, },
- { 9, 1, 0, 3, 157, -128, },
+ { 9, 1, 0, 3, 157, 30, },
{ 0, 1, 0, 3, 161, 76, },
- { 2, 1, 0, 3, 161, -128, },
+ { 2, 1, 0, 3, 161, 30, },
{ 1, 1, 0, 3, 161, 127, },
{ 3, 1, 0, 3, 161, 76, },
{ 4, 1, 0, 3, 161, 60, },
@@ -41401,9 +41401,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 161, 76, },
{ 7, 1, 0, 3, 161, 30, },
{ 8, 1, 0, 3, 161, 76, },
- { 9, 1, 0, 3, 161, -128, },
+ { 9, 1, 0, 3, 161, 30, },
{ 0, 1, 0, 3, 165, 76, },
- { 2, 1, 0, 3, 165, -128, },
+ { 2, 1, 0, 3, 165, 30, },
{ 1, 1, 0, 3, 165, 127, },
{ 3, 1, 0, 3, 165, 76, },
{ 4, 1, 0, 3, 165, 60, },
@@ -41411,7 +41411,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 165, 76, },
{ 7, 1, 0, 3, 165, 30, },
{ 8, 1, 0, 3, 165, 76, },
- { 9, 1, 0, 3, 165, -128, },
+ { 9, 1, 0, 3, 165, 30, },
{ 0, 1, 1, 2, 38, 66, },
{ 2, 1, 1, 2, 38, 64, },
{ 1, 1, 1, 2, 38, 62, },
@@ -41513,7 +41513,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 1, 2, 142, 72, },
{ 9, 1, 1, 2, 142, 127, },
{ 0, 1, 1, 2, 151, 72, },
- { 2, 1, 1, 2, 151, -128, },
+ { 2, 1, 1, 2, 151, 54, },
{ 1, 1, 1, 2, 151, 127, },
{ 3, 1, 1, 2, 151, 72, },
{ 4, 1, 1, 2, 151, 72, },
@@ -41521,9 +41521,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 151, 72, },
{ 7, 1, 1, 2, 151, 54, },
{ 8, 1, 1, 2, 151, 72, },
- { 9, 1, 1, 2, 151, -128, },
+ { 9, 1, 1, 2, 151, 54, },
{ 0, 1, 1, 2, 159, 72, },
- { 2, 1, 1, 2, 159, -128, },
+ { 2, 1, 1, 2, 159, 54, },
{ 1, 1, 1, 2, 159, 127, },
{ 3, 1, 1, 2, 159, 72, },
{ 4, 1, 1, 2, 159, 72, },
@@ -41531,7 +41531,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 159, 72, },
{ 7, 1, 1, 2, 159, 54, },
{ 8, 1, 1, 2, 159, 72, },
- { 9, 1, 1, 2, 159, -128, },
+ { 9, 1, 1, 2, 159, 54, },
{ 0, 1, 1, 3, 38, 60, },
{ 2, 1, 1, 3, 38, 40, },
{ 1, 1, 1, 3, 38, 50, },
@@ -41633,7 +41633,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 1, 3, 142, 68, },
{ 9, 1, 1, 3, 142, 127, },
{ 0, 1, 1, 3, 151, 72, },
- { 2, 1, 1, 3, 151, -128, },
+ { 2, 1, 1, 3, 151, 30, },
{ 1, 1, 1, 3, 151, 127, },
{ 3, 1, 1, 3, 151, 72, },
{ 4, 1, 1, 3, 151, 66, },
@@ -41641,9 +41641,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 151, 72, },
{ 7, 1, 1, 3, 151, 30, },
{ 8, 1, 1, 3, 151, 68, },
- { 9, 1, 1, 3, 151, -128, },
+ { 9, 1, 1, 3, 151, 30, },
{ 0, 1, 1, 3, 159, 72, },
- { 2, 1, 1, 3, 159, -128, },
+ { 2, 1, 1, 3, 159, 30, },
{ 1, 1, 1, 3, 159, 127, },
{ 3, 1, 1, 3, 159, 72, },
{ 4, 1, 1, 3, 159, 66, },
@@ -41651,7 +41651,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 159, 72, },
{ 7, 1, 1, 3, 159, 30, },
{ 8, 1, 1, 3, 159, 72, },
- { 9, 1, 1, 3, 159, -128, },
+ { 9, 1, 1, 3, 159, 30, },
{ 0, 1, 2, 4, 42, 64, },
{ 2, 1, 2, 4, 42, 64, },
{ 1, 1, 2, 4, 42, 64, },
@@ -41703,7 +41703,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 2, 4, 138, 72, },
{ 9, 1, 2, 4, 138, 127, },
{ 0, 1, 2, 4, 155, 72, },
- { 2, 1, 2, 4, 155, -128, },
+ { 2, 1, 2, 4, 155, 54, },
{ 1, 1, 2, 4, 155, 127, },
{ 3, 1, 2, 4, 155, 72, },
{ 4, 1, 2, 4, 155, 68, },
@@ -41711,7 +41711,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 155, 72, },
{ 7, 1, 2, 4, 155, 54, },
{ 8, 1, 2, 4, 155, 68, },
- { 9, 1, 2, 4, 155, -128, },
+ { 9, 1, 2, 4, 155, 54, },
{ 0, 1, 2, 5, 42, 54, },
{ 2, 1, 2, 5, 42, 40, },
{ 1, 1, 2, 5, 42, 50, },
@@ -41763,7 +41763,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 8, 1, 2, 5, 138, 66, },
{ 9, 1, 2, 5, 138, 127, },
{ 0, 1, 2, 5, 155, 62, },
- { 2, 1, 2, 5, 155, -128, },
+ { 2, 1, 2, 5, 155, 30, },
{ 1, 1, 2, 5, 155, 127, },
{ 3, 1, 2, 5, 155, 62, },
{ 4, 1, 2, 5, 155, 58, },
@@ -41771,145 +41771,145 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 155, 62, },
{ 7, 1, 2, 5, 155, 30, },
{ 8, 1, 2, 5, 155, 62, },
- { 9, 1, 2, 5, 155, -128, },
+ { 9, 1, 2, 5, 155, 30, },
};

RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);

static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 0, 0, 0, 0, 1, 72, },
- { 2, 0, 0, 0, 1, 60, },
- { 1, 0, 0, 0, 1, 68, },
+ { 2, 0, 0, 0, 1, 56, },
+ { 1, 0, 0, 0, 1, 72, },
{ 3, 0, 0, 0, 1, 72, },
{ 4, 0, 0, 0, 1, 76, },
- { 5, 0, 0, 0, 1, 60, },
+ { 5, 0, 0, 0, 1, 56, },
{ 6, 0, 0, 0, 1, 72, },
{ 7, 0, 0, 0, 1, 60, },
{ 8, 0, 0, 0, 1, 72, },
{ 9, 0, 0, 0, 1, 60, },
{ 0, 0, 0, 0, 2, 72, },
- { 2, 0, 0, 0, 2, 60, },
- { 1, 0, 0, 0, 2, 68, },
+ { 2, 0, 0, 0, 2, 56, },
+ { 1, 0, 0, 0, 2, 72, },
{ 3, 0, 0, 0, 2, 72, },
{ 4, 0, 0, 0, 2, 76, },
- { 5, 0, 0, 0, 2, 60, },
+ { 5, 0, 0, 0, 2, 56, },
{ 6, 0, 0, 0, 2, 72, },
{ 7, 0, 0, 0, 2, 60, },
{ 8, 0, 0, 0, 2, 72, },
{ 9, 0, 0, 0, 2, 60, },
{ 0, 0, 0, 0, 3, 76, },
- { 2, 0, 0, 0, 3, 60, },
- { 1, 0, 0, 0, 3, 68, },
+ { 2, 0, 0, 0, 3, 56, },
+ { 1, 0, 0, 0, 3, 72, },
{ 3, 0, 0, 0, 3, 76, },
{ 4, 0, 0, 0, 3, 76, },
- { 5, 0, 0, 0, 3, 60, },
+ { 5, 0, 0, 0, 3, 56, },
{ 6, 0, 0, 0, 3, 76, },
{ 7, 0, 0, 0, 3, 60, },
{ 8, 0, 0, 0, 3, 76, },
{ 9, 0, 0, 0, 3, 60, },
{ 0, 0, 0, 0, 4, 76, },
- { 2, 0, 0, 0, 4, 60, },
- { 1, 0, 0, 0, 4, 68, },
+ { 2, 0, 0, 0, 4, 56, },
+ { 1, 0, 0, 0, 4, 72, },
{ 3, 0, 0, 0, 4, 76, },
{ 4, 0, 0, 0, 4, 76, },
- { 5, 0, 0, 0, 4, 60, },
+ { 5, 0, 0, 0, 4, 56, },
{ 6, 0, 0, 0, 4, 76, },
{ 7, 0, 0, 0, 4, 60, },
{ 8, 0, 0, 0, 4, 76, },
{ 9, 0, 0, 0, 4, 60, },
{ 0, 0, 0, 0, 5, 76, },
- { 2, 0, 0, 0, 5, 60, },
- { 1, 0, 0, 0, 5, 68, },
+ { 2, 0, 0, 0, 5, 56, },
+ { 1, 0, 0, 0, 5, 72, },
{ 3, 0, 0, 0, 5, 76, },
{ 4, 0, 0, 0, 5, 76, },
- { 5, 0, 0, 0, 5, 60, },
+ { 5, 0, 0, 0, 5, 56, },
{ 6, 0, 0, 0, 5, 76, },
{ 7, 0, 0, 0, 5, 60, },
{ 8, 0, 0, 0, 5, 76, },
{ 9, 0, 0, 0, 5, 60, },
{ 0, 0, 0, 0, 6, 76, },
- { 2, 0, 0, 0, 6, 60, },
- { 1, 0, 0, 0, 6, 68, },
+ { 2, 0, 0, 0, 6, 56, },
+ { 1, 0, 0, 0, 6, 72, },
{ 3, 0, 0, 0, 6, 76, },
{ 4, 0, 0, 0, 6, 76, },
- { 5, 0, 0, 0, 6, 60, },
+ { 5, 0, 0, 0, 6, 56, },
{ 6, 0, 0, 0, 6, 76, },
{ 7, 0, 0, 0, 6, 60, },
{ 8, 0, 0, 0, 6, 76, },
{ 9, 0, 0, 0, 6, 60, },
{ 0, 0, 0, 0, 7, 76, },
- { 2, 0, 0, 0, 7, 60, },
- { 1, 0, 0, 0, 7, 68, },
+ { 2, 0, 0, 0, 7, 56, },
+ { 1, 0, 0, 0, 7, 72, },
{ 3, 0, 0, 0, 7, 76, },
{ 4, 0, 0, 0, 7, 76, },
- { 5, 0, 0, 0, 7, 60, },
+ { 5, 0, 0, 0, 7, 56, },
{ 6, 0, 0, 0, 7, 76, },
{ 7, 0, 0, 0, 7, 60, },
{ 8, 0, 0, 0, 7, 76, },
{ 9, 0, 0, 0, 7, 60, },
{ 0, 0, 0, 0, 8, 76, },
- { 2, 0, 0, 0, 8, 60, },
- { 1, 0, 0, 0, 8, 68, },
+ { 2, 0, 0, 0, 8, 56, },
+ { 1, 0, 0, 0, 8, 72, },
{ 3, 0, 0, 0, 8, 76, },
{ 4, 0, 0, 0, 8, 76, },
- { 5, 0, 0, 0, 8, 60, },
+ { 5, 0, 0, 0, 8, 56, },
{ 6, 0, 0, 0, 8, 76, },
{ 7, 0, 0, 0, 8, 60, },
{ 8, 0, 0, 0, 8, 76, },
{ 9, 0, 0, 0, 8, 60, },
{ 0, 0, 0, 0, 9, 76, },
- { 2, 0, 0, 0, 9, 60, },
- { 1, 0, 0, 0, 9, 68, },
+ { 2, 0, 0, 0, 9, 56, },
+ { 1, 0, 0, 0, 9, 72, },
{ 3, 0, 0, 0, 9, 76, },
{ 4, 0, 0, 0, 9, 76, },
- { 5, 0, 0, 0, 9, 60, },
+ { 5, 0, 0, 0, 9, 56, },
{ 6, 0, 0, 0, 9, 76, },
{ 7, 0, 0, 0, 9, 60, },
{ 8, 0, 0, 0, 9, 76, },
{ 9, 0, 0, 0, 9, 60, },
{ 0, 0, 0, 0, 10, 72, },
- { 2, 0, 0, 0, 10, 60, },
- { 1, 0, 0, 0, 10, 68, },
+ { 2, 0, 0, 0, 10, 56, },
+ { 1, 0, 0, 0, 10, 72, },
{ 3, 0, 0, 0, 10, 72, },
{ 4, 0, 0, 0, 10, 76, },
- { 5, 0, 0, 0, 10, 60, },
+ { 5, 0, 0, 0, 10, 56, },
{ 6, 0, 0, 0, 10, 72, },
{ 7, 0, 0, 0, 10, 60, },
{ 8, 0, 0, 0, 10, 72, },
{ 9, 0, 0, 0, 10, 60, },
{ 0, 0, 0, 0, 11, 72, },
- { 2, 0, 0, 0, 11, 60, },
- { 1, 0, 0, 0, 11, 68, },
+ { 2, 0, 0, 0, 11, 56, },
+ { 1, 0, 0, 0, 11, 72, },
{ 3, 0, 0, 0, 11, 72, },
{ 4, 0, 0, 0, 11, 76, },
- { 5, 0, 0, 0, 11, 60, },
+ { 5, 0, 0, 0, 11, 56, },
{ 6, 0, 0, 0, 11, 72, },
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
{ 9, 0, 0, 0, 11, 60, },
{ 0, 0, 0, 0, 12, 44, },
- { 2, 0, 0, 0, 12, 60, },
- { 1, 0, 0, 0, 12, 68, },
+ { 2, 0, 0, 0, 12, 56, },
+ { 1, 0, 0, 0, 12, 72, },
{ 3, 0, 0, 0, 12, 52, },
{ 4, 0, 0, 0, 12, 76, },
- { 5, 0, 0, 0, 12, 60, },
+ { 5, 0, 0, 0, 12, 56, },
{ 6, 0, 0, 0, 12, 52, },
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
{ 9, 0, 0, 0, 12, 60, },
{ 0, 0, 0, 0, 13, 40, },
- { 2, 0, 0, 0, 13, 60, },
- { 1, 0, 0, 0, 13, 68, },
+ { 2, 0, 0, 0, 13, 56, },
+ { 1, 0, 0, 0, 13, 72, },
{ 3, 0, 0, 0, 13, 48, },
{ 4, 0, 0, 0, 13, 76, },
- { 5, 0, 0, 0, 13, 60, },
+ { 5, 0, 0, 0, 13, 56, },
{ 6, 0, 0, 0, 13, 48, },
{ 7, 0, 0, 0, 13, 60, },
{ 8, 0, 0, 0, 13, 48, },
{ 9, 0, 0, 0, 13, 60, },
{ 0, 0, 0, 0, 14, 127, },
{ 2, 0, 0, 0, 14, 127, },
- { 1, 0, 0, 0, 14, 68, },
+ { 1, 0, 0, 0, 14, 72, },
{ 3, 0, 0, 0, 14, 127, },
{ 4, 0, 0, 0, 14, 127, },
{ 5, 0, 0, 0, 14, 127, },
@@ -42041,7 +42041,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
- { 4, 0, 0, 1, 13, 70, },
+ { 4, 0, 0, 1, 13, 74, },
{ 5, 0, 0, 1, 13, 60, },
{ 6, 0, 0, 1, 13, 28, },
{ 7, 0, 0, 1, 13, 60, },
@@ -42181,7 +42181,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
- { 4, 0, 0, 2, 13, 72, },
+ { 4, 0, 0, 2, 13, 74, },
{ 5, 0, 0, 2, 13, 60, },
{ 6, 0, 0, 2, 13, 28, },
{ 7, 0, 0, 2, 13, 60, },
@@ -42201,7 +42201,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 1, 36, },
{ 1, 0, 0, 3, 1, 66, },
{ 3, 0, 0, 3, 1, 52, },
- { 4, 0, 0, 3, 1, 68, },
+ { 4, 0, 0, 3, 1, 72, },
{ 5, 0, 0, 3, 1, 36, },
{ 6, 0, 0, 3, 1, 52, },
{ 7, 0, 0, 3, 1, 36, },
@@ -42211,7 +42211,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 2, 36, },
{ 1, 0, 0, 3, 2, 66, },
{ 3, 0, 0, 3, 2, 60, },
- { 4, 0, 0, 3, 2, 70, },
+ { 4, 0, 0, 3, 2, 72, },
{ 5, 0, 0, 3, 2, 36, },
{ 6, 0, 0, 3, 2, 60, },
{ 7, 0, 0, 3, 2, 36, },
@@ -42221,7 +42221,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 3, 36, },
{ 1, 0, 0, 3, 3, 66, },
{ 3, 0, 0, 3, 3, 64, },
- { 4, 0, 0, 3, 3, 70, },
+ { 4, 0, 0, 3, 3, 72, },
{ 5, 0, 0, 3, 3, 36, },
{ 6, 0, 0, 3, 3, 64, },
{ 7, 0, 0, 3, 3, 36, },
@@ -42231,7 +42231,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 4, 36, },
{ 1, 0, 0, 3, 4, 66, },
{ 3, 0, 0, 3, 4, 68, },
- { 4, 0, 0, 3, 4, 70, },
+ { 4, 0, 0, 3, 4, 72, },
{ 5, 0, 0, 3, 4, 36, },
{ 6, 0, 0, 3, 4, 68, },
{ 7, 0, 0, 3, 4, 36, },
@@ -42241,7 +42241,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 5, 36, },
{ 1, 0, 0, 3, 5, 66, },
{ 3, 0, 0, 3, 5, 76, },
- { 4, 0, 0, 3, 5, 70, },
+ { 4, 0, 0, 3, 5, 72, },
{ 5, 0, 0, 3, 5, 36, },
{ 6, 0, 0, 3, 5, 76, },
{ 7, 0, 0, 3, 5, 36, },
@@ -42251,7 +42251,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 6, 36, },
{ 1, 0, 0, 3, 6, 66, },
{ 3, 0, 0, 3, 6, 76, },
- { 4, 0, 0, 3, 6, 70, },
+ { 4, 0, 0, 3, 6, 72, },
{ 5, 0, 0, 3, 6, 36, },
{ 6, 0, 0, 3, 6, 76, },
{ 7, 0, 0, 3, 6, 36, },
@@ -42261,7 +42261,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 7, 36, },
{ 1, 0, 0, 3, 7, 66, },
{ 3, 0, 0, 3, 7, 76, },
- { 4, 0, 0, 3, 7, 70, },
+ { 4, 0, 0, 3, 7, 72, },
{ 5, 0, 0, 3, 7, 36, },
{ 6, 0, 0, 3, 7, 76, },
{ 7, 0, 0, 3, 7, 36, },
@@ -42271,7 +42271,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 8, 36, },
{ 1, 0, 0, 3, 8, 66, },
{ 3, 0, 0, 3, 8, 68, },
- { 4, 0, 0, 3, 8, 70, },
+ { 4, 0, 0, 3, 8, 72, },
{ 5, 0, 0, 3, 8, 36, },
{ 6, 0, 0, 3, 8, 68, },
{ 7, 0, 0, 3, 8, 36, },
@@ -42281,7 +42281,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 9, 36, },
{ 1, 0, 0, 3, 9, 66, },
{ 3, 0, 0, 3, 9, 64, },
- { 4, 0, 0, 3, 9, 70, },
+ { 4, 0, 0, 3, 9, 72, },
{ 5, 0, 0, 3, 9, 36, },
{ 6, 0, 0, 3, 9, 64, },
{ 7, 0, 0, 3, 9, 36, },
@@ -42291,7 +42291,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 10, 36, },
{ 1, 0, 0, 3, 10, 66, },
{ 3, 0, 0, 3, 10, 60, },
- { 4, 0, 0, 3, 10, 70, },
+ { 4, 0, 0, 3, 10, 72, },
{ 5, 0, 0, 3, 10, 36, },
{ 6, 0, 0, 3, 10, 60, },
{ 7, 0, 0, 3, 10, 36, },
@@ -42301,7 +42301,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 11, 36, },
{ 1, 0, 0, 3, 11, 66, },
{ 3, 0, 0, 3, 11, 52, },
- { 4, 0, 0, 3, 11, 70, },
+ { 4, 0, 0, 3, 11, 72, },
{ 5, 0, 0, 3, 11, 36, },
{ 6, 0, 0, 3, 11, 52, },
{ 7, 0, 0, 3, 11, 36, },
@@ -42311,7 +42311,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
- { 4, 0, 0, 3, 12, 70, },
+ { 4, 0, 0, 3, 12, 72, },
{ 5, 0, 0, 3, 12, 36, },
{ 6, 0, 0, 3, 12, 40, },
{ 7, 0, 0, 3, 12, 36, },
@@ -42321,7 +42321,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
- { 4, 0, 0, 3, 13, 62, },
+ { 4, 0, 0, 3, 13, 68, },
{ 5, 0, 0, 3, 13, 36, },
{ 6, 0, 0, 3, 13, 28, },
{ 7, 0, 0, 3, 13, 36, },
@@ -42501,7 +42501,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 0, 1, 3, 3, 36, },
{ 1, 0, 1, 3, 3, 66, },
{ 3, 0, 1, 3, 3, 48, },
- { 4, 0, 1, 3, 3, 66, },
+ { 4, 0, 1, 3, 3, 68, },
{ 5, 0, 1, 3, 3, 36, },
{ 6, 0, 1, 3, 3, 48, },
{ 7, 0, 1, 3, 3, 36, },
@@ -42618,137 +42618,137 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 0, 1, 3, 14, 127, },
{ 9, 0, 1, 3, 14, 127, },
{ 0, 1, 0, 1, 36, 74, },
- { 2, 1, 0, 1, 36, 62, },
- { 1, 1, 0, 1, 36, 60, },
+ { 2, 1, 0, 1, 36, 58, },
+ { 1, 1, 0, 1, 36, 62, },
{ 3, 1, 0, 1, 36, 62, },
- { 4, 1, 0, 1, 36, 76, },
- { 5, 1, 0, 1, 36, 62, },
+ { 4, 1, 0, 1, 36, 74, },
+ { 5, 1, 0, 1, 36, 58, },
{ 6, 1, 0, 1, 36, 64, },
{ 7, 1, 0, 1, 36, 54, },
{ 8, 1, 0, 1, 36, 62, },
{ 9, 1, 0, 1, 36, 62, },
{ 0, 1, 0, 1, 40, 76, },
- { 2, 1, 0, 1, 40, 62, },
+ { 2, 1, 0, 1, 40, 58, },
{ 1, 1, 0, 1, 40, 62, },
{ 3, 1, 0, 1, 40, 62, },
{ 4, 1, 0, 1, 40, 76, },
- { 5, 1, 0, 1, 40, 62, },
+ { 5, 1, 0, 1, 40, 58, },
{ 6, 1, 0, 1, 40, 64, },
{ 7, 1, 0, 1, 40, 54, },
{ 8, 1, 0, 1, 40, 62, },
{ 9, 1, 0, 1, 40, 62, },
{ 0, 1, 0, 1, 44, 76, },
- { 2, 1, 0, 1, 44, 62, },
+ { 2, 1, 0, 1, 44, 58, },
{ 1, 1, 0, 1, 44, 62, },
{ 3, 1, 0, 1, 44, 62, },
{ 4, 1, 0, 1, 44, 76, },
- { 5, 1, 0, 1, 44, 62, },
+ { 5, 1, 0, 1, 44, 58, },
{ 6, 1, 0, 1, 44, 64, },
{ 7, 1, 0, 1, 44, 54, },
{ 8, 1, 0, 1, 44, 62, },
{ 9, 1, 0, 1, 44, 62, },
{ 0, 1, 0, 1, 48, 76, },
- { 2, 1, 0, 1, 48, 62, },
+ { 2, 1, 0, 1, 48, 58, },
{ 1, 1, 0, 1, 48, 62, },
{ 3, 1, 0, 1, 48, 62, },
- { 4, 1, 0, 1, 48, 54, },
- { 5, 1, 0, 1, 48, 62, },
+ { 4, 1, 0, 1, 48, 58, },
+ { 5, 1, 0, 1, 48, 58, },
{ 6, 1, 0, 1, 48, 64, },
{ 7, 1, 0, 1, 48, 54, },
{ 8, 1, 0, 1, 48, 62, },
{ 9, 1, 0, 1, 48, 62, },
{ 0, 1, 0, 1, 52, 76, },
- { 2, 1, 0, 1, 52, 62, },
+ { 2, 1, 0, 1, 52, 58, },
{ 1, 1, 0, 1, 52, 62, },
{ 3, 1, 0, 1, 52, 64, },
{ 4, 1, 0, 1, 52, 76, },
- { 5, 1, 0, 1, 52, 62, },
+ { 5, 1, 0, 1, 52, 58, },
{ 6, 1, 0, 1, 52, 76, },
{ 7, 1, 0, 1, 52, 54, },
{ 8, 1, 0, 1, 52, 76, },
{ 9, 1, 0, 1, 52, 62, },
{ 0, 1, 0, 1, 56, 76, },
- { 2, 1, 0, 1, 56, 62, },
+ { 2, 1, 0, 1, 56, 58, },
{ 1, 1, 0, 1, 56, 62, },
{ 3, 1, 0, 1, 56, 64, },
{ 4, 1, 0, 1, 56, 76, },
- { 5, 1, 0, 1, 56, 62, },
+ { 5, 1, 0, 1, 56, 58, },
{ 6, 1, 0, 1, 56, 76, },
{ 7, 1, 0, 1, 56, 54, },
{ 8, 1, 0, 1, 56, 76, },
{ 9, 1, 0, 1, 56, 62, },
{ 0, 1, 0, 1, 60, 76, },
- { 2, 1, 0, 1, 60, 62, },
+ { 2, 1, 0, 1, 60, 58, },
{ 1, 1, 0, 1, 60, 62, },
{ 3, 1, 0, 1, 60, 64, },
{ 4, 1, 0, 1, 60, 76, },
- { 5, 1, 0, 1, 60, 62, },
+ { 5, 1, 0, 1, 60, 58, },
{ 6, 1, 0, 1, 60, 76, },
{ 7, 1, 0, 1, 60, 54, },
{ 8, 1, 0, 1, 60, 76, },
{ 9, 1, 0, 1, 60, 62, },
- { 0, 1, 0, 1, 64, 74, },
- { 2, 1, 0, 1, 64, 62, },
- { 1, 1, 0, 1, 64, 60, },
+ { 0, 1, 0, 1, 64, 76, },
+ { 2, 1, 0, 1, 64, 58, },
+ { 1, 1, 0, 1, 64, 62, },
{ 3, 1, 0, 1, 64, 64, },
{ 4, 1, 0, 1, 64, 76, },
- { 5, 1, 0, 1, 64, 62, },
+ { 5, 1, 0, 1, 64, 58, },
{ 6, 1, 0, 1, 64, 74, },
{ 7, 1, 0, 1, 64, 54, },
{ 8, 1, 0, 1, 64, 74, },
{ 9, 1, 0, 1, 64, 62, },
- { 0, 1, 0, 1, 100, 72, },
- { 2, 1, 0, 1, 100, 62, },
+ { 0, 1, 0, 1, 100, 68, },
+ { 2, 1, 0, 1, 100, 58, },
{ 1, 1, 0, 1, 100, 76, },
- { 3, 1, 0, 1, 100, 72, },
+ { 3, 1, 0, 1, 100, 68, },
{ 4, 1, 0, 1, 100, 76, },
- { 5, 1, 0, 1, 100, 62, },
+ { 5, 1, 0, 1, 100, 58, },
{ 6, 1, 0, 1, 100, 72, },
{ 7, 1, 0, 1, 100, 54, },
{ 8, 1, 0, 1, 100, 72, },
{ 9, 1, 0, 1, 100, 127, },
{ 0, 1, 0, 1, 104, 76, },
- { 2, 1, 0, 1, 104, 62, },
+ { 2, 1, 0, 1, 104, 58, },
{ 1, 1, 0, 1, 104, 76, },
{ 3, 1, 0, 1, 104, 76, },
{ 4, 1, 0, 1, 104, 76, },
- { 5, 1, 0, 1, 104, 62, },
+ { 5, 1, 0, 1, 104, 58, },
{ 6, 1, 0, 1, 104, 76, },
{ 7, 1, 0, 1, 104, 54, },
{ 8, 1, 0, 1, 104, 76, },
{ 9, 1, 0, 1, 104, 127, },
{ 0, 1, 0, 1, 108, 76, },
- { 2, 1, 0, 1, 108, 62, },
+ { 2, 1, 0, 1, 108, 58, },
{ 1, 1, 0, 1, 108, 76, },
{ 3, 1, 0, 1, 108, 76, },
{ 4, 1, 0, 1, 108, 76, },
- { 5, 1, 0, 1, 108, 62, },
+ { 5, 1, 0, 1, 108, 58, },
{ 6, 1, 0, 1, 108, 76, },
{ 7, 1, 0, 1, 108, 54, },
{ 8, 1, 0, 1, 108, 76, },
{ 9, 1, 0, 1, 108, 127, },
{ 0, 1, 0, 1, 112, 76, },
- { 2, 1, 0, 1, 112, 62, },
+ { 2, 1, 0, 1, 112, 58, },
{ 1, 1, 0, 1, 112, 76, },
{ 3, 1, 0, 1, 112, 76, },
{ 4, 1, 0, 1, 112, 76, },
- { 5, 1, 0, 1, 112, 62, },
+ { 5, 1, 0, 1, 112, 58, },
{ 6, 1, 0, 1, 112, 76, },
{ 7, 1, 0, 1, 112, 54, },
{ 8, 1, 0, 1, 112, 76, },
{ 9, 1, 0, 1, 112, 127, },
{ 0, 1, 0, 1, 116, 76, },
- { 2, 1, 0, 1, 116, 62, },
+ { 2, 1, 0, 1, 116, 58, },
{ 1, 1, 0, 1, 116, 76, },
{ 3, 1, 0, 1, 116, 76, },
{ 4, 1, 0, 1, 116, 76, },
- { 5, 1, 0, 1, 116, 62, },
+ { 5, 1, 0, 1, 116, 58, },
{ 6, 1, 0, 1, 116, 76, },
{ 7, 1, 0, 1, 116, 54, },
{ 8, 1, 0, 1, 116, 76, },
{ 9, 1, 0, 1, 116, 127, },
{ 0, 1, 0, 1, 120, 76, },
- { 2, 1, 0, 1, 120, 62, },
+ { 2, 1, 0, 1, 120, 58, },
{ 1, 1, 0, 1, 120, 76, },
{ 3, 1, 0, 1, 120, 127, },
{ 4, 1, 0, 1, 120, 76, },
@@ -42758,7 +42758,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 1, 120, 76, },
{ 9, 1, 0, 1, 120, 127, },
{ 0, 1, 0, 1, 124, 76, },
- { 2, 1, 0, 1, 124, 62, },
+ { 2, 1, 0, 1, 124, 58, },
{ 1, 1, 0, 1, 124, 76, },
{ 3, 1, 0, 1, 124, 127, },
{ 4, 1, 0, 1, 124, 76, },
@@ -42768,7 +42768,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 1, 124, 76, },
{ 9, 1, 0, 1, 124, 127, },
{ 0, 1, 0, 1, 128, 76, },
- { 2, 1, 0, 1, 128, 62, },
+ { 2, 1, 0, 1, 128, 58, },
{ 1, 1, 0, 1, 128, 76, },
{ 3, 1, 0, 1, 128, 127, },
{ 4, 1, 0, 1, 128, 76, },
@@ -42778,38 +42778,38 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 1, 128, 76, },
{ 9, 1, 0, 1, 128, 127, },
{ 0, 1, 0, 1, 132, 76, },
- { 2, 1, 0, 1, 132, 62, },
+ { 2, 1, 0, 1, 132, 58, },
{ 1, 1, 0, 1, 132, 76, },
{ 3, 1, 0, 1, 132, 76, },
{ 4, 1, 0, 1, 132, 76, },
- { 5, 1, 0, 1, 132, 62, },
+ { 5, 1, 0, 1, 132, 58, },
{ 6, 1, 0, 1, 132, 76, },
{ 7, 1, 0, 1, 132, 54, },
{ 8, 1, 0, 1, 132, 76, },
{ 9, 1, 0, 1, 132, 127, },
{ 0, 1, 0, 1, 136, 76, },
- { 2, 1, 0, 1, 136, 62, },
+ { 2, 1, 0, 1, 136, 58, },
{ 1, 1, 0, 1, 136, 76, },
{ 3, 1, 0, 1, 136, 76, },
{ 4, 1, 0, 1, 136, 76, },
- { 5, 1, 0, 1, 136, 62, },
+ { 5, 1, 0, 1, 136, 58, },
{ 6, 1, 0, 1, 136, 76, },
{ 7, 1, 0, 1, 136, 54, },
{ 8, 1, 0, 1, 136, 76, },
{ 9, 1, 0, 1, 136, 127, },
- { 0, 1, 0, 1, 140, 72, },
- { 2, 1, 0, 1, 140, 62, },
+ { 0, 1, 0, 1, 140, 74, },
+ { 2, 1, 0, 1, 140, 58, },
{ 1, 1, 0, 1, 140, 76, },
- { 3, 1, 0, 1, 140, 72, },
+ { 3, 1, 0, 1, 140, 74, },
{ 4, 1, 0, 1, 140, 76, },
- { 5, 1, 0, 1, 140, 62, },
+ { 5, 1, 0, 1, 140, 58, },
{ 6, 1, 0, 1, 140, 72, },
{ 7, 1, 0, 1, 140, 54, },
{ 8, 1, 0, 1, 140, 72, },
{ 9, 1, 0, 1, 140, 127, },
{ 0, 1, 0, 1, 144, 76, },
{ 2, 1, 0, 1, 144, 127, },
- { 1, 1, 0, 1, 144, 127, },
+ { 1, 1, 0, 1, 144, 76, },
{ 3, 1, 0, 1, 144, 76, },
{ 4, 1, 0, 1, 144, 76, },
{ 5, 1, 0, 1, 144, 127, },
@@ -42818,7 +42818,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 1, 144, 76, },
{ 9, 1, 0, 1, 144, 127, },
{ 0, 1, 0, 1, 149, 76, },
- { 2, 1, 0, 1, 149, -128, },
+ { 2, 1, 0, 1, 149, 28, },
{ 1, 1, 0, 1, 149, 127, },
{ 3, 1, 0, 1, 149, 76, },
{ 4, 1, 0, 1, 149, 74, },
@@ -42826,9 +42826,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 1, 149, 76, },
{ 7, 1, 0, 1, 149, 54, },
{ 8, 1, 0, 1, 149, 76, },
- { 9, 1, 0, 1, 149, -128, },
+ { 9, 1, 0, 1, 149, 28, },
{ 0, 1, 0, 1, 153, 76, },
- { 2, 1, 0, 1, 153, -128, },
+ { 2, 1, 0, 1, 153, 28, },
{ 1, 1, 0, 1, 153, 127, },
{ 3, 1, 0, 1, 153, 76, },
{ 4, 1, 0, 1, 153, 74, },
@@ -42836,9 +42836,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 1, 153, 76, },
{ 7, 1, 0, 1, 153, 54, },
{ 8, 1, 0, 1, 153, 76, },
- { 9, 1, 0, 1, 153, -128, },
+ { 9, 1, 0, 1, 153, 28, },
{ 0, 1, 0, 1, 157, 76, },
- { 2, 1, 0, 1, 157, -128, },
+ { 2, 1, 0, 1, 157, 28, },
{ 1, 1, 0, 1, 157, 127, },
{ 3, 1, 0, 1, 157, 76, },
{ 4, 1, 0, 1, 157, 74, },
@@ -42846,9 +42846,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 1, 157, 76, },
{ 7, 1, 0, 1, 157, 54, },
{ 8, 1, 0, 1, 157, 76, },
- { 9, 1, 0, 1, 157, -128, },
+ { 9, 1, 0, 1, 157, 28, },
{ 0, 1, 0, 1, 161, 76, },
- { 2, 1, 0, 1, 161, -128, },
+ { 2, 1, 0, 1, 161, 28, },
{ 1, 1, 0, 1, 161, 127, },
{ 3, 1, 0, 1, 161, 76, },
{ 4, 1, 0, 1, 161, 74, },
@@ -42856,9 +42856,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 1, 161, 76, },
{ 7, 1, 0, 1, 161, 54, },
{ 8, 1, 0, 1, 161, 76, },
- { 9, 1, 0, 1, 161, -128, },
+ { 9, 1, 0, 1, 161, 28, },
{ 0, 1, 0, 1, 165, 76, },
- { 2, 1, 0, 1, 165, -128, },
+ { 2, 1, 0, 1, 165, 28, },
{ 1, 1, 0, 1, 165, 127, },
{ 3, 1, 0, 1, 165, 76, },
{ 4, 1, 0, 1, 165, 74, },
@@ -42866,139 +42866,139 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 1, 165, 76, },
{ 7, 1, 0, 1, 165, 54, },
{ 8, 1, 0, 1, 165, 76, },
- { 9, 1, 0, 1, 165, -128, },
- { 0, 1, 0, 2, 36, 72, },
- { 2, 1, 0, 2, 36, 62, },
- { 1, 1, 0, 2, 36, 62, },
+ { 9, 1, 0, 1, 165, 28, },
+ { 0, 1, 0, 2, 36, 70, },
+ { 2, 1, 0, 2, 36, 58, },
+ { 1, 1, 0, 2, 36, 64, },
{ 3, 1, 0, 2, 36, 62, },
{ 4, 1, 0, 2, 36, 76, },
- { 5, 1, 0, 2, 36, 62, },
+ { 5, 1, 0, 2, 36, 58, },
{ 6, 1, 0, 2, 36, 64, },
{ 7, 1, 0, 2, 36, 54, },
{ 8, 1, 0, 2, 36, 62, },
{ 9, 1, 0, 2, 36, 62, },
{ 0, 1, 0, 2, 40, 76, },
- { 2, 1, 0, 2, 40, 62, },
+ { 2, 1, 0, 2, 40, 58, },
{ 1, 1, 0, 2, 40, 62, },
{ 3, 1, 0, 2, 40, 62, },
{ 4, 1, 0, 2, 40, 76, },
- { 5, 1, 0, 2, 40, 62, },
+ { 5, 1, 0, 2, 40, 58, },
{ 6, 1, 0, 2, 40, 64, },
{ 7, 1, 0, 2, 40, 54, },
{ 8, 1, 0, 2, 40, 62, },
{ 9, 1, 0, 2, 40, 62, },
{ 0, 1, 0, 2, 44, 76, },
- { 2, 1, 0, 2, 44, 62, },
+ { 2, 1, 0, 2, 44, 58, },
{ 1, 1, 0, 2, 44, 62, },
{ 3, 1, 0, 2, 44, 62, },
{ 4, 1, 0, 2, 44, 76, },
- { 5, 1, 0, 2, 44, 62, },
+ { 5, 1, 0, 2, 44, 58, },
{ 6, 1, 0, 2, 44, 64, },
{ 7, 1, 0, 2, 44, 54, },
{ 8, 1, 0, 2, 44, 62, },
{ 9, 1, 0, 2, 44, 62, },
{ 0, 1, 0, 2, 48, 76, },
- { 2, 1, 0, 2, 48, 62, },
+ { 2, 1, 0, 2, 48, 58, },
{ 1, 1, 0, 2, 48, 62, },
{ 3, 1, 0, 2, 48, 62, },
- { 4, 1, 0, 2, 48, 54, },
- { 5, 1, 0, 2, 48, 62, },
+ { 4, 1, 0, 2, 48, 58, },
+ { 5, 1, 0, 2, 48, 58, },
{ 6, 1, 0, 2, 48, 64, },
{ 7, 1, 0, 2, 48, 54, },
{ 8, 1, 0, 2, 48, 62, },
{ 9, 1, 0, 2, 48, 62, },
{ 0, 1, 0, 2, 52, 76, },
- { 2, 1, 0, 2, 52, 62, },
+ { 2, 1, 0, 2, 52, 58, },
{ 1, 1, 0, 2, 52, 62, },
{ 3, 1, 0, 2, 52, 64, },
{ 4, 1, 0, 2, 52, 76, },
- { 5, 1, 0, 2, 52, 62, },
+ { 5, 1, 0, 2, 52, 58, },
{ 6, 1, 0, 2, 52, 76, },
{ 7, 1, 0, 2, 52, 54, },
{ 8, 1, 0, 2, 52, 76, },
{ 9, 1, 0, 2, 52, 62, },
{ 0, 1, 0, 2, 56, 76, },
- { 2, 1, 0, 2, 56, 62, },
+ { 2, 1, 0, 2, 56, 58, },
{ 1, 1, 0, 2, 56, 62, },
{ 3, 1, 0, 2, 56, 64, },
{ 4, 1, 0, 2, 56, 76, },
- { 5, 1, 0, 2, 56, 62, },
+ { 5, 1, 0, 2, 56, 58, },
{ 6, 1, 0, 2, 56, 76, },
{ 7, 1, 0, 2, 56, 54, },
{ 8, 1, 0, 2, 56, 76, },
{ 9, 1, 0, 2, 56, 62, },
{ 0, 1, 0, 2, 60, 76, },
- { 2, 1, 0, 2, 60, 62, },
+ { 2, 1, 0, 2, 60, 58, },
{ 1, 1, 0, 2, 60, 62, },
{ 3, 1, 0, 2, 60, 64, },
{ 4, 1, 0, 2, 60, 76, },
- { 5, 1, 0, 2, 60, 62, },
+ { 5, 1, 0, 2, 60, 58, },
{ 6, 1, 0, 2, 60, 76, },
{ 7, 1, 0, 2, 60, 54, },
{ 8, 1, 0, 2, 60, 76, },
{ 9, 1, 0, 2, 60, 62, },
- { 0, 1, 0, 2, 64, 74, },
- { 2, 1, 0, 2, 64, 62, },
- { 1, 1, 0, 2, 64, 60, },
+ { 0, 1, 0, 2, 64, 70, },
+ { 2, 1, 0, 2, 64, 58, },
+ { 1, 1, 0, 2, 64, 62, },
{ 3, 1, 0, 2, 64, 64, },
{ 4, 1, 0, 2, 64, 74, },
- { 5, 1, 0, 2, 64, 62, },
+ { 5, 1, 0, 2, 64, 58, },
{ 6, 1, 0, 2, 64, 74, },
{ 7, 1, 0, 2, 64, 54, },
{ 8, 1, 0, 2, 64, 74, },
{ 9, 1, 0, 2, 64, 62, },
- { 0, 1, 0, 2, 100, 70, },
- { 2, 1, 0, 2, 100, 62, },
+ { 0, 1, 0, 2, 100, 66, },
+ { 2, 1, 0, 2, 100, 58, },
{ 1, 1, 0, 2, 100, 76, },
- { 3, 1, 0, 2, 100, 70, },
+ { 3, 1, 0, 2, 100, 66, },
{ 4, 1, 0, 2, 100, 76, },
- { 5, 1, 0, 2, 100, 62, },
+ { 5, 1, 0, 2, 100, 58, },
{ 6, 1, 0, 2, 100, 70, },
{ 7, 1, 0, 2, 100, 54, },
{ 8, 1, 0, 2, 100, 70, },
{ 9, 1, 0, 2, 100, 127, },
{ 0, 1, 0, 2, 104, 76, },
- { 2, 1, 0, 2, 104, 62, },
+ { 2, 1, 0, 2, 104, 58, },
{ 1, 1, 0, 2, 104, 76, },
{ 3, 1, 0, 2, 104, 76, },
{ 4, 1, 0, 2, 104, 76, },
- { 5, 1, 0, 2, 104, 62, },
+ { 5, 1, 0, 2, 104, 58, },
{ 6, 1, 0, 2, 104, 76, },
{ 7, 1, 0, 2, 104, 54, },
{ 8, 1, 0, 2, 104, 76, },
{ 9, 1, 0, 2, 104, 127, },
{ 0, 1, 0, 2, 108, 76, },
- { 2, 1, 0, 2, 108, 62, },
+ { 2, 1, 0, 2, 108, 58, },
{ 1, 1, 0, 2, 108, 76, },
{ 3, 1, 0, 2, 108, 76, },
{ 4, 1, 0, 2, 108, 76, },
- { 5, 1, 0, 2, 108, 62, },
+ { 5, 1, 0, 2, 108, 58, },
{ 6, 1, 0, 2, 108, 76, },
{ 7, 1, 0, 2, 108, 54, },
{ 8, 1, 0, 2, 108, 76, },
{ 9, 1, 0, 2, 108, 127, },
{ 0, 1, 0, 2, 112, 76, },
- { 2, 1, 0, 2, 112, 62, },
+ { 2, 1, 0, 2, 112, 58, },
{ 1, 1, 0, 2, 112, 76, },
{ 3, 1, 0, 2, 112, 76, },
{ 4, 1, 0, 2, 112, 76, },
- { 5, 1, 0, 2, 112, 62, },
+ { 5, 1, 0, 2, 112, 58, },
{ 6, 1, 0, 2, 112, 76, },
{ 7, 1, 0, 2, 112, 54, },
{ 8, 1, 0, 2, 112, 76, },
{ 9, 1, 0, 2, 112, 127, },
{ 0, 1, 0, 2, 116, 76, },
- { 2, 1, 0, 2, 116, 62, },
+ { 2, 1, 0, 2, 116, 58, },
{ 1, 1, 0, 2, 116, 76, },
{ 3, 1, 0, 2, 116, 76, },
{ 4, 1, 0, 2, 116, 76, },
- { 5, 1, 0, 2, 116, 62, },
+ { 5, 1, 0, 2, 116, 58, },
{ 6, 1, 0, 2, 116, 76, },
{ 7, 1, 0, 2, 116, 54, },
{ 8, 1, 0, 2, 116, 76, },
{ 9, 1, 0, 2, 116, 127, },
{ 0, 1, 0, 2, 120, 76, },
- { 2, 1, 0, 2, 120, 62, },
+ { 2, 1, 0, 2, 120, 58, },
{ 1, 1, 0, 2, 120, 76, },
{ 3, 1, 0, 2, 120, 127, },
{ 4, 1, 0, 2, 120, 76, },
@@ -43008,7 +43008,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 2, 120, 76, },
{ 9, 1, 0, 2, 120, 127, },
{ 0, 1, 0, 2, 124, 76, },
- { 2, 1, 0, 2, 124, 62, },
+ { 2, 1, 0, 2, 124, 58, },
{ 1, 1, 0, 2, 124, 76, },
{ 3, 1, 0, 2, 124, 127, },
{ 4, 1, 0, 2, 124, 76, },
@@ -43018,7 +43018,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 2, 124, 76, },
{ 9, 1, 0, 2, 124, 127, },
{ 0, 1, 0, 2, 128, 76, },
- { 2, 1, 0, 2, 128, 62, },
+ { 2, 1, 0, 2, 128, 58, },
{ 1, 1, 0, 2, 128, 76, },
{ 3, 1, 0, 2, 128, 127, },
{ 4, 1, 0, 2, 128, 76, },
@@ -43028,38 +43028,38 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 2, 128, 76, },
{ 9, 1, 0, 2, 128, 127, },
{ 0, 1, 0, 2, 132, 76, },
- { 2, 1, 0, 2, 132, 62, },
+ { 2, 1, 0, 2, 132, 58, },
{ 1, 1, 0, 2, 132, 76, },
{ 3, 1, 0, 2, 132, 76, },
{ 4, 1, 0, 2, 132, 76, },
- { 5, 1, 0, 2, 132, 62, },
+ { 5, 1, 0, 2, 132, 58, },
{ 6, 1, 0, 2, 132, 76, },
{ 7, 1, 0, 2, 132, 54, },
{ 8, 1, 0, 2, 132, 76, },
{ 9, 1, 0, 2, 132, 127, },
{ 0, 1, 0, 2, 136, 76, },
- { 2, 1, 0, 2, 136, 62, },
+ { 2, 1, 0, 2, 136, 58, },
{ 1, 1, 0, 2, 136, 76, },
{ 3, 1, 0, 2, 136, 76, },
{ 4, 1, 0, 2, 136, 76, },
- { 5, 1, 0, 2, 136, 62, },
+ { 5, 1, 0, 2, 136, 58, },
{ 6, 1, 0, 2, 136, 76, },
{ 7, 1, 0, 2, 136, 54, },
{ 8, 1, 0, 2, 136, 76, },
{ 9, 1, 0, 2, 136, 127, },
- { 0, 1, 0, 2, 140, 70, },
- { 2, 1, 0, 2, 140, 62, },
+ { 0, 1, 0, 2, 140, 66, },
+ { 2, 1, 0, 2, 140, 58, },
{ 1, 1, 0, 2, 140, 76, },
- { 3, 1, 0, 2, 140, 70, },
+ { 3, 1, 0, 2, 140, 66, },
{ 4, 1, 0, 2, 140, 76, },
- { 5, 1, 0, 2, 140, 62, },
+ { 5, 1, 0, 2, 140, 58, },
{ 6, 1, 0, 2, 140, 70, },
{ 7, 1, 0, 2, 140, 54, },
{ 8, 1, 0, 2, 140, 70, },
{ 9, 1, 0, 2, 140, 127, },
{ 0, 1, 0, 2, 144, 76, },
{ 2, 1, 0, 2, 144, 127, },
- { 1, 1, 0, 2, 144, 127, },
+ { 1, 1, 0, 2, 144, 76, },
{ 3, 1, 0, 2, 144, 76, },
{ 4, 1, 0, 2, 144, 76, },
{ 5, 1, 0, 2, 144, 127, },
@@ -43068,7 +43068,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 0, 2, 144, 76, },
{ 9, 1, 0, 2, 144, 127, },
{ 0, 1, 0, 2, 149, 76, },
- { 2, 1, 0, 2, 149, -128, },
+ { 2, 1, 0, 2, 149, 28, },
{ 1, 1, 0, 2, 149, 127, },
{ 3, 1, 0, 2, 149, 76, },
{ 4, 1, 0, 2, 149, 74, },
@@ -43076,9 +43076,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 2, 149, 76, },
{ 7, 1, 0, 2, 149, 54, },
{ 8, 1, 0, 2, 149, 76, },
- { 9, 1, 0, 2, 149, -128, },
+ { 9, 1, 0, 2, 149, 28, },
{ 0, 1, 0, 2, 153, 76, },
- { 2, 1, 0, 2, 153, -128, },
+ { 2, 1, 0, 2, 153, 28, },
{ 1, 1, 0, 2, 153, 127, },
{ 3, 1, 0, 2, 153, 76, },
{ 4, 1, 0, 2, 153, 74, },
@@ -43086,9 +43086,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 2, 153, 76, },
{ 7, 1, 0, 2, 153, 54, },
{ 8, 1, 0, 2, 153, 76, },
- { 9, 1, 0, 2, 153, -128, },
+ { 9, 1, 0, 2, 153, 28, },
{ 0, 1, 0, 2, 157, 76, },
- { 2, 1, 0, 2, 157, -128, },
+ { 2, 1, 0, 2, 157, 28, },
{ 1, 1, 0, 2, 157, 127, },
{ 3, 1, 0, 2, 157, 76, },
{ 4, 1, 0, 2, 157, 74, },
@@ -43096,9 +43096,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 2, 157, 76, },
{ 7, 1, 0, 2, 157, 54, },
{ 8, 1, 0, 2, 157, 76, },
- { 9, 1, 0, 2, 157, -128, },
+ { 9, 1, 0, 2, 157, 28, },
{ 0, 1, 0, 2, 161, 76, },
- { 2, 1, 0, 2, 161, -128, },
+ { 2, 1, 0, 2, 161, 28, },
{ 1, 1, 0, 2, 161, 127, },
{ 3, 1, 0, 2, 161, 76, },
{ 4, 1, 0, 2, 161, 74, },
@@ -43106,9 +43106,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 2, 161, 76, },
{ 7, 1, 0, 2, 161, 54, },
{ 8, 1, 0, 2, 161, 76, },
- { 9, 1, 0, 2, 161, -128, },
+ { 9, 1, 0, 2, 161, 28, },
{ 0, 1, 0, 2, 165, 76, },
- { 2, 1, 0, 2, 165, -128, },
+ { 2, 1, 0, 2, 165, 28, },
{ 1, 1, 0, 2, 165, 127, },
{ 3, 1, 0, 2, 165, 76, },
{ 4, 1, 0, 2, 165, 74, },
@@ -43116,262 +43116,262 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 0, 2, 165, 76, },
{ 7, 1, 0, 2, 165, 54, },
{ 8, 1, 0, 2, 165, 76, },
- { 9, 1, 0, 2, 165, -128, },
- { 0, 1, 0, 3, 36, 68, },
- { 2, 1, 0, 3, 36, 38, },
+ { 9, 1, 0, 2, 165, 28, },
+ { 0, 1, 0, 3, 36, 64, },
+ { 2, 1, 0, 3, 36, 36, },
{ 1, 1, 0, 3, 36, 50, },
{ 3, 1, 0, 3, 36, 38, },
{ 4, 1, 0, 3, 36, 66, },
- { 5, 1, 0, 3, 36, 38, },
+ { 5, 1, 0, 3, 36, 36, },
{ 6, 1, 0, 3, 36, 52, },
{ 7, 1, 0, 3, 36, 30, },
{ 8, 1, 0, 3, 36, 50, },
{ 9, 1, 0, 3, 36, 38, },
{ 0, 1, 0, 3, 40, 68, },
- { 2, 1, 0, 3, 40, 38, },
+ { 2, 1, 0, 3, 40, 36, },
{ 1, 1, 0, 3, 40, 50, },
{ 3, 1, 0, 3, 40, 38, },
{ 4, 1, 0, 3, 40, 66, },
- { 5, 1, 0, 3, 40, 38, },
+ { 5, 1, 0, 3, 40, 36, },
{ 6, 1, 0, 3, 40, 52, },
{ 7, 1, 0, 3, 40, 30, },
{ 8, 1, 0, 3, 40, 50, },
{ 9, 1, 0, 3, 40, 38, },
{ 0, 1, 0, 3, 44, 68, },
- { 2, 1, 0, 3, 44, 38, },
+ { 2, 1, 0, 3, 44, 36, },
{ 1, 1, 0, 3, 44, 50, },
{ 3, 1, 0, 3, 44, 38, },
{ 4, 1, 0, 3, 44, 66, },
- { 5, 1, 0, 3, 44, 38, },
+ { 5, 1, 0, 3, 44, 36, },
{ 6, 1, 0, 3, 44, 52, },
{ 7, 1, 0, 3, 44, 30, },
{ 8, 1, 0, 3, 44, 50, },
{ 9, 1, 0, 3, 44, 38, },
{ 0, 1, 0, 3, 48, 68, },
- { 2, 1, 0, 3, 48, 38, },
+ { 2, 1, 0, 3, 48, 36, },
{ 1, 1, 0, 3, 48, 50, },
{ 3, 1, 0, 3, 48, 38, },
- { 4, 1, 0, 3, 48, 36, },
- { 5, 1, 0, 3, 48, 38, },
+ { 4, 1, 0, 3, 48, 42, },
+ { 5, 1, 0, 3, 48, 36, },
{ 6, 1, 0, 3, 48, 52, },
{ 7, 1, 0, 3, 48, 30, },
{ 8, 1, 0, 3, 48, 50, },
{ 9, 1, 0, 3, 48, 38, },
{ 0, 1, 0, 3, 52, 68, },
- { 2, 1, 0, 3, 52, 38, },
+ { 2, 1, 0, 3, 52, 36, },
{ 1, 1, 0, 3, 52, 50, },
{ 3, 1, 0, 3, 52, 40, },
{ 4, 1, 0, 3, 52, 66, },
- { 5, 1, 0, 3, 52, 38, },
+ { 5, 1, 0, 3, 52, 36, },
{ 6, 1, 0, 3, 52, 68, },
{ 7, 1, 0, 3, 52, 30, },
{ 8, 1, 0, 3, 52, 68, },
{ 9, 1, 0, 3, 52, 38, },
{ 0, 1, 0, 3, 56, 68, },
- { 2, 1, 0, 3, 56, 38, },
+ { 2, 1, 0, 3, 56, 36, },
{ 1, 1, 0, 3, 56, 50, },
{ 3, 1, 0, 3, 56, 40, },
{ 4, 1, 0, 3, 56, 66, },
- { 5, 1, 0, 3, 56, 38, },
+ { 5, 1, 0, 3, 56, 36, },
{ 6, 1, 0, 3, 56, 68, },
{ 7, 1, 0, 3, 56, 30, },
{ 8, 1, 0, 3, 56, 68, },
{ 9, 1, 0, 3, 56, 38, },
- { 0, 1, 0, 3, 60, 66, },
- { 2, 1, 0, 3, 60, 38, },
+ { 0, 1, 0, 3, 60, 68, },
+ { 2, 1, 0, 3, 60, 36, },
{ 1, 1, 0, 3, 60, 50, },
{ 3, 1, 0, 3, 60, 40, },
{ 4, 1, 0, 3, 60, 66, },
- { 5, 1, 0, 3, 60, 38, },
+ { 5, 1, 0, 3, 60, 36, },
{ 6, 1, 0, 3, 60, 66, },
{ 7, 1, 0, 3, 60, 30, },
{ 8, 1, 0, 3, 60, 66, },
{ 9, 1, 0, 3, 60, 38, },
- { 0, 1, 0, 3, 64, 68, },
- { 2, 1, 0, 3, 64, 38, },
+ { 0, 1, 0, 3, 64, 66, },
+ { 2, 1, 0, 3, 64, 36, },
{ 1, 1, 0, 3, 64, 50, },
{ 3, 1, 0, 3, 64, 40, },
{ 4, 1, 0, 3, 64, 66, },
- { 5, 1, 0, 3, 64, 38, },
+ { 5, 1, 0, 3, 64, 36, },
{ 6, 1, 0, 3, 64, 68, },
{ 7, 1, 0, 3, 64, 30, },
{ 8, 1, 0, 3, 64, 68, },
{ 9, 1, 0, 3, 64, 38, },
- { 0, 1, 0, 3, 100, 60, },
- { 2, 1, 0, 3, 100, 38, },
+ { 0, 1, 0, 3, 100, 64, },
+ { 2, 1, 0, 3, 100, 36, },
{ 1, 1, 0, 3, 100, 70, },
- { 3, 1, 0, 3, 100, 60, },
- { 4, 1, 0, 3, 100, 64, },
- { 5, 1, 0, 3, 100, 38, },
+ { 3, 1, 0, 3, 100, 64, },
+ { 4, 1, 0, 3, 100, 66, },
+ { 5, 1, 0, 3, 100, 36, },
{ 6, 1, 0, 3, 100, 60, },
{ 7, 1, 0, 3, 100, 30, },
{ 8, 1, 0, 3, 100, 60, },
{ 9, 1, 0, 3, 100, 127, },
{ 0, 1, 0, 3, 104, 68, },
- { 2, 1, 0, 3, 104, 38, },
+ { 2, 1, 0, 3, 104, 36, },
{ 1, 1, 0, 3, 104, 70, },
{ 3, 1, 0, 3, 104, 68, },
- { 4, 1, 0, 3, 104, 64, },
- { 5, 1, 0, 3, 104, 38, },
+ { 4, 1, 0, 3, 104, 66, },
+ { 5, 1, 0, 3, 104, 36, },
{ 6, 1, 0, 3, 104, 68, },
{ 7, 1, 0, 3, 104, 30, },
{ 8, 1, 0, 3, 104, 68, },
{ 9, 1, 0, 3, 104, 127, },
{ 0, 1, 0, 3, 108, 68, },
- { 2, 1, 0, 3, 108, 38, },
+ { 2, 1, 0, 3, 108, 36, },
{ 1, 1, 0, 3, 108, 70, },
{ 3, 1, 0, 3, 108, 68, },
- { 4, 1, 0, 3, 108, 64, },
- { 5, 1, 0, 3, 108, 38, },
+ { 4, 1, 0, 3, 108, 66, },
+ { 5, 1, 0, 3, 108, 36, },
{ 6, 1, 0, 3, 108, 68, },
{ 7, 1, 0, 3, 108, 30, },
{ 8, 1, 0, 3, 108, 68, },
{ 9, 1, 0, 3, 108, 127, },
{ 0, 1, 0, 3, 112, 68, },
- { 2, 1, 0, 3, 112, 38, },
+ { 2, 1, 0, 3, 112, 36, },
{ 1, 1, 0, 3, 112, 70, },
{ 3, 1, 0, 3, 112, 68, },
- { 4, 1, 0, 3, 112, 64, },
- { 5, 1, 0, 3, 112, 38, },
+ { 4, 1, 0, 3, 112, 66, },
+ { 5, 1, 0, 3, 112, 36, },
{ 6, 1, 0, 3, 112, 68, },
{ 7, 1, 0, 3, 112, 30, },
{ 8, 1, 0, 3, 112, 68, },
{ 9, 1, 0, 3, 112, 127, },
{ 0, 1, 0, 3, 116, 68, },
- { 2, 1, 0, 3, 116, 38, },
+ { 2, 1, 0, 3, 116, 36, },
{ 1, 1, 0, 3, 116, 70, },
{ 3, 1, 0, 3, 116, 68, },
- { 4, 1, 0, 3, 116, 64, },
- { 5, 1, 0, 3, 116, 38, },
+ { 4, 1, 0, 3, 116, 66, },
+ { 5, 1, 0, 3, 116, 36, },
{ 6, 1, 0, 3, 116, 68, },
{ 7, 1, 0, 3, 116, 30, },
{ 8, 1, 0, 3, 116, 68, },
{ 9, 1, 0, 3, 116, 127, },
{ 0, 1, 0, 3, 120, 68, },
- { 2, 1, 0, 3, 120, 38, },
+ { 2, 1, 0, 3, 120, 36, },
{ 1, 1, 0, 3, 120, 70, },
{ 3, 1, 0, 3, 120, 127, },
- { 4, 1, 0, 3, 120, 64, },
+ { 4, 1, 0, 3, 120, 66, },
{ 5, 1, 0, 3, 120, 127, },
{ 6, 1, 0, 3, 120, 68, },
{ 7, 1, 0, 3, 120, 30, },
{ 8, 1, 0, 3, 120, 68, },
{ 9, 1, 0, 3, 120, 127, },
{ 0, 1, 0, 3, 124, 68, },
- { 2, 1, 0, 3, 124, 38, },
+ { 2, 1, 0, 3, 124, 36, },
{ 1, 1, 0, 3, 124, 70, },
{ 3, 1, 0, 3, 124, 127, },
- { 4, 1, 0, 3, 124, 64, },
+ { 4, 1, 0, 3, 124, 66, },
{ 5, 1, 0, 3, 124, 127, },
{ 6, 1, 0, 3, 124, 68, },
{ 7, 1, 0, 3, 124, 30, },
{ 8, 1, 0, 3, 124, 68, },
{ 9, 1, 0, 3, 124, 127, },
{ 0, 1, 0, 3, 128, 68, },
- { 2, 1, 0, 3, 128, 38, },
+ { 2, 1, 0, 3, 128, 36, },
{ 1, 1, 0, 3, 128, 70, },
{ 3, 1, 0, 3, 128, 127, },
- { 4, 1, 0, 3, 128, 64, },
+ { 4, 1, 0, 3, 128, 66, },
{ 5, 1, 0, 3, 128, 127, },
{ 6, 1, 0, 3, 128, 68, },
{ 7, 1, 0, 3, 128, 30, },
{ 8, 1, 0, 3, 128, 68, },
{ 9, 1, 0, 3, 128, 127, },
{ 0, 1, 0, 3, 132, 68, },
- { 2, 1, 0, 3, 132, 38, },
+ { 2, 1, 0, 3, 132, 36, },
{ 1, 1, 0, 3, 132, 70, },
{ 3, 1, 0, 3, 132, 68, },
- { 4, 1, 0, 3, 132, 64, },
- { 5, 1, 0, 3, 132, 38, },
+ { 4, 1, 0, 3, 132, 66, },
+ { 5, 1, 0, 3, 132, 36, },
{ 6, 1, 0, 3, 132, 68, },
{ 7, 1, 0, 3, 132, 30, },
{ 8, 1, 0, 3, 132, 68, },
{ 9, 1, 0, 3, 132, 127, },
{ 0, 1, 0, 3, 136, 68, },
- { 2, 1, 0, 3, 136, 38, },
+ { 2, 1, 0, 3, 136, 36, },
{ 1, 1, 0, 3, 136, 70, },
{ 3, 1, 0, 3, 136, 68, },
- { 4, 1, 0, 3, 136, 64, },
- { 5, 1, 0, 3, 136, 38, },
+ { 4, 1, 0, 3, 136, 66, },
+ { 5, 1, 0, 3, 136, 36, },
{ 6, 1, 0, 3, 136, 68, },
{ 7, 1, 0, 3, 136, 30, },
{ 8, 1, 0, 3, 136, 68, },
{ 9, 1, 0, 3, 136, 127, },
- { 0, 1, 0, 3, 140, 60, },
- { 2, 1, 0, 3, 140, 38, },
+ { 0, 1, 0, 3, 140, 58, },
+ { 2, 1, 0, 3, 140, 36, },
{ 1, 1, 0, 3, 140, 70, },
- { 3, 1, 0, 3, 140, 60, },
- { 4, 1, 0, 3, 140, 64, },
- { 5, 1, 0, 3, 140, 38, },
+ { 3, 1, 0, 3, 140, 58, },
+ { 4, 1, 0, 3, 140, 66, },
+ { 5, 1, 0, 3, 140, 36, },
{ 6, 1, 0, 3, 140, 60, },
{ 7, 1, 0, 3, 140, 30, },
{ 8, 1, 0, 3, 140, 60, },
{ 9, 1, 0, 3, 140, 127, },
{ 0, 1, 0, 3, 144, 68, },
{ 2, 1, 0, 3, 144, 127, },
- { 1, 1, 0, 3, 144, 127, },
+ { 1, 1, 0, 3, 144, 70, },
{ 3, 1, 0, 3, 144, 68, },
- { 4, 1, 0, 3, 144, 64, },
+ { 4, 1, 0, 3, 144, 66, },
{ 5, 1, 0, 3, 144, 127, },
{ 6, 1, 0, 3, 144, 68, },
{ 7, 1, 0, 3, 144, 127, },
{ 8, 1, 0, 3, 144, 68, },
{ 9, 1, 0, 3, 144, 127, },
{ 0, 1, 0, 3, 149, 76, },
- { 2, 1, 0, 3, 149, -128, },
+ { 2, 1, 0, 3, 149, 4, },
{ 1, 1, 0, 3, 149, 127, },
{ 3, 1, 0, 3, 149, 76, },
- { 4, 1, 0, 3, 149, 60, },
+ { 4, 1, 0, 3, 149, 62, },
{ 5, 1, 0, 3, 149, 76, },
{ 6, 1, 0, 3, 149, 76, },
{ 7, 1, 0, 3, 149, 30, },
{ 8, 1, 0, 3, 149, 72, },
- { 9, 1, 0, 3, 149, -128, },
+ { 9, 1, 0, 3, 149, 4, },
{ 0, 1, 0, 3, 153, 76, },
- { 2, 1, 0, 3, 153, -128, },
+ { 2, 1, 0, 3, 153, 4, },
{ 1, 1, 0, 3, 153, 127, },
{ 3, 1, 0, 3, 153, 76, },
- { 4, 1, 0, 3, 153, 60, },
+ { 4, 1, 0, 3, 153, 62, },
{ 5, 1, 0, 3, 153, 76, },
{ 6, 1, 0, 3, 153, 76, },
{ 7, 1, 0, 3, 153, 30, },
{ 8, 1, 0, 3, 153, 76, },
- { 9, 1, 0, 3, 153, -128, },
+ { 9, 1, 0, 3, 153, 4, },
{ 0, 1, 0, 3, 157, 76, },
- { 2, 1, 0, 3, 157, -128, },
+ { 2, 1, 0, 3, 157, 4, },
{ 1, 1, 0, 3, 157, 127, },
{ 3, 1, 0, 3, 157, 76, },
- { 4, 1, 0, 3, 157, 60, },
+ { 4, 1, 0, 3, 157, 62, },
{ 5, 1, 0, 3, 157, 76, },
{ 6, 1, 0, 3, 157, 76, },
{ 7, 1, 0, 3, 157, 30, },
{ 8, 1, 0, 3, 157, 76, },
- { 9, 1, 0, 3, 157, -128, },
+ { 9, 1, 0, 3, 157, 4, },
{ 0, 1, 0, 3, 161, 76, },
- { 2, 1, 0, 3, 161, -128, },
+ { 2, 1, 0, 3, 161, 4, },
{ 1, 1, 0, 3, 161, 127, },
{ 3, 1, 0, 3, 161, 76, },
- { 4, 1, 0, 3, 161, 60, },
+ { 4, 1, 0, 3, 161, 62, },
{ 5, 1, 0, 3, 161, 76, },
{ 6, 1, 0, 3, 161, 76, },
{ 7, 1, 0, 3, 161, 30, },
{ 8, 1, 0, 3, 161, 76, },
- { 9, 1, 0, 3, 161, -128, },
+ { 9, 1, 0, 3, 161, 4, },
{ 0, 1, 0, 3, 165, 76, },
- { 2, 1, 0, 3, 165, -128, },
+ { 2, 1, 0, 3, 165, 4, },
{ 1, 1, 0, 3, 165, 127, },
{ 3, 1, 0, 3, 165, 76, },
- { 4, 1, 0, 3, 165, 60, },
+ { 4, 1, 0, 3, 165, 62, },
{ 5, 1, 0, 3, 165, 76, },
{ 6, 1, 0, 3, 165, 76, },
{ 7, 1, 0, 3, 165, 30, },
{ 8, 1, 0, 3, 165, 76, },
- { 9, 1, 0, 3, 165, -128, },
+ { 9, 1, 0, 3, 165, 4, },
{ 0, 1, 1, 2, 38, 66, },
{ 2, 1, 1, 2, 38, 64, },
- { 1, 1, 1, 2, 38, 62, },
+ { 1, 1, 1, 2, 38, 64, },
{ 3, 1, 1, 2, 38, 64, },
- { 4, 1, 1, 2, 38, 72, },
+ { 4, 1, 1, 2, 38, 64, },
{ 5, 1, 1, 2, 38, 64, },
{ 6, 1, 1, 2, 38, 64, },
{ 7, 1, 1, 2, 38, 54, },
@@ -43379,9 +43379,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 9, 1, 1, 2, 38, 64, },
{ 0, 1, 1, 2, 46, 72, },
{ 2, 1, 1, 2, 46, 64, },
- { 1, 1, 1, 2, 46, 62, },
+ { 1, 1, 1, 2, 46, 64, },
{ 3, 1, 1, 2, 46, 64, },
- { 4, 1, 1, 2, 46, 60, },
+ { 4, 1, 1, 2, 46, 70, },
{ 5, 1, 1, 2, 46, 64, },
{ 6, 1, 1, 2, 46, 64, },
{ 7, 1, 1, 2, 46, 54, },
@@ -43389,7 +43389,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 9, 1, 1, 2, 46, 64, },
{ 0, 1, 1, 2, 54, 72, },
{ 2, 1, 1, 2, 54, 64, },
- { 1, 1, 1, 2, 54, 62, },
+ { 1, 1, 1, 2, 54, 64, },
{ 3, 1, 1, 2, 54, 64, },
{ 4, 1, 1, 2, 54, 72, },
{ 5, 1, 1, 2, 54, 64, },
@@ -43397,21 +43397,21 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 1, 1, 2, 54, 54, },
{ 8, 1, 1, 2, 54, 72, },
{ 9, 1, 1, 2, 54, 64, },
- { 0, 1, 1, 2, 62, 64, },
+ { 0, 1, 1, 2, 62, 60, },
{ 2, 1, 1, 2, 62, 64, },
{ 1, 1, 1, 2, 62, 62, },
- { 3, 1, 1, 2, 62, 64, },
- { 4, 1, 1, 2, 62, 70, },
+ { 3, 1, 1, 2, 62, 60, },
+ { 4, 1, 1, 2, 62, 60, },
{ 5, 1, 1, 2, 62, 64, },
{ 6, 1, 1, 2, 62, 64, },
{ 7, 1, 1, 2, 62, 54, },
{ 8, 1, 1, 2, 62, 64, },
{ 9, 1, 1, 2, 62, 64, },
- { 0, 1, 1, 2, 102, 58, },
+ { 0, 1, 1, 2, 102, 60, },
{ 2, 1, 1, 2, 102, 64, },
{ 1, 1, 1, 2, 102, 72, },
- { 3, 1, 1, 2, 102, 58, },
- { 4, 1, 1, 2, 102, 72, },
+ { 3, 1, 1, 2, 102, 60, },
+ { 4, 1, 1, 2, 102, 64, },
{ 5, 1, 1, 2, 102, 64, },
{ 6, 1, 1, 2, 102, 58, },
{ 7, 1, 1, 2, 102, 54, },
@@ -43459,7 +43459,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 9, 1, 1, 2, 134, 127, },
{ 0, 1, 1, 2, 142, 72, },
{ 2, 1, 1, 2, 142, 127, },
- { 1, 1, 1, 2, 142, 127, },
+ { 1, 1, 1, 2, 142, 72, },
{ 3, 1, 1, 2, 142, 72, },
{ 4, 1, 1, 2, 142, 72, },
{ 5, 1, 1, 2, 142, 127, },
@@ -43468,7 +43468,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 8, 1, 1, 2, 142, 72, },
{ 9, 1, 1, 2, 142, 127, },
{ 0, 1, 1, 2, 151, 72, },
- { 2, 1, 1, 2, 151, -128, },
+ { 2, 1, 1, 2, 151, 28, },
{ 1, 1, 1, 2, 151, 127, },
{ 3, 1, 1, 2, 151, 72, },
{ 4, 1, 1, 2, 151, 72, },
@@ -43476,9 +43476,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 1, 2, 151, 72, },
{ 7, 1, 1, 2, 151, 54, },
{ 8, 1, 1, 2, 151, 72, },
- { 9, 1, 1, 2, 151, -128, },
+ { 9, 1, 1, 2, 151, 28, },
{ 0, 1, 1, 2, 159, 72, },
- { 2, 1, 1, 2, 159, -128, },
+ { 2, 1, 1, 2, 159, 28, },
{ 1, 1, 1, 2, 159, 127, },
{ 3, 1, 1, 2, 159, 72, },
{ 4, 1, 1, 2, 159, 72, },
@@ -43486,12 +43486,12 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 1, 2, 159, 72, },
{ 7, 1, 1, 2, 159, 54, },
{ 8, 1, 1, 2, 159, 72, },
- { 9, 1, 1, 2, 159, -128, },
+ { 9, 1, 1, 2, 159, 28, },
{ 0, 1, 1, 3, 38, 60, },
{ 2, 1, 1, 3, 38, 40, },
{ 1, 1, 1, 3, 38, 50, },
{ 3, 1, 1, 3, 38, 40, },
- { 4, 1, 1, 3, 38, 62, },
+ { 4, 1, 1, 3, 38, 54, },
{ 5, 1, 1, 3, 38, 40, },
{ 6, 1, 1, 3, 38, 52, },
{ 7, 1, 1, 3, 38, 30, },
@@ -43501,7 +43501,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 46, 40, },
{ 1, 1, 1, 3, 46, 50, },
{ 3, 1, 1, 3, 46, 40, },
- { 4, 1, 1, 3, 46, 46, },
+ { 4, 1, 1, 3, 46, 54, },
{ 5, 1, 1, 3, 46, 40, },
{ 6, 1, 1, 3, 46, 52, },
{ 7, 1, 1, 3, 46, 30, },
@@ -43511,7 +43511,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 54, 40, },
{ 1, 1, 1, 3, 54, 50, },
{ 3, 1, 1, 3, 54, 40, },
- { 4, 1, 1, 3, 54, 62, },
+ { 4, 1, 1, 3, 54, 66, },
{ 5, 1, 1, 3, 54, 40, },
{ 6, 1, 1, 3, 54, 68, },
{ 7, 1, 1, 3, 54, 30, },
@@ -43521,17 +43521,17 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 62, 40, },
{ 1, 1, 1, 3, 62, 48, },
{ 3, 1, 1, 3, 62, 40, },
- { 4, 1, 1, 3, 62, 58, },
+ { 4, 1, 1, 3, 62, 50, },
{ 5, 1, 1, 3, 62, 40, },
{ 6, 1, 1, 3, 62, 58, },
{ 7, 1, 1, 3, 62, 30, },
{ 8, 1, 1, 3, 62, 58, },
{ 9, 1, 1, 3, 62, 40, },
- { 0, 1, 1, 3, 102, 54, },
+ { 0, 1, 1, 3, 102, 56, },
{ 2, 1, 1, 3, 102, 40, },
{ 1, 1, 1, 3, 102, 70, },
- { 3, 1, 1, 3, 102, 54, },
- { 4, 1, 1, 3, 102, 64, },
+ { 3, 1, 1, 3, 102, 56, },
+ { 4, 1, 1, 3, 102, 54, },
{ 5, 1, 1, 3, 102, 40, },
{ 6, 1, 1, 3, 102, 54, },
{ 7, 1, 1, 3, 102, 30, },
@@ -43541,7 +43541,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 110, 40, },
{ 1, 1, 1, 3, 110, 70, },
{ 3, 1, 1, 3, 110, 68, },
- { 4, 1, 1, 3, 110, 64, },
+ { 4, 1, 1, 3, 110, 66, },
{ 5, 1, 1, 3, 110, 40, },
{ 6, 1, 1, 3, 110, 68, },
{ 7, 1, 1, 3, 110, 30, },
@@ -43551,7 +43551,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 118, 40, },
{ 1, 1, 1, 3, 118, 70, },
{ 3, 1, 1, 3, 118, 127, },
- { 4, 1, 1, 3, 118, 64, },
+ { 4, 1, 1, 3, 118, 66, },
{ 5, 1, 1, 3, 118, 127, },
{ 6, 1, 1, 3, 118, 68, },
{ 7, 1, 1, 3, 118, 30, },
@@ -43561,7 +43561,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 126, 40, },
{ 1, 1, 1, 3, 126, 70, },
{ 3, 1, 1, 3, 126, 127, },
- { 4, 1, 1, 3, 126, 64, },
+ { 4, 1, 1, 3, 126, 66, },
{ 5, 1, 1, 3, 126, 127, },
{ 6, 1, 1, 3, 126, 68, },
{ 7, 1, 1, 3, 126, 30, },
@@ -43571,7 +43571,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 2, 1, 1, 3, 134, 40, },
{ 1, 1, 1, 3, 134, 70, },
{ 3, 1, 1, 3, 134, 68, },
- { 4, 1, 1, 3, 134, 64, },
+ { 4, 1, 1, 3, 134, 66, },
{ 5, 1, 1, 3, 134, 40, },
{ 6, 1, 1, 3, 134, 68, },
{ 7, 1, 1, 3, 134, 30, },
@@ -43579,16 +43579,16 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 9, 1, 1, 3, 134, 127, },
{ 0, 1, 1, 3, 142, 68, },
{ 2, 1, 1, 3, 142, 127, },
- { 1, 1, 1, 3, 142, 127, },
+ { 1, 1, 1, 3, 142, 70, },
{ 3, 1, 1, 3, 142, 68, },
- { 4, 1, 1, 3, 142, 64, },
+ { 4, 1, 1, 3, 142, 66, },
{ 5, 1, 1, 3, 142, 127, },
{ 6, 1, 1, 3, 142, 68, },
{ 7, 1, 1, 3, 142, 127, },
{ 8, 1, 1, 3, 142, 68, },
{ 9, 1, 1, 3, 142, 127, },
{ 0, 1, 1, 3, 151, 72, },
- { 2, 1, 1, 3, 151, -128, },
+ { 2, 1, 1, 3, 151, 4, },
{ 1, 1, 1, 3, 151, 127, },
{ 3, 1, 1, 3, 151, 72, },
{ 4, 1, 1, 3, 151, 66, },
@@ -43596,9 +43596,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 1, 3, 151, 72, },
{ 7, 1, 1, 3, 151, 30, },
{ 8, 1, 1, 3, 151, 68, },
- { 9, 1, 1, 3, 151, -128, },
+ { 9, 1, 1, 3, 151, 4, },
{ 0, 1, 1, 3, 159, 72, },
- { 2, 1, 1, 3, 159, -128, },
+ { 2, 1, 1, 3, 159, 4, },
{ 1, 1, 1, 3, 159, 127, },
{ 3, 1, 1, 3, 159, 72, },
{ 4, 1, 1, 3, 159, 66, },
@@ -43606,32 +43606,32 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 6, 1, 1, 3, 159, 72, },
{ 7, 1, 1, 3, 159, 30, },
{ 8, 1, 1, 3, 159, 72, },
- { 9, 1, 1, 3, 159, -128, },
- { 0, 1, 2, 4, 42, 64, },
+ { 9, 1, 1, 3, 159, 4, },
+ { 0, 1, 2, 4, 42, 68, },
{ 2, 1, 2, 4, 42, 64, },
{ 1, 1, 2, 4, 42, 64, },
{ 3, 1, 2, 4, 42, 64, },
- { 4, 1, 2, 4, 42, 68, },
+ { 4, 1, 2, 4, 42, 60, },
{ 5, 1, 2, 4, 42, 64, },
{ 6, 1, 2, 4, 42, 64, },
{ 7, 1, 2, 4, 42, 54, },
{ 8, 1, 2, 4, 42, 62, },
{ 9, 1, 2, 4, 42, 64, },
- { 0, 1, 2, 4, 58, 62, },
+ { 0, 1, 2, 4, 58, 60, },
{ 2, 1, 2, 4, 58, 64, },
{ 1, 1, 2, 4, 58, 64, },
- { 3, 1, 2, 4, 58, 62, },
- { 4, 1, 2, 4, 58, 64, },
+ { 3, 1, 2, 4, 58, 60, },
+ { 4, 1, 2, 4, 58, 56, },
{ 5, 1, 2, 4, 58, 64, },
{ 6, 1, 2, 4, 58, 62, },
{ 7, 1, 2, 4, 58, 54, },
{ 8, 1, 2, 4, 58, 62, },
{ 9, 1, 2, 4, 58, 64, },
- { 0, 1, 2, 4, 106, 58, },
+ { 0, 1, 2, 4, 106, 60, },
{ 2, 1, 2, 4, 106, 64, },
{ 1, 1, 2, 4, 106, 72, },
- { 3, 1, 2, 4, 106, 58, },
- { 4, 1, 2, 4, 106, 66, },
+ { 3, 1, 2, 4, 106, 60, },
+ { 4, 1, 2, 4, 106, 58, },
{ 5, 1, 2, 4, 106, 64, },
{ 6, 1, 2, 4, 106, 58, },
{ 7, 1, 2, 4, 106, 54, },
@@ -43649,84 +43649,84 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 9, 1, 2, 4, 122, 127, },
{ 0, 1, 2, 4, 138, 72, },
{ 2, 1, 2, 4, 138, 127, },
- { 1, 1, 2, 4, 138, 127, },
+ { 1, 1, 2, 4, 138, 72, },
{ 3, 1, 2, 4, 138, 72, },
- { 4, 1, 2, 4, 138, 68, },
+ { 4, 1, 2, 4, 138, 70, },
{ 5, 1, 2, 4, 138, 127, },
{ 6, 1, 2, 4, 138, 72, },
{ 7, 1, 2, 4, 138, 127, },
{ 8, 1, 2, 4, 138, 72, },
{ 9, 1, 2, 4, 138, 127, },
{ 0, 1, 2, 4, 155, 72, },
- { 2, 1, 2, 4, 155, -128, },
+ { 2, 1, 2, 4, 155, 28, },
{ 1, 1, 2, 4, 155, 127, },
{ 3, 1, 2, 4, 155, 72, },
- { 4, 1, 2, 4, 155, 68, },
+ { 4, 1, 2, 4, 155, 62, },
{ 5, 1, 2, 4, 155, 72, },
{ 6, 1, 2, 4, 155, 72, },
{ 7, 1, 2, 4, 155, 54, },
{ 8, 1, 2, 4, 155, 68, },
- { 9, 1, 2, 4, 155, -128, },
- { 0, 1, 2, 5, 42, 54, },
+ { 9, 1, 2, 4, 155, 28, },
+ { 0, 1, 2, 5, 42, 56, },
{ 2, 1, 2, 5, 42, 40, },
{ 1, 1, 2, 5, 42, 50, },
{ 3, 1, 2, 5, 42, 40, },
- { 4, 1, 2, 5, 42, 58, },
+ { 4, 1, 2, 5, 42, 50, },
{ 5, 1, 2, 5, 42, 40, },
{ 6, 1, 2, 5, 42, 52, },
{ 7, 1, 2, 5, 42, 30, },
{ 8, 1, 2, 5, 42, 50, },
{ 9, 1, 2, 5, 42, 40, },
- { 0, 1, 2, 5, 58, 52, },
+ { 0, 1, 2, 5, 58, 54, },
{ 2, 1, 2, 5, 58, 40, },
{ 1, 1, 2, 5, 58, 50, },
{ 3, 1, 2, 5, 58, 40, },
- { 4, 1, 2, 5, 58, 56, },
+ { 4, 1, 2, 5, 58, 46, },
{ 5, 1, 2, 5, 58, 40, },
{ 6, 1, 2, 5, 58, 52, },
{ 7, 1, 2, 5, 58, 30, },
{ 8, 1, 2, 5, 58, 52, },
{ 9, 1, 2, 5, 58, 40, },
- { 0, 1, 2, 5, 106, 50, },
+ { 0, 1, 2, 5, 106, 48, },
{ 2, 1, 2, 5, 106, 40, },
{ 1, 1, 2, 5, 106, 72, },
- { 3, 1, 2, 5, 106, 50, },
- { 4, 1, 2, 5, 106, 56, },
+ { 3, 1, 2, 5, 106, 48, },
+ { 4, 1, 2, 5, 106, 50, },
{ 5, 1, 2, 5, 106, 40, },
{ 6, 1, 2, 5, 106, 50, },
{ 7, 1, 2, 5, 106, 30, },
{ 8, 1, 2, 5, 106, 50, },
{ 9, 1, 2, 5, 106, 127, },
- { 0, 1, 2, 5, 122, 66, },
+ { 0, 1, 2, 5, 122, 70, },
{ 2, 1, 2, 5, 122, 40, },
{ 1, 1, 2, 5, 122, 72, },
{ 3, 1, 2, 5, 122, 127, },
- { 4, 1, 2, 5, 122, 56, },
+ { 4, 1, 2, 5, 122, 62, },
{ 5, 1, 2, 5, 122, 127, },
{ 6, 1, 2, 5, 122, 66, },
{ 7, 1, 2, 5, 122, 30, },
{ 8, 1, 2, 5, 122, 66, },
{ 9, 1, 2, 5, 122, 127, },
- { 0, 1, 2, 5, 138, 66, },
+ { 0, 1, 2, 5, 138, 70, },
{ 2, 1, 2, 5, 138, 127, },
- { 1, 1, 2, 5, 138, 127, },
- { 3, 1, 2, 5, 138, 66, },
- { 4, 1, 2, 5, 138, 58, },
+ { 1, 1, 2, 5, 138, 72, },
+ { 3, 1, 2, 5, 138, 70, },
+ { 4, 1, 2, 5, 138, 62, },
{ 5, 1, 2, 5, 138, 127, },
{ 6, 1, 2, 5, 138, 66, },
{ 7, 1, 2, 5, 138, 127, },
{ 8, 1, 2, 5, 138, 66, },
{ 9, 1, 2, 5, 138, 127, },
- { 0, 1, 2, 5, 155, 62, },
- { 2, 1, 2, 5, 155, -128, },
+ { 0, 1, 2, 5, 155, 72, },
+ { 2, 1, 2, 5, 155, 4, },
{ 1, 1, 2, 5, 155, 127, },
- { 3, 1, 2, 5, 155, 62, },
- { 4, 1, 2, 5, 155, 58, },
+ { 3, 1, 2, 5, 155, 72, },
+ { 4, 1, 2, 5, 155, 52, },
{ 5, 1, 2, 5, 155, 72, },
{ 6, 1, 2, 5, 155, 62, },
{ 7, 1, 2, 5, 155, 30, },
{ 8, 1, 2, 5, 155, 62, },
- { 9, 1, 2, 5, 155, -128, },
+ { 9, 1, 2, 5, 155, 4, },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5);
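For orientation, each row in the table above follows the rtw88 driver's rtw_txpwr_lmt_cfg_pair layout. The field order in the sketch below matches the driver's header; the comments on units and sentinel values are assumptions from context, not taken from this patch:

        struct rtw_txpwr_lmt_cfg_pair {
                u8 regd;        /* regulatory domain index (FCC, ETSI, MKK, ...) */
                u8 band;        /* 0 = 2.4 GHz, 1 = 5 GHz */
                u8 bw;          /* bandwidth index: 0 = 20 MHz, 1 = 40, 2 = 80 */
                u8 rs;          /* rate section (CCK/OFDM/HT/VHT) */
                u8 ch;          /* channel number */
                s8 txpwr_lmt;   /* limit in driver power-index units; 127 appears
                                 * to mark "no limit defined" for that entry */
        };

Read this way, the hunks above adjust per-domain limits (for example domain 4 on channel 13) and replace -128 entries with small positive limits on channels 149-165.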
diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c
index fdaa5a7260dd..a02904921729 100644
--- a/drivers/net/wireless/rsi/rsi_91x_ps.c
+++ b/drivers/net/wireless/rsi/rsi_91x_ps.c
@@ -16,7 +16,6 @@
#include <linux/etherdevice.h>
#include <linux/if.h>
-#include <linux/version.h>
#include "rsi_debugfs.h"
#include "rsi_mgmt.h"
#include "rsi_common.h"
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index fe0287b22a25..e0c502bc4270 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
}
static const struct dev_pm_ops rsi_pm_ops = {
.suspend = rsi_suspend,
- .resume = rsi_resume,
+ .resume_noirq = rsi_resume,
.freeze = rsi_freeze,
.thaw = rsi_thaw,
.restore = rsi_restore,
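The rsi change above moves the resume callback from the normal resume phase to the _noirq phase. As a rough sketch (illustrative names, not from the patch): resume_noirq callbacks in dev_pm_ops run while the device's interrupts are still disabled, so state the IRQ handler depends on must be restored there rather than in .resume:

        #include <linux/pm.h>

        /* Illustrative only: runs before IRQs are re-enabled for the device */
        static int example_resume_noirq(struct device *dev)
        {
                /* re-init hardware state the interrupt handler depends on */
                return 0;
        }

        static const struct dev_pm_ops example_pm_ops = {
                .resume_noirq = example_resume_noirq,
        };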
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
index c1cf19d1e376..30e03aa6a529 100644
--- a/drivers/net/wireless/rsi/rsi_boot_params.h
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_coex.h b/drivers/net/wireless/rsi/rsi_coex.h
index 0fdc67f37a56..2c14e4c651b9 100644
--- a/drivers/net/wireless/rsi/rsi_coex.h
+++ b/drivers/net/wireless/rsi/rsi_coex.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2018 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index 60f1f286b030..7aa5124575cf 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
index 580ad3b3f710..a6a28640ad40 100644
--- a/drivers/net/wireless/rsi/rsi_debugfs.h
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 46e36df9e8e3..d044a440fa08 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2017 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 73a19e43106b..a1065e5a92b4 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 2ce2dcf57441..236b21482f38 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_ps.h b/drivers/net/wireless/rsi/rsi_ps.h
index 98ff6a4ced57..0be2f1e201e5 100644
--- a/drivers/net/wireless/rsi/rsi_ps.h
+++ b/drivers/net/wireless/rsi/rsi_ps.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2017 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 1c756263cf15..7c91b126b350 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -1,4 +1,4 @@
-/**
+/*
* @section LICENSE
* Copyright (c) 2014 Redpine Signals Inc.
*
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index 8702f434b569..254d19b66412 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -1,4 +1,4 @@
-/**
+/*
* @section LICENSE
* Copyright (c) 2014 Redpine Signals Inc.
*
diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c
index c364a3987618..8bade5d89f12 100644
--- a/drivers/net/wireless/st/cw1200/bh.c
+++ b/drivers/net/wireless/st/cw1200/bh.c
@@ -42,9 +42,6 @@ enum cw1200_bh_pm_state {
CW1200_BH_RESUME,
};
-typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
- u8 *data, size_t size);
-
static void cw1200_bh_work(struct work_struct *work)
{
struct cw1200_common *priv =
diff --git a/drivers/net/wireless/st/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h
index 1ffa47994bb9..89fdc9115e9d 100644
--- a/drivers/net/wireless/st/cw1200/wsm.h
+++ b/drivers/net/wireless/st/cw1200/wsm.h
@@ -785,8 +785,6 @@ struct wsm_tx_confirm {
};
/* 3.15 */
-typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv,
- struct wsm_tx_confirm *arg);
/* Note that ideology of wsm_tx struct is different against the rest of
* WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input
@@ -862,9 +860,6 @@ struct wsm_rx {
/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */
#define WSM_RX_EXTRA_HEADROOM (16)
-typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg,
- struct sk_buff **skb_p);
-
/* 3.17 */
struct wsm_event {
/* WSM_STATUS_... */
@@ -1180,8 +1175,6 @@ struct wsm_switch_channel {
int wsm_switch_channel(struct cw1200_common *priv,
const struct wsm_switch_channel *arg);
-typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv);
-
#define WSM_START_REQ_ID 0x0017
#define WSM_START_RESP_ID 0x0417
@@ -1240,8 +1233,6 @@ int wsm_start_find(struct cw1200_common *priv);
int wsm_stop_find(struct cw1200_common *priv);
-typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status);
-
struct wsm_suspend_resume {
/* See 3.52 */
/* Link ID */
@@ -1256,9 +1247,6 @@ struct wsm_suspend_resume {
/* [out] */ int queue;
};
-typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv,
- struct wsm_suspend_resume *arg);
-
/* 3.54 Update-IE request. */
struct wsm_update_ie {
/* WSM_UPDATE_IE_... */
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index e14d88e558f0..85abd0a2d1c9 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
wl->min_mr_fw_ver : wl->min_sr_fw_ver;
char min_fw_str[32] = "";
+ int off = 0;
int i;
/* the chip must be exactly equal */
@@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
return 0;
fail:
- for (i = 0; i < NUM_FW_VER; i++)
+ for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
if (min_ver[i] == WLCORE_FW_VER_IGNORE)
- snprintf(min_fw_str, sizeof(min_fw_str),
- "%s*.", min_fw_str);
+ off += snprintf(min_fw_str + off,
+ sizeof(min_fw_str) - off,
+ "*.");
else
- snprintf(min_fw_str, sizeof(min_fw_str),
- "%s%u.", min_fw_str, min_ver[i]);
+ off += snprintf(min_fw_str + off,
+ sizeof(min_fw_str) - off,
+ "%u.", min_ver[i]);
wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
"Please use at least FW %s\n"
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index b143293e694f..a9e13e6d65c5 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file, \
struct wl1271 *wl = file->private_data; \
struct struct_type *stats = wl->stats.fw_stats; \
char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
+ int pos = 0; \
int i; \
\
wl1271_debugfs_update_stats(wl); \
\
- for (i = 0; i < len; i++) \
- snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
- buf, i, stats->sub.name[i]); \
+ for (i = 0; i < len && pos < sizeof(buf); i++) \
+ pos += snprintf(buf + pos, sizeof(buf) - pos, \
+ "[%d] = %d\n", i, stats->sub.name[i]); \
\
return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
} \
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index e98e04ee9a2c..91f276dd22a1 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -240,7 +240,7 @@ struct iw_mgmt_essid_pset {
} __packed;
/*
- * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly
+ * According to 802.11 Wireless Networks, the definitive guide - O'Reilly
* Pg 75
*/
#define IW_DATA_RATE_MAX_LABELS 8
@@ -379,16 +379,7 @@ struct wl3501_get_confirm {
u8 mib_value[100];
};
-struct wl3501_join_req {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- struct iw_mgmt_data_rset operational_rset;
- u16 reserved2;
- u16 timeout;
- u16 probe_delay;
- u8 timestamp[8];
- u8 local_time[8];
+struct wl3501_req {
u16 beacon_period;
u16 dtim_period;
u16 cap_info;
@@ -401,6 +392,19 @@ struct wl3501_join_req {
struct iw_mgmt_data_rset bss_basic_rset;
};
+struct wl3501_join_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ struct iw_mgmt_data_rset operational_rset;
+ u16 reserved2;
+ u16 timeout;
+ u16 probe_delay;
+ u8 timestamp[8];
+ u8 local_time[8];
+ struct wl3501_req req;
+};
+
struct wl3501_join_confirm {
u16 next_blk;
u8 sig_id;
@@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
u16 status;
char timestamp[8];
char localtime[8];
- u16 beacon_period;
- u16 dtim_period;
- u16 cap_info;
- u8 bss_type;
- u8 bssid[ETH_ALEN];
- struct iw_mgmt_essid_pset ssid;
- struct iw_mgmt_ds_pset ds_pset;
- struct iw_mgmt_cf_pset cf_pset;
- struct iw_mgmt_ibss_pset ibss_pset;
- struct iw_mgmt_data_rset bss_basic_rset;
+ struct wl3501_req req;
u8 rssi;
};
@@ -471,8 +466,10 @@ struct wl3501_md_req {
u16 size;
u8 pri;
u8 service_class;
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
+ struct {
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+ } addr;
};
struct wl3501_md_ind {
@@ -484,8 +481,10 @@ struct wl3501_md_ind {
u8 reception;
u8 pri;
u8 service_class;
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
+ struct {
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+ } addr;
};
struct wl3501_md_confirm {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 8ca5789c7b37..672f5d5f3f2c 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
struct wl3501_md_req sig = {
.sig_id = WL3501_SIG_MD_REQ,
};
+ size_t sig_addr_len = sizeof(sig.addr);
u8 *pdata = (char *)data;
int rc = -EIO;
@@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
goto out;
}
rc = 0;
- memcpy(&sig.daddr[0], pdata, 12);
- pktlen = len - 12;
- pdata += 12;
+ memcpy(&sig.addr, pdata, sig_addr_len);
+ pktlen = len - sig_addr_len;
+ pdata += sig_addr_len;
sig.data = bf;
if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
u8 addr4[ETH_ALEN] = {
@@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
struct wl3501_join_req sig = {
.sig_id = WL3501_SIG_JOIN_REQ,
.timeout = 10,
- .ds_pset = {
+ .req.ds_pset = {
.el = {
.id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
.len = 1,
@@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
},
};
- memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
+ memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
return wl3501_esbq_exec(this, &sig, sizeof(sig));
}
@@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
if (sig.status == WL3501_STATUS_SUCCESS) {
pr_debug("success");
if ((this->net_type == IW_MODE_INFRA &&
- (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
+ (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
(this->net_type == IW_MODE_ADHOC &&
- (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
+ (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
this->net_type == IW_MODE_AUTO) {
if (!this->essid.el.len)
matchflag = 1;
else if (this->essid.el.len == 3 &&
!memcmp(this->essid.essid, "ANY", 3))
matchflag = 1;
- else if (this->essid.el.len != sig.ssid.el.len)
+ else if (this->essid.el.len != sig.req.ssid.el.len)
matchflag = 0;
- else if (memcmp(this->essid.essid, sig.ssid.essid,
+ else if (memcmp(this->essid.essid, sig.req.ssid.essid,
this->essid.el.len))
matchflag = 0;
else
matchflag = 1;
if (matchflag) {
for (i = 0; i < this->bss_cnt; i++) {
- if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
+ if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
+ sig.req.bssid)) {
matchflag = 0;
break;
}
}
}
if (matchflag && (i < 20)) {
- memcpy(&this->bss_set[i].beacon_period,
- &sig.beacon_period, 73);
+ memcpy(&this->bss_set[i].req,
+ &sig.req, sizeof(sig.req));
this->bss_cnt++;
this->rssi = sig.rssi;
+ this->bss_set[i].rssi = sig.rssi;
}
}
} else if (sig.status == WL3501_STATUS_TIMEOUT) {
@@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
if (this->join_sta_bss < this->bss_cnt) {
const int i = this->join_sta_bss;
memcpy(this->bssid,
- this->bss_set[i].bssid, ETH_ALEN);
- this->chan = this->bss_set[i].ds_pset.chan;
+ this->bss_set[i].req.bssid, ETH_ALEN);
+ this->chan = this->bss_set[i].req.ds_pset.chan;
iw_copy_mgmt_info_element(&this->keep_essid.el,
- &this->bss_set[i].ssid.el);
+ &this->bss_set[i].req.ssid.el);
wl3501_mgmt_auth(this);
}
} else {
const int i = this->join_sta_bss;
- memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
- this->chan = this->bss_set[i].ds_pset.chan;
+ memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
+ this->chan = this->bss_set[i].req.ds_pset.chan;
iw_copy_mgmt_info_element(&this->keep_essid.el,
- &this->bss_set[i].ssid.el);
+ &this->bss_set[i].req.ssid.el);
wl3501_online(dev);
}
} else {
@@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
} else {
skb->dev = dev;
skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
- skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
+ skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
+ sizeof(sig.addr));
wl3501_receive(this, skb->data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
@@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
for (i = 0; i < this->bss_cnt; ++i) {
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
+ memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_ADDR_LEN);
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- iwe.u.data.length = this->bss_set[i].ssid.el.len;
+ iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
current_ev = iwe_stream_add_point(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe,
- this->bss_set[i].ssid.essid);
+ this->bss_set[i].req.ssid.essid);
iwe.cmd = SIOCGIWMODE;
- iwe.u.mode = this->bss_set[i].bss_type;
+ iwe.u.mode = this->bss_set[i].req.bss_type;
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_UINT_LEN);
iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
+ iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
iwe.u.freq.e = 0;
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_FREQ_LEN);
iwe.cmd = SIOCGIWENCODE;
- if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
+ if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
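The wl3501 changes above replace hand-counted copy lengths (12, 72, and 73 bytes) with sizeof() over newly factored-out structs, so the copies can no longer overrun the source fields. A minimal sketch of the pattern, with hypothetical names and a reduced field set:

        #include <string.h>

        /* Fields shared by two message layouts move into one struct */
        struct common_req {
                unsigned short beacon_period;
                unsigned short dtim_period;
                unsigned char bssid[6];
        };

        struct join_req {
                unsigned char sig_id;
                struct common_req req;
        };

        struct scan_confirm {
                unsigned short status;
                struct common_req req;
                unsigned char rssi;
        };

        static void copy_req(struct join_req *dst,
                             const struct scan_confirm *src)
        {
                /* sizeof stays correct if common_req gains or loses fields,
                 * unlike a hand-counted byte length such as 72 */
                memcpy(&dst->req, &src->req, sizeof(dst->req));
        }

The daddr/saddr grouping into a struct addr in wl3501_md_req and wl3501_md_ind serves the same purpose for the 12-byte address copies.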
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
new file mode 100644
index 000000000000..7ad1920120bc
--- /dev/null
+++ b/drivers/net/wwan/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wireless WAN device configuration
+#
+
+menuconfig WWAN
+ bool "Wireless WAN"
+ help
+ This section contains Wireless WAN configuration for WWAN framework
+ and drivers.
+
+if WWAN
+
+config WWAN_CORE
+ tristate "WWAN Driver Core"
+ help
+ Say Y here if you want to use the WWAN driver core. This driver
+ provides a common framework for WWAN drivers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wwan.
+
+config MHI_WWAN_CTRL
+ tristate "MHI WWAN control driver for QCOM-based PCIe modems"
+ select WWAN_CORE
+ depends on MHI_BUS
+ help
+ MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem
+ control protocols/ports to userspace, including AT, MBIM, QMI, DIAG
+ and FIREHOSE. These protocols can be accessed directly from userspace
+ (e.g. AT commands) or via libraries/tools (e.g. libmbim, libqmi,
+ libqcdm...).
+
+ To compile this driver as a module, choose M here: the module will be
+ called mhi_wwan_ctrl.
+
+endif # WWAN
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
new file mode 100644
index 000000000000..556cd90958ca
--- /dev/null
+++ b/drivers/net/wwan/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux WWAN device drivers.
+#
+
+obj-$(CONFIG_WWAN_CORE) += wwan.o
+wwan-objs += wwan_core.o
+
+obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
new file mode 100644
index 000000000000..1bc6b69aa530
--- /dev/null
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/wwan.h>
+
+/* MHI wwan flags */
+enum mhi_wwan_flags {
+ MHI_WWAN_DL_CAP,
+ MHI_WWAN_UL_CAP,
+ MHI_WWAN_RX_REFILL,
+};
+
+#define MHI_WWAN_MAX_MTU 0x8000
+
+struct mhi_wwan_dev {
+ /* Lower level is a mhi dev, upper level is a wwan port */
+ struct mhi_device *mhi_dev;
+ struct wwan_port *wwan_port;
+
+ /* State and capabilities */
+ unsigned long flags;
+ size_t mtu;
+
+ /* Protect against concurrent TX and TX-completion (bh) */
+ spinlock_t tx_lock;
+
+ /* Protect RX budget and rx_refill scheduling */
+ spinlock_t rx_lock;
+ struct work_struct rx_refill;
+
+ /* RX budget is initially set to the size of the MHI RX queue and is
+ * used to limit the number of allocated and queued packets. It is
+ * decremented on data queueing and incremented on data release.
+ */
+ unsigned int rx_budget;
+};
+
+/* Increment RX budget and schedule RX refill if necessary */
+static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
+{
+ spin_lock(&mhiwwan->rx_lock);
+
+ mhiwwan->rx_budget++;
+
+ if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
+ schedule_work(&mhiwwan->rx_refill);
+
+ spin_unlock(&mhiwwan->rx_lock);
+}
+
+/* Decrement RX budget if non-zero and return true on success */
+static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
+{
+ bool ret = false;
+
+ spin_lock(&mhiwwan->rx_lock);
+
+ if (mhiwwan->rx_budget) {
+ mhiwwan->rx_budget--;
+ if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
+ ret = true;
+ }
+
+ spin_unlock(&mhiwwan->rx_lock);
+
+ return ret;
+}
+
+static void __mhi_skb_destructor(struct sk_buff *skb)
+{
+ /* RX buffer has been consumed, increase the allowed budget */
+ mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg);
+}
+
+static void mhi_wwan_ctrl_refill_work(struct work_struct *work)
+{
+ struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill);
+ struct mhi_device *mhi_dev = mhiwwan->mhi_dev;
+
+ while (mhi_wwan_rx_budget_dec(mhiwwan)) {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL);
+ if (!skb) {
+ mhi_wwan_rx_budget_inc(mhiwwan);
+ break;
+ }
+
+		/* To prevent unlimited buffer allocation if nothing consumes
+		 * the RX buffers (passed to the WWAN core), track their
+		 * lifespan so that no more than the allowed budget is ever
+		 * allocated.
+		 */
+ skb->destructor = __mhi_skb_destructor;
+ skb_shinfo(skb)->destructor_arg = mhiwwan;
+
+ if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) {
+ dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+static int mhi_wwan_ctrl_start(struct wwan_port *port)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ /* Start mhi device's channel(s) */
+ ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
+ if (ret)
+ return ret;
+
+ /* Don't allocate more buffers than MHI channel queue size */
+ mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE);
+
+ /* Add buffers to the MHI inbound queue */
+ if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) {
+ set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
+ mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill);
+ }
+
+ return 0;
+}
+
+static void mhi_wwan_ctrl_stop(struct wwan_port *port)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+
+ spin_lock(&mhiwwan->rx_lock);
+ clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
+ spin_unlock(&mhiwwan->rx_lock);
+
+ cancel_work_sync(&mhiwwan->rx_refill);
+
+ mhi_unprepare_from_transfer(mhiwwan->mhi_dev);
+}
+
+static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ if (skb->len > mhiwwan->mtu)
+ return -EMSGSIZE;
+
+ if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags))
+ return -EOPNOTSUPP;
+
+ /* Queue the packet for MHI transfer and check fullness of the queue */
+ spin_lock_bh(&mhiwwan->tx_lock);
+ ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
+ wwan_port_txoff(port);
+ spin_unlock_bh(&mhiwwan->tx_lock);
+
+ return ret;
+}
+
+static const struct wwan_port_ops wwan_pops = {
+ .start = mhi_wwan_ctrl_start,
+ .stop = mhi_wwan_ctrl_stop,
+ .tx = mhi_wwan_ctrl_tx,
+};
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+ struct wwan_port *port = mhiwwan->wwan_port;
+ struct sk_buff *skb = mhi_result->buf_addr;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+	/* The MHI core is done with the buffer, release it */
+ consume_skb(skb);
+
+	/* There is likely a new slot available in the MHI queue, re-allow TX */
+ spin_lock_bh(&mhiwwan->tx_lock);
+ if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
+ wwan_port_txon(port);
+ spin_unlock_bh(&mhiwwan->tx_lock);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+ struct wwan_port *port = mhiwwan->wwan_port;
+ struct sk_buff *skb = mhi_result->buf_addr;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ if (mhi_result->transaction_status &&
+ mhi_result->transaction_status != -EOVERFLOW) {
+ kfree_skb(skb);
+ return;
+ }
+
+	/* The MHI core does not update skb->len, do it before forwarding */
+ skb_put(skb, mhi_result->bytes_xferd);
+ wwan_port_rx(port, skb);
+
+ /* Do not increment rx budget nor refill RX buffers now, wait for the
+ * buffer to be consumed. Done from __mhi_skb_destructor().
+ */
+}
+
+static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_wwan_dev *mhiwwan;
+ struct wwan_port *port;
+
+ mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL);
+ if (!mhiwwan)
+ return -ENOMEM;
+
+ mhiwwan->mhi_dev = mhi_dev;
+ mhiwwan->mtu = MHI_WWAN_MAX_MTU;
+ INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work);
+ spin_lock_init(&mhiwwan->tx_lock);
+ spin_lock_init(&mhiwwan->rx_lock);
+
+ if (mhi_dev->dl_chan)
+ set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags);
+ if (mhi_dev->ul_chan)
+ set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags);
+
+ dev_set_drvdata(&mhi_dev->dev, mhiwwan);
+
+ /* Register as a wwan port, id->driver_data contains wwan port type */
+ port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
+ &wwan_pops, mhiwwan);
+ if (IS_ERR(port)) {
+ kfree(mhiwwan);
+ return PTR_ERR(port);
+ }
+
+ mhiwwan->wwan_port = port;
+
+ return 0;
+}
+
+static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+
+ wwan_remove_port(mhiwwan->wwan_port);
+ kfree(mhiwwan);
+}
+
+static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
+ { .chan = "DUN", .driver_data = WWAN_PORT_AT },
+ { .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
+ { .chan = "QMI", .driver_data = WWAN_PORT_QMI },
+ { .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
+ { .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE },
+ {},
+};
+MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table);
+
+static struct mhi_driver mhi_wwan_ctrl_driver = {
+ .id_table = mhi_wwan_ctrl_match_table,
+ .remove = mhi_wwan_ctrl_remove,
+ .probe = mhi_wwan_ctrl_probe,
+ .ul_xfer_cb = mhi_ul_xfer_cb,
+ .dl_xfer_cb = mhi_dl_xfer_cb,
+ .driver = {
+ .name = "mhi_wwan_ctrl",
+ },
+};
+
+module_mhi_driver(mhi_wwan_ctrl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI WWAN CTRL Driver");
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
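The RX flow control in this driver hinges on the sk_buff destructor: each buffer queued towards MHI consumes one unit of budget, and the budget only grows back from __mhi_skb_destructor() once the final consumer frees the skb, which may re-arm the refill worker. A minimal sketch of that pattern in isolation, with my_dev and my_budget_inc as invented stand-ins for the driver specifics:

    #include <linux/skbuff.h>

    /* Invoked by kfree_skb()/consume_skb() when the buffer is finally freed */
    static void my_skb_destructor(struct sk_buff *skb)
    {
            struct my_dev *mydev = skb_shinfo(skb)->destructor_arg;

            my_budget_inc(mydev);   /* one more buffer may now be allocated */
    }

    static struct sk_buff *my_alloc_tracked_skb(struct my_dev *mydev, size_t len)
    {
            struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

            if (!skb)
                    return NULL;

            /* Tie the buffer's lifetime to the RX budget */
            skb->destructor = my_skb_destructor;
            skb_shinfo(skb)->destructor_arg = mydev;

            return skb;
    }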
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
new file mode 100644
index 000000000000..cff04e532c1e
--- /dev/null
+++ b/drivers/net/wwan/wwan_core.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wwan.h>
+
+#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */
+
+static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
+static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
+static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
+static struct class *wwan_class;
+static int wwan_major;
+
+#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
+#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
+
+/* WWAN port flags */
+#define WWAN_PORT_TX_OFF 0
+
+/**
+ * struct wwan_device - The structure that defines a WWAN device
+ *
+ * @id: WWAN device unique ID.
+ * @dev: Underlying device.
+ * @port_id: Counter used to assign a unique ID to each new port.
+ */
+struct wwan_device {
+ unsigned int id;
+ struct device dev;
+ atomic_t port_id;
+};
+
+/**
+ * struct wwan_port - The structure that defines a WWAN port
+ * @type: Port type
+ * @start_count: Port start counter
+ * @flags: Store port state and capabilities
+ * @ops: Pointer to WWAN port operations
+ * @ops_lock: Protect port ops
+ * @dev: Underlying device
+ * @rxq: Buffer inbound queue
+ * @waitqueue: The waitqueue for port fops (read/write/poll)
+ */
+struct wwan_port {
+ enum wwan_port_type type;
+ unsigned int start_count;
+ unsigned long flags;
+ const struct wwan_port_ops *ops;
+ struct mutex ops_lock; /* Serialize ops + protect against removal */
+ struct device dev;
+ struct sk_buff_head rxq;
+ wait_queue_head_t waitqueue;
+};
+
+static void wwan_dev_destroy(struct device *dev)
+{
+ struct wwan_device *wwandev = to_wwan_dev(dev);
+
+ ida_free(&wwan_dev_ids, wwandev->id);
+ kfree(wwandev);
+}
+
+static const struct device_type wwan_dev_type = {
+ .name = "wwan_dev",
+ .release = wwan_dev_destroy,
+};
+
+static int wwan_dev_parent_match(struct device *dev, const void *parent)
+{
+ return (dev->type == &wwan_dev_type && dev->parent == parent);
+}
+
+static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+/* This function allocates and registers a new WWAN device OR if a WWAN device
+ * already exists for the given parent, it gets a reference and returns it.
+ * This function is not exported (for now), it is called indirectly via
+ * wwan_create_port().
+ */
+static struct wwan_device *wwan_create_dev(struct device *parent)
+{
+ struct wwan_device *wwandev;
+ int err, id;
+
+ /* The 'find-alloc-register' operation must be protected against
+ * concurrent execution, a WWAN device is possibly shared between
+ * multiple callers or concurrently unregistered from wwan_remove_dev().
+ */
+ mutex_lock(&wwan_register_lock);
+
+ /* If wwandev already exists, return it */
+ wwandev = wwan_dev_get_by_parent(parent);
+ if (!IS_ERR(wwandev))
+ goto done_unlock;
+
+	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
+	if (id < 0) {
+		wwandev = ERR_PTR(id);
+		goto done_unlock;
+	}
+
+	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
+	if (!wwandev) {
+		wwandev = ERR_PTR(-ENOMEM);
+		ida_free(&wwan_dev_ids, id);
+		goto done_unlock;
+	}
+
+ wwandev->dev.parent = parent;
+ wwandev->dev.class = wwan_class;
+ wwandev->dev.type = &wwan_dev_type;
+ wwandev->id = id;
+ dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
+
+	err = device_register(&wwandev->dev);
+	if (err) {
+		put_device(&wwandev->dev);
+		wwandev = ERR_PTR(err);
+	}
+
+done_unlock:
+ mutex_unlock(&wwan_register_lock);
+
+ return wwandev;
+}
+
+static int is_wwan_child(struct device *dev, void *data)
+{
+ return dev->class == wwan_class;
+}
+
+static void wwan_remove_dev(struct wwan_device *wwandev)
+{
+ int ret;
+
+ /* Prevent concurrent picking from wwan_create_dev */
+ mutex_lock(&wwan_register_lock);
+
+ /* WWAN device is created and registered (get+add) along with its first
+ * child port, and subsequent port registrations only grab a reference
+ * (get). The WWAN device must then be unregistered (del+put) along with
+ * its latest port, and reference simply dropped (put) otherwise.
+ */
+ ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+ if (!ret)
+ device_unregister(&wwandev->dev);
+ else
+ put_device(&wwandev->dev);
+
+ mutex_unlock(&wwan_register_lock);
+}
+
+/* ------- WWAN port management ------- */
+
+static void wwan_port_destroy(struct device *dev)
+{
+ struct wwan_port *port = to_wwan_port(dev);
+
+ ida_free(&minors, MINOR(port->dev.devt));
+ skb_queue_purge(&port->rxq);
+ mutex_destroy(&port->ops_lock);
+ kfree(port);
+}
+
+static const struct device_type wwan_port_dev_type = {
+ .name = "wwan_port",
+ .release = wwan_port_destroy,
+};
+
+static int wwan_port_minor_match(struct device *dev, const void *minor)
+{
+ return (dev->type == &wwan_port_dev_type &&
+ MINOR(dev->devt) == *(unsigned int *)minor);
+}
+
+static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_port(dev);
+}
+
+/* Keep aligned with wwan_port_type enum */
+static const char * const wwan_port_type_str[] = {
+ "AT",
+ "MBIM",
+ "QMI",
+ "QCDM",
+ "FIREHOSE"
+};
+
+struct wwan_port *wwan_create_port(struct device *parent,
+ enum wwan_port_type type,
+ const struct wwan_port_ops *ops,
+ void *drvdata)
+{
+ struct wwan_device *wwandev;
+ struct wwan_port *port;
+ int minor, err = -ENOMEM;
+
+ if (type >= WWAN_PORT_MAX || !ops)
+ return ERR_PTR(-EINVAL);
+
+ /* A port is always a child of a WWAN device, retrieve (allocate or
+ * pick) the WWAN device based on the provided parent device.
+ */
+ wwandev = wwan_create_dev(parent);
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+	/* A port is exposed as a character device, get a minor */
+ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
+ if (minor < 0)
+ goto error_wwandev_remove;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ ida_free(&minors, minor);
+ goto error_wwandev_remove;
+ }
+
+ port->type = type;
+ port->ops = ops;
+ mutex_init(&port->ops_lock);
+ skb_queue_head_init(&port->rxq);
+ init_waitqueue_head(&port->waitqueue);
+
+ port->dev.parent = &wwandev->dev;
+ port->dev.class = wwan_class;
+ port->dev.type = &wwan_port_dev_type;
+ port->dev.devt = MKDEV(wwan_major, minor);
+ dev_set_drvdata(&port->dev, drvdata);
+
+ /* create unique name based on wwan device id, port index and type */
+ dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id,
+ atomic_inc_return(&wwandev->port_id),
+ wwan_port_type_str[port->type]);
+
+ err = device_register(&port->dev);
+ if (err)
+ goto error_put_device;
+
+ return port;
+
+error_put_device:
+ put_device(&port->dev);
+error_wwandev_remove:
+ wwan_remove_dev(wwandev);
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(wwan_create_port);
+
+void wwan_remove_port(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+
+ mutex_lock(&port->ops_lock);
+ if (port->start_count)
+ port->ops->stop(port);
+ port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
+ mutex_unlock(&port->ops_lock);
+
+ wake_up_interruptible(&port->waitqueue);
+
+ skb_queue_purge(&port->rxq);
+ dev_set_drvdata(&port->dev, NULL);
+ device_unregister(&port->dev);
+
+ /* Release related wwan device */
+ wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_remove_port);
+
+void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
+{
+ skb_queue_tail(&port->rxq, skb);
+ wake_up_interruptible(&port->waitqueue);
+}
+EXPORT_SYMBOL_GPL(wwan_port_rx);
+
+void wwan_port_txon(struct wwan_port *port)
+{
+ clear_bit(WWAN_PORT_TX_OFF, &port->flags);
+ wake_up_interruptible(&port->waitqueue);
+}
+EXPORT_SYMBOL_GPL(wwan_port_txon);
+
+void wwan_port_txoff(struct wwan_port *port)
+{
+ set_bit(WWAN_PORT_TX_OFF, &port->flags);
+}
+EXPORT_SYMBOL_GPL(wwan_port_txoff);
+
+void *wwan_port_get_drvdata(struct wwan_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
+
+static int wwan_port_op_start(struct wwan_port *port)
+{
+ int ret = 0;
+
+ mutex_lock(&port->ops_lock);
+ if (!port->ops) { /* Port got unplugged */
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* If port is already started, don't start again */
+ if (!port->start_count)
+ ret = port->ops->start(port);
+
+ if (!ret)
+ port->start_count++;
+
+out_unlock:
+ mutex_unlock(&port->ops_lock);
+
+ return ret;
+}
+
+static void wwan_port_op_stop(struct wwan_port *port)
+{
+ mutex_lock(&port->ops_lock);
+ port->start_count--;
+ if (port->ops && !port->start_count)
+ port->ops->stop(port);
+ mutex_unlock(&port->ops_lock);
+}
+
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ int ret;
+
+ mutex_lock(&port->ops_lock);
+ if (!port->ops) { /* Port got unplugged */
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = port->ops->tx(port, skb);
+
+out_unlock:
+ mutex_unlock(&port->ops_lock);
+
+ return ret;
+}
+
+static bool is_read_blocked(struct wwan_port *port)
+{
+ return skb_queue_empty(&port->rxq) && port->ops;
+}
+
+static bool is_write_blocked(struct wwan_port *port)
+{
+ return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
+}
+
+static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
+{
+ if (!is_read_blocked(port))
+ return 0;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
+{
+ if (!is_write_blocked(port))
+ return 0;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static int wwan_port_fops_open(struct inode *inode, struct file *file)
+{
+ struct wwan_port *port;
+ int err = 0;
+
+ port = wwan_port_get_by_minor(iminor(inode));
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ file->private_data = port;
+ stream_open(inode, file);
+
+ err = wwan_port_op_start(port);
+ if (err)
+ put_device(&port->dev);
+
+ return err;
+}
+
+static int wwan_port_fops_release(struct inode *inode, struct file *filp)
+{
+ struct wwan_port *port = filp->private_data;
+
+ wwan_port_op_stop(port);
+ put_device(&port->dev);
+
+ return 0;
+}
+
+static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_port *port = filp->private_data;
+ struct sk_buff *skb;
+ size_t copied;
+ int ret;
+
+ ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ skb = skb_dequeue(&port->rxq);
+ if (!skb)
+ return -EIO;
+
+ copied = min_t(size_t, count, skb->len);
+ if (copy_to_user(buf, skb->data, copied)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+ skb_pull(skb, copied);
+
+ /* skb is not fully consumed, keep it in the queue */
+ if (skb->len)
+ skb_queue_head(&port->rxq, skb);
+ else
+ consume_skb(skb);
+
+ return copied;
+}
+
+static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct wwan_port *port = filp->private_data;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ skb = alloc_skb(count, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ ret = wwan_port_op_tx(port, skb);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return count;
+}
+
+static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct wwan_port *port = filp->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(filp, &port->waitqueue, wait);
+
+ if (!is_write_blocked(port))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ if (!is_read_blocked(port))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!port->ops)
+ mask |= EPOLLHUP | EPOLLERR;
+
+ return mask;
+}
+
+static const struct file_operations wwan_port_fops = {
+ .owner = THIS_MODULE,
+ .open = wwan_port_fops_open,
+ .release = wwan_port_fops_release,
+ .read = wwan_port_fops_read,
+ .write = wwan_port_fops_write,
+ .poll = wwan_port_fops_poll,
+ .llseek = noop_llseek,
+};
+
+static int __init wwan_init(void)
+{
+ wwan_class = class_create(THIS_MODULE, "wwan");
+ if (IS_ERR(wwan_class))
+ return PTR_ERR(wwan_class);
+
+ /* chrdev used for wwan ports */
+ wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops);
+ if (wwan_major < 0) {
+ class_destroy(wwan_class);
+ return wwan_major;
+ }
+
+ return 0;
+}
+
+static void __exit wwan_exit(void)
+{
+ unregister_chrdev(wwan_major, "wwan_port");
+ class_destroy(wwan_class);
+}
+
+module_init(wwan_init);
+module_exit(wwan_exit);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("WWAN core");
+MODULE_LICENSE("GPL v2");
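To make the port API above concrete, here is a hedged sketch of the driver side; struct my_modem, my_modem_send() and my_probe() are invented for illustration, and only the wwan_* calls and struct wwan_port_ops are the interfaces defined in this file:

    #include <linux/wwan.h>

    struct my_modem {
            struct wwan_port *port;
            /* ... hardware state ... */
    };

    /* Assumed to be the driver's own TX path towards the hardware */
    static int my_modem_send(struct my_modem *modem, struct sk_buff *skb);

    static int my_port_start(struct wwan_port *port)
    {
            return 0;       /* bring up the control channel here */
    }

    static void my_port_stop(struct wwan_port *port)
    {
            /* quiesce the control channel here */
    }

    static int my_port_tx(struct wwan_port *port, struct sk_buff *skb)
    {
            struct my_modem *modem = wwan_port_get_drvdata(port);

            return my_modem_send(modem, skb);
    }

    static const struct wwan_port_ops my_port_ops = {
            .start = my_port_start,
            .stop = my_port_stop,
            .tx = my_port_tx,
    };

    static int my_probe(struct device *parent, struct my_modem *modem)
    {
            /* Exposed as a chardev named e.g. wwan0p1AT under the wwan class */
            modem->port = wwan_create_port(parent, WWAN_PORT_AT,
                                           &my_port_ops, modem);
            if (IS_ERR(modem->port))
                    return PTR_ERR(modem->port);

            return 0;
    }

Received data is then pushed to readers with wwan_port_rx(), flow control is toggled with wwan_port_txon()/wwan_port_txoff(), and the port is torn down with wwan_remove_port(), exactly as mhi_wwan_ctrl.c does above.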
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cc19cd9203da..44275908d61a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -608,8 +608,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
struct netfront_info *np = netdev_priv(dev);
struct netfront_queue *queue = NULL;
unsigned long irq_flags;
- int drops = 0;
- int i, err;
+ int nxmit = 0;
+ int i;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -622,15 +622,13 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
if (!xdpf)
continue;
- err = xennet_xdp_xmit_one(dev, queue, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (xennet_xdp_xmit_one(dev, queue, xdpf))
+ break;
+ nxmit++;
}
spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
- return n - drops;
+ return nxmit;
}
@@ -875,7 +873,9 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
get_page(pdata);
xdpf = xdp_convert_buff_to_frame(xdp);
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
- if (unlikely(err < 0))
+ if (unlikely(!err))
+ xdp_return_frame_rx_napi(xdpf);
+ else if (unlikely(err < 0))
trace_xdp_exception(queue->info->netdev, prog, act);
break;
case XDP_REDIRECT:
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 4dc7bd7e02b6..fe0719ed81a0 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -176,7 +176,7 @@ static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev,
*
* The firmware will be analyzed and applied when we send NCI_OP_PROP_PATCH_CMD
* command with NCI_PATCH_TYPE_EOT parameter. The device will send a
- * NFCC_PATCH_NTF packaet and a NCI_OP_CORE_RESET_NTF packet.
+ * NFCC_PATCH_NTF packet and a NCI_OP_CORE_RESET_NTF packet.
*/
static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
{
@@ -236,15 +236,12 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
static int fdp_nci_open(struct nci_dev *ndev)
{
- int r;
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
- r = info->phy_ops->enable(info->phy);
-
- return r;
+ return info->phy_ops->enable(info->phy);
}
static int fdp_nci_close(struct nci_dev *ndev)
@@ -347,7 +344,7 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev)
int r = 0;
if (info->otp_version >= info->otp_patch_version)
- goto out;
+ return r;
info->setup_patch_sent = 0;
info->setup_reset_ntf = 0;
@@ -356,19 +353,17 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev)
/* Patch init request */
r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_OTP);
if (r)
- goto out;
+ return r;
/* Patch data connection creation */
conn_id = fdp_nci_create_conn(ndev);
- if (conn_id < 0) {
- r = conn_id;
- goto out;
- }
+ if (conn_id < 0)
+ return conn_id;
/* Send the patch over the data connection */
r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_OTP);
if (r)
- goto out;
+ return r;
/* Wait for all the packets to be sent over i2c */
wait_event_interruptible(info->setup_wq,
@@ -380,13 +375,12 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev)
/* Close the data connection */
r = nci_core_conn_close(info->ndev, conn_id);
if (r)
- goto out;
+ return r;
/* Patch finish message */
if (fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT)) {
nfc_err(dev, "OTP patch error 0x%x\n", r);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* If the patch notification didn't arrive yet, wait for it */
@@ -396,8 +390,7 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev)
r = info->setup_patch_status;
if (r) {
nfc_err(dev, "OTP patch error 0x%x\n", r);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -406,7 +399,6 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev)
*/
wait_event_interruptible(info->setup_wq, info->setup_reset_ntf);
-out:
return r;
}
@@ -418,7 +410,7 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev)
int r = 0;
if (info->ram_version >= info->ram_patch_version)
- goto out;
+ return r;
info->setup_patch_sent = 0;
info->setup_reset_ntf = 0;
@@ -427,19 +419,17 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev)
/* Patch init request */
r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_RAM);
if (r)
- goto out;
+ return r;
/* Patch data connection creation */
conn_id = fdp_nci_create_conn(ndev);
- if (conn_id < 0) {
- r = conn_id;
- goto out;
- }
+ if (conn_id < 0)
+ return conn_id;
/* Send the patch over the data connection */
r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_RAM);
if (r)
- goto out;
+ return r;
/* Wait for all the packets to be sent over i2c */
wait_event_interruptible(info->setup_wq,
@@ -451,13 +441,12 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev)
/* Close the data connection */
r = nci_core_conn_close(info->ndev, conn_id);
if (r)
- goto out;
+ return r;
/* Patch finish message */
if (fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT)) {
nfc_err(dev, "RAM patch error 0x%x\n", r);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* If the patch notification didn't arrive yet, wait for it */
@@ -467,8 +456,7 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev)
r = info->setup_patch_status;
if (r) {
nfc_err(dev, "RAM patch error 0x%x\n", r);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -477,7 +465,6 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev)
*/
wait_event_interruptible(info->setup_wq, info->setup_reset_ntf);
-out:
return r;
}
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 0207e66cee21..795da9b85d56 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -40,11 +40,8 @@ static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags)
struct i2c_client *client = phy->i2c_dev;
static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
/* spec 6.2.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
- int rc;
-
- rc = i2c_master_send(client, ack, 6);
- return rc;
+ return i2c_master_send(client, ack, 6);
}
static int pn533_i2c_send_frame(struct pn533 *dev,
@@ -199,8 +196,7 @@ static int pn533_i2c_probe(struct i2c_client *client,
&phy->i2c_dev->dev);
if (IS_ERR(priv)) {
- r = PTR_ERR(priv);
- return r;
+ return PTR_ERR(priv);
}
phy->priv = priv;
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index f1469ac8ff42..2c7f9916f206 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -489,12 +489,8 @@ static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code,
pn533_send_async_complete_t complete_cb,
void *complete_cb_context)
{
- int rc;
-
- rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
+ return __pn533_send_async(dev, cmd_code, req, complete_cb,
complete_cb_context);
-
- return rc;
}
static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
@@ -502,12 +498,8 @@ static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
pn533_send_async_complete_t complete_cb,
void *complete_cb_context)
{
- int rc;
-
- rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
+ return __pn533_send_async(dev, cmd_code, req, complete_cb,
complete_cb_context);
-
- return rc;
}
/*
@@ -706,6 +698,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
return false;
+ if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
+ return false;
+
return true;
}
@@ -2617,7 +2612,7 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
return rc;
}
- return rc;
+ return 0;
}
static int pn532_sam_configuration(struct nfc_dev *nfc_dev)
@@ -2791,7 +2786,6 @@ struct pn533 *pn53x_common_init(u32 device_type,
struct device *dev)
{
struct pn533 *priv;
- int rc = -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -2833,7 +2827,7 @@ struct pn533 *pn53x_common_init(u32 device_type,
error:
kfree(priv);
- return ERR_PTR(rc);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(pn53x_common_init);
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index c00b7a07c3ee..865d3e3d1528 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -124,13 +124,12 @@ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
if (s3fwrn5_firmware_init(info)) {
//skip bootloader mode
- ret = 0;
- goto out;
+ return 0;
}
ret = s3fwrn5_firmware_update(info);
if (ret < 0)
- goto out;
+ return ret;
/* NCI core reset */
@@ -139,12 +138,9 @@ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
ret = nci_core_reset(info->ndev);
if (ret < 0)
- goto out;
-
- ret = nci_core_init(info->ndev);
+ return ret;
-out:
- return ret;
+ return nci_core_init(info->ndev);
}
static struct nci_ops s3fwrn5_nci_ops = {
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 8db323adebf0..09df6ea65840 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -95,17 +95,14 @@ static int st_nci_spi_write(void *phy_id, struct sk_buff *skb)
*/
if (!r) {
skb_rx = alloc_skb(skb->len, GFP_KERNEL);
- if (!skb_rx) {
- r = -ENOMEM;
- goto exit;
- }
+ if (!skb_rx)
+ return -ENOMEM;
skb_put(skb_rx, skb->len);
memcpy(skb_rx->data, buf, skb->len);
ndlc_recv(phy->ndlc, skb_rx);
}
-exit:
return r;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 41aa1f01fc07..18a267d5073f 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -6,6 +6,7 @@
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
+#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7daac795db39..ed10a8b66068 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -8,6 +8,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index bc0a27de69d4..dbac3a172a11 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -11,6 +11,7 @@
#include <linux/phy.h>
#include <linux/export.h>
#include <linux/device.h>
+#include <linux/nvmem-consumer.h>
/**
* of_get_phy_mode - Get phy mode for given device_node
@@ -45,42 +46,59 @@ int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
}
EXPORT_SYMBOL_GPL(of_get_phy_mode);
-static const void *of_get_mac_addr(struct device_node *np, const char *name)
+static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr)
{
struct property *pp = of_find_property(np, name, NULL);
- if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value))
- return pp->value;
- return NULL;
+ if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) {
+ memcpy(addr, pp->value, ETH_ALEN);
+ return 0;
+ }
+ return -ENODEV;
}
-static const void *of_get_mac_addr_nvmem(struct device_node *np)
+static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr)
{
- int ret;
- const void *mac;
- u8 nvmem_mac[ETH_ALEN];
struct platform_device *pdev = of_find_device_by_node(np);
+ struct nvmem_cell *cell;
+ const void *mac;
+ size_t len;
+ int ret;
- if (!pdev)
- return ERR_PTR(-ENODEV);
-
- ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac);
- if (ret) {
+	/* Try a lookup by device first, there might be an nvmem_cell_lookup
+	 * associated with the device.
+	 */
+ if (pdev) {
+ ret = nvmem_get_mac_address(&pdev->dev, addr);
put_device(&pdev->dev);
- return ERR_PTR(ret);
+ return ret;
}
- mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL);
- put_device(&pdev->dev);
- if (!mac)
- return ERR_PTR(-ENOMEM);
+ cell = of_nvmem_cell_get(np, "mac-address");
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
- return mac;
+ mac = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(mac))
+ return PTR_ERR(mac);
+
+ if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
+ kfree(mac);
+ return -EINVAL;
+ }
+
+ memcpy(addr, mac, ETH_ALEN);
+ kfree(mac);
+
+ return 0;
}
/**
* of_get_mac_address()
* @np: Caller's Device Node
+ * @addr: Pointer to a six-byte array for the result
*
* Search the device tree for the best MAC address to use. 'mac-address' is
* checked first, because that is supposed to contain the "most recent" MAC
@@ -101,24 +119,27 @@ static const void *of_get_mac_addr_nvmem(struct device_node *np)
* this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
* but is all zeros.
*
- * Return: Will be a valid pointer on success and ERR_PTR in case of error.
+ * Return: 0 on success, or a negative errno in case of error.
*/
-const void *of_get_mac_address(struct device_node *np)
+int of_get_mac_address(struct device_node *np, u8 *addr)
{
- const void *addr;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
- addr = of_get_mac_addr(np, "mac-address");
- if (addr)
- return addr;
+ ret = of_get_mac_addr(np, "mac-address", addr);
+ if (!ret)
+ return 0;
- addr = of_get_mac_addr(np, "local-mac-address");
- if (addr)
- return addr;
+ ret = of_get_mac_addr(np, "local-mac-address", addr);
+ if (!ret)
+ return 0;
- addr = of_get_mac_addr(np, "address");
- if (addr)
- return addr;
+ ret = of_get_mac_addr(np, "address", addr);
+ if (!ret)
+ return 0;
- return of_get_mac_addr_nvmem(np);
+ return of_get_mac_addr_nvmem(np, addr);
}
EXPORT_SYMBOL(of_get_mac_address);
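A caller-side sketch of the reworked API: the address is now written into a caller-provided buffer and the return value is an error code, so the old NULL/ERR_PTR checks become a plain status test. The wrapper function and the ndev/pdev naming are illustrative, not part of this patch:

    #include <linux/etherdevice.h>
    #include <linux/of_net.h>
    #include <linux/platform_device.h>

    static void my_set_mac(struct platform_device *pdev, struct net_device *ndev)
    {
            u8 addr[ETH_ALEN];

            /* Tries "mac-address", "local-mac-address", "address", then nvmem */
            if (of_get_mac_address(pdev->dev.of_node, addr) == 0)
                    ether_addr_copy(ndev->dev_addr, addr);
            else
                    eth_hw_addr_random(ndev);  /* no valid DT/nvmem address */
    }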
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 180c6fb3ef36..d80160cf34bb 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -1024,7 +1024,6 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
struct device_node *overlay_root = NULL;
*ovcs_id = 0;
- ret = 0;
if (overlay_fdt_size < sizeof(struct fdt_header) ||
fdt_check_header(overlay_fdt)) {
@@ -1195,8 +1194,6 @@ int of_overlay_remove(int *ovcs_id)
struct overlay_changeset *ovcs;
int ret, ret_apply, ret_tmp;
- ret = 0;
-
if (devicetree_corrupt()) {
pr_err("suspect devicetree state, refuse to remove overlay\n");
ret = -EBUSY;
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 48b084e86dc6..0919ed99ba94 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -2224,15 +2224,3 @@ MODULE_PARM_DESC(features,
", bit 2: hardware SPP mode"
", bit 3: hardware EPP mode"
", bit 4: hardware ECP mode");
-
-/*--- Inform (X)Emacs about preferred coding style ---------------------*/
-/*
- * Local Variables:
- * mode: c
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * tab-width: 8
- * fill-column: 78
- * ispell-local-dictionary: "american"
- * End:
- */
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 0d3719407b8b..6d7d64939f82 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL_GPL(pci_pasid_features);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
/**
- * pci_max_pasid - Get maximum number of PASIDs supported by device
+ * pci_max_pasids - Get maximum number of PASIDs supported by device
* @pdev: PCI device structure
*
* Returns negative value when PASID capability is not present.
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 5aa8977d7b0f..2f2c8a1729f9 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -41,7 +41,6 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCI_MSI_ARCH_FALLBACKS
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -59,7 +58,6 @@ config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe host controller"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCI_MSI_ARCH_FALLBACKS
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -88,7 +86,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
depends on OF || COMPILE_TEST
- select PCI_MSI_ARCH_FALLBACKS
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -233,6 +231,19 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
+config PCIE_MEDIATEK_GEN3
+ tristate "MediaTek Gen3 PCIe controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+	  Adds support for the PCIe Gen3 MAC controller found on MediaTek SoCs.
+	  This PCIe controller is compatible with Gen3, Gen2 and Gen1 speeds,
+	  and supports up to 256 MSI interrupt numbers for
+	  multi-function devices.
+
+ Say Y here if you want to enable Gen3 PCIe controller support on
+ MediaTek SoCs.
+
config VMD
depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index e4559f2182f2..63e3880a3e87 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -11,10 +11,13 @@ obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
+obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
+obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
@@ -27,6 +30,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
@@ -47,8 +51,10 @@ obj-y += mobiveil/
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
-ifdef CONFIG_PCI
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
obj-$(CONFIG_ARM64) += pci-thunder-pem.o
obj-$(CONFIG_ARM64) += pci-xgene.o
endif
+endif
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 849f1e416ea5..35e61048e133 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* pci-j721e - PCIe controller driver for TI's J721E SoCs
*
* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
@@ -50,6 +51,7 @@ enum link_status {
struct j721e_pcie {
struct device *dev;
+ struct clk *refclk;
u32 mode;
u32 num_lanes;
struct cdns_pcie *cdns_pcie;
@@ -312,6 +314,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct cdns_pcie_ep *ep;
struct gpio_desc *gpiod;
void __iomem *base;
+ struct clk *clk;
u32 num_lanes;
u32 mode;
int ret;
@@ -411,6 +414,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
+ clk = devm_clk_get_optional(dev, "pcie_refclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to get pcie_refclk\n");
+ goto err_pcie_setup;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pcie_refclk\n");
+ goto err_get_sync;
+ }
+ pcie->refclk = clk;
+
/*
* "Power Sequencing and Reset Signal Timings" table in
* PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
@@ -425,8 +442,10 @@ static int j721e_pcie_probe(struct platform_device *pdev)
}
ret = cdns_pcie_host_setup(rc);
- if (ret < 0)
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
goto err_pcie_setup;
+ }
break;
case PCI_MODE_EP:
@@ -479,6 +498,7 @@ static int j721e_pcie_remove(struct platform_device *pdev)
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 22c5529e9a65..423d35872ce4 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -280,7 +280,7 @@ config PCIE_TEGRA194_EP
select PCIE_TEGRA194
help
Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
- work in host mode. There are two instances of PCIe controllers in
+ work in endpoint mode. There are two instances of PCIe controllers in
Tegra194. This controller can work either as EP or RC. In order to
enable host-specific features PCIE_TEGRA194_HOST must be selected and
in order to enable device-specific features PCIE_TEGRA194_EP must be
@@ -311,6 +311,7 @@ config PCIE_AL
depends on OF && (ARM64 || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_ECAM
help
Say Y here to enable support of the Amazon's Annapurna Labs PCIe
controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
@@ -318,4 +319,13 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_FU740
+ bool "SiFive FU740 PCIe host controller"
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on SOC_SIFIVE || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
+
endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index a751553fa0db..eca805c1a023 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
@@ -17,7 +18,6 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
-obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
@@ -31,7 +31,13 @@ obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
-ifdef CONFIG_PCI
+obj-$(CONFIG_PCIE_AL) += pcie-al.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pcie-tegra194.o
+endif
endif
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 53aa35cb3a49..bde3b2824e89 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -346,8 +346,9 @@ static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
};
/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
@@ -367,6 +368,8 @@ static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
/**
* ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
@@ -449,6 +452,7 @@ static struct pci_ops ks_child_pcie_ops = {
/**
* ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+ * @bus: A pointer to the PCI bus structure.
*
* This sets BAR0 to enable inbound access for MSI_IRQ register
*/
@@ -488,6 +492,8 @@ static struct pci_ops ks_pcie_ops = {
/**
* ks_pcie_link_up() - Check if link up
+ * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
+ * controller driver information.
*/
static int ks_pcie_link_up(struct dw_pcie *pci)
{
@@ -605,7 +611,6 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
/**
* ks_pcie_legacy_irq_handler() - Handle legacy interrupt
- * @irq: IRQ line for legacy interrupts
* @desc: Pointer to irq descriptor
*
* Traverse through pending legacy interrupts and invoke handler for each. Also
@@ -798,7 +803,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
int ret;
pp->bridge->ops = &ks_pcie_ops;
- pp->bridge->child_ops = &ks_child_pcie_ops;
+ if (!ks_pcie->is_am6)
+ pp->bridge->child_ops = &ks_child_pcie_ops;
ret = ks_pcie_config_legacy_irq(ks_pcie);
if (ret)
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 39fe2ed5a6a2..39f4664bd84c 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -154,7 +154,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+ ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
pcie->pci = pci;
pcie->ls_epc = ls_epc;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1c25d8337151..8d028a88b375 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
}
}
+ dw_pcie_iatu_detect(pci);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7e55b2b66182..a608ae1fad57 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -398,9 +398,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_free_msi;
}
+ dw_pcie_iatu_detect(pci);
dw_pcie_setup_rc(pp);
- dw_pcie_msi_init(pp);
if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
ret = pci->ops->start_link(pci);
@@ -551,6 +551,8 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
}
}
+ dw_pcie_msi_init(pp);
+
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 004cb860e266..a945f0c0e73d 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
pci->num_ob_windows = ob;
}
-void dw_pcie_setup(struct dw_pcie *pci)
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- u32 val;
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
if (pci->version >= 0x480A || (!pci->version &&
@@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
pci->num_ob_windows, pci->num_ib_windows);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7247c8b01f04..7d6e9b7576be 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
new file mode 100644
index 000000000000..00cde9a248b5
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FU740 DesignWare PCIe Controller integration
+ * Copyright (C) 2019-2021 SiFive, Inc.
+ * Paul Walmsley
+ * Greentime Hu
+ *
+ * Based in part on the i.MX6 PCIe host controller shim which is:
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_fu740_pcie(x) dev_get_drvdata((x)->dev)
+
+struct fu740_pcie {
+ struct dw_pcie pci;
+ void __iomem *mgmt_base;
+ struct gpio_desc *reset;
+ struct gpio_desc *pwren;
+ struct clk *pcie_aux;
+ struct reset_control *rst;
+};
+
+#define SIFIVE_DEVICESRESETREG 0x28
+
+#define PCIEX8MGMT_PERST_N 0x0
+#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10
+#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18
+#define PCIEX8MGMT_DEVICE_TYPE 0x708
+#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878
+#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890
+#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898
+#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8
+#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0
+#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8
+
+#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0)
+#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5)
+#define PCIEX8MGMT_PHY_TERM_EN BIT(9)
+#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10)
+#define PCIEX8MGMT_PHY_EN BIT(11)
+#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\
+ PCIEX8MGMT_PHY_LOS_THRSHLD|\
+ PCIEX8MGMT_PHY_TERM_EN|\
+ PCIEX8MGMT_PHY_TERM_ACDC|\
+ PCIEX8MGMT_PHY_EN)
+
+#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008
+#define PCIEX8MGMT_PHY_LANE_OFF 0x100
+#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0)
+#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1)
+#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2)
+#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3)
+
+static void fu740_pcie_assert_reset(struct fu740_pcie *afp)
+{
+ /* Assert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 0);
+ /* Assert controller PERST_N */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+}
+
+static void fu740_pcie_deassert_reset(struct fu740_pcie *afp)
+{
+ /* Deassert controller PERST_N */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+ /* Deassert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 1);
+}
+
+static void fu740_pcie_power_on(struct fu740_pcie *afp)
+{
+ gpiod_set_value_cansleep(afp->pwren, 1);
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms.
+ * Section 2.2 of PCI Express Card Electromechanical Specification
+ * Revision 3.0
+ */
+ msleep(100);
+}
+
+static void fu740_pcie_drive_reset(struct fu740_pcie *afp)
+{
+ fu740_pcie_assert_reset(afp);
+ fu740_pcie_power_on(afp);
+ fu740_pcie_deassert_reset(afp);
+}
+
+static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr,
+ const uint16_t wrdata, struct fu740_pcie *afp)
+{
+ struct device *dev = afp->pci.dev;
+ void __iomem *phy_cr_para_addr;
+ void __iomem *phy_cr_para_wr_data;
+ void __iomem *phy_cr_para_wr_en;
+ void __iomem *phy_cr_para_ack;
+ int ret, val;
+
+ /* Setup */
+ if (phy) {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK;
+ } else {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK;
+ }
+
+ writel_relaxed(addr, phy_cr_para_addr);
+ writel_relaxed(wrdata, phy_cr_para_wr_data);
+ writel_relaxed(1, phy_cr_para_wr_en);
+
+ /* Wait for wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for wait_idle state failed!\n");
+
+ /* Clear */
+ writel_relaxed(0, phy_cr_para_wr_en);
+
+ /* Wait for ~wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for !wait_idle state failed!\n");
+}
+
+static void fu740_pcie_init_phy(struct fu740_pcie *afp)
+{
+ /* Enable phy cr_para_sel interfaces */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL);
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL);
+
+ /*
+ * Wait 10 cr_para cycles to guarantee that the registers are ready
+ * to be edited.
+ */
+ ndelay(10);
+
+ /* Set PHY AC termination mode */
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+}
+
+static int fu740_pcie_start_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ struct fu740_pcie *afp = dev_get_drvdata(dev);
+
+ /* Enable LTSSM */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
+ return 0;
+}
+
+static int fu740_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fu740_pcie *afp = to_fu740_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* Power on reset */
+ fu740_pcie_drive_reset(afp);
+
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ return ret;
+ }
+
+ /*
+ * Assert hold_phy_rst (hold the controller LTSSM in reset after
+ * power_up_rst_n for register programming with cr_para)
+ */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+
+ /* Deassert power_up_rst_n */
+	ret = reset_control_deassert(afp->rst);
+	if (ret) {
+		dev_err(dev, "unable to deassert pcie_power_up_rst_n\n");
+		clk_disable_unprepare(afp->pcie_aux);
+		return ret;
+	}
+
+ fu740_pcie_init_phy(afp);
+
+ /* Disable pcieauxclk */
+ clk_disable_unprepare(afp->pcie_aux);
+ /* Clear hold_phy_rst */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+	/* Enable pcieauxclk */
+	ret = clk_prepare_enable(afp->pcie_aux);
+	if (ret) {
+		dev_err(dev, "unable to re-enable pcie_aux clock\n");
+		return ret;
+	}
+ /* Set RC mode */
+ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
+ .host_init = fu740_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = fu740_pcie_start_link,
+};
+
+static int fu740_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct fu740_pcie *afp;
+
+ afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL);
+ if (!afp)
+ return -ENOMEM;
+ pci = &afp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &fu740_pcie_host_ops;
+
+ /* SiFive specific region: mgmt */
+ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+ if (IS_ERR(afp->mgmt_base))
+ return PTR_ERR(afp->mgmt_base);
+
+ /* Fetch GPIOs */
+ afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->reset))
+ return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
+
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->pwren))
+ return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
+
+ /* Fetch clocks */
+ afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(afp->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(afp->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+
+ /* Fetch reset */
+ afp->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(afp->rst))
+ return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to get reset\n");
+
+ platform_set_drvdata(pdev, afp);
+
+ return dw_pcie_host_init(&pci->pp);
+}
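
A note on the dev_err_probe() pattern used throughout this probe routine: it returns the error code unchanged, prints the message only for real failures, and for -EPROBE_DEFER records the reason (visible in debugfs' devices_deferred) rather than spamming the log. A hand-rolled equivalent of the pcie_aux lookup would look roughly like this (sketch):

    afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
    if (IS_ERR(afp->pcie_aux)) {
    	int err = PTR_ERR(afp->pcie_aux);

    	/* stay quiet for probe deferral, complain otherwise */
    	if (err != -EPROBE_DEFER)
    		dev_err(dev, "pcie_aux clock source missing or invalid\n");
    	return err;
    }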
+
+static void fu740_pcie_shutdown(struct platform_device *pdev)
+{
+ struct fu740_pcie *afp = platform_get_drvdata(pdev);
+
+ /* Bring down link, so bootloader gets clean state in case of reboot */
+ fu740_pcie_assert_reset(afp);
+}
+
+static const struct of_device_id fu740_pcie_of_match[] = {
+ { .compatible = "sifive,fu740-pcie", },
+ {},
+};
+
+static struct platform_driver fu740_pcie_driver = {
+ .driver = {
+ .name = "fu740-pcie",
+ .of_match_table = fu740_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fu740_pcie_probe,
+ .shutdown = fu740_pcie_shutdown,
+};
+
+builtin_platform_driver(fu740_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 0cedd1f95f37..f89a7d24ba28 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -81,11 +81,6 @@ static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
writel(val, base + ofs);
}
-static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
-{
- return readl(lpp->app_base + ofs);
-}
-
static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
writel(val, lpp->app_base + ofs);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 6fa216e52d14..bafd2c6ab3c2 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -22,6 +22,8 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -311,6 +313,104 @@ struct tegra_pcie_dw_of_data {
enum dw_pcie_device_mode mode;
};
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
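
All of config space is funneled through one 256K ECAM window here: accesses to the root bus land on the DBI registers directly, and everything below goes through an outbound iATU window that is retargeted on every access. Assuming the usual DWC target encodings (bus in bits 31:24, device in 23:19, function in 18:16), a worked example:

    /*
     * bus 2, dev 0, fn 0:
     *   busdev = (2 << 24) | (0 << 19) | (0 << 16) = 0x02000000
     *
     * program_outbound_atu() then points the window at cfg->res.start to
     * that target and the generic accessors go through config_base.
     */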
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_TEGRA194
+
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
return container_of(pci, struct tegra_pcie_dw, pci);
@@ -1019,7 +1119,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
.stop_link = tegra_pcie_dw_stop_link,
};
-static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
.host_init = tegra_pcie_dw_host_init,
};
@@ -1645,7 +1745,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
if (pcie->ep_state == EP_STATE_ENABLED)
return;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
ret);
@@ -1881,7 +1981,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
return &tegra_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
@@ -2311,3 +2411,5 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
+
+#endif /* CONFIG_PCIE_TEGRA194 */
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index a62d247018cf..e4643fb94e78 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -24,8 +24,7 @@ config PCIE_MOBIVEIL_PLAT
config PCIE_LAYERSCAPE_GEN4
bool "Freescale Layerscape PCIe Gen4 controller"
- depends on PCI
- depends on OF && (ARM64 || ARCH_LAYERSCAPE)
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_MOBIVEIL_HOST
help
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 6ab694f8d283..d3924a44db02 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -79,6 +79,7 @@ int pci_host_common_probe(struct platform_device *pdev)
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->msi_domain = true;
return pci_host_probe(bridge);
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 1ff4ce24f4b3..6511648271b2 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -473,7 +473,6 @@ struct hv_pcibus_device {
struct list_head dr_list;
struct msi_domain_info msi_info;
- struct msi_controller msi_chip;
struct irq_domain *irq_domain;
spinlock_t retarget_msi_interrupt_lock;
@@ -1866,9 +1865,6 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
if (!hbus->pci_bus)
return -ENODEV;
- hbus->pci_bus->msi = &hbus->msi_chip;
- hbus->pci_bus->msi->dev = &hbus->hdev->device;
-
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
hv_pci_assign_numa_node(hbus);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 8fcabed7c6a6..8069bd9232d4 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -78,23 +79,8 @@
#define AFI_MSI_FPCI_BAR_ST 0x64
#define AFI_MSI_AXI_BAR_ST 0x68
-#define AFI_MSI_VEC0 0x6c
-#define AFI_MSI_VEC1 0x70
-#define AFI_MSI_VEC2 0x74
-#define AFI_MSI_VEC3 0x78
-#define AFI_MSI_VEC4 0x7c
-#define AFI_MSI_VEC5 0x80
-#define AFI_MSI_VEC6 0x84
-#define AFI_MSI_VEC7 0x88
-
-#define AFI_MSI_EN_VEC0 0x8c
-#define AFI_MSI_EN_VEC1 0x90
-#define AFI_MSI_EN_VEC2 0x94
-#define AFI_MSI_EN_VEC3 0x98
-#define AFI_MSI_EN_VEC4 0x9c
-#define AFI_MSI_EN_VEC5 0xa0
-#define AFI_MSI_EN_VEC6 0xa4
-#define AFI_MSI_EN_VEC7 0xa8
+#define AFI_MSI_VEC(x) (0x6c + ((x) * 4))
+#define AFI_MSI_EN_VEC(x) (0x8c + ((x) * 4))
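
The parameterized macros reproduce the old per-vector constants exactly; spot-checking two of them:

    /* AFI_MSI_VEC(3)    = 0x6c + 3 * 4 = 0x78  (was AFI_MSI_VEC3)    */
    /* AFI_MSI_EN_VEC(7) = 0x8c + 7 * 4 = 0xa8  (was AFI_MSI_EN_VEC7) */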
#define AFI_CONFIGURATION 0xac
#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
@@ -280,10 +266,10 @@
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
struct tegra_msi {
- struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- struct mutex lock;
+ struct mutex map_lock;
+ spinlock_t mask_lock;
void *virt;
dma_addr_t phys;
int irq;
@@ -333,11 +319,6 @@ struct tegra_pcie_soc {
} ectl;
};
-static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
-{
- return container_of(chip, struct tegra_msi, chip);
-}
-
struct tegra_pcie {
struct device *dev;
@@ -372,6 +353,11 @@ struct tegra_pcie {
struct dentry *debugfs;
};
+static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
+{
+ return container_of(msi, struct tegra_pcie, msi);
+}
+
struct tegra_pcie_port {
struct tegra_pcie *pcie;
struct device_node *np;
@@ -1432,7 +1418,6 @@ static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
}
}
-
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -1509,6 +1494,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
phys_put:
if (soc->program_uphy)
tegra_pcie_phys_put(pcie);
+
return err;
}
@@ -1551,161 +1537,227 @@ static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
afi_writel(pcie, val, AFI_PCIE_PME);
}
-static int tegra_msi_alloc(struct tegra_msi *chip)
-{
- int msi;
-
- mutex_lock(&chip->lock);
-
- msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
- if (msi < INT_PCI_MSI_NR)
- set_bit(msi, chip->used);
- else
- msi = -ENOSPC;
-
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
+static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
- struct device *dev = chip->chip.dev;
-
- mutex_lock(&chip->lock);
-
- if (!test_bit(irq, chip->used))
- dev_err(dev, "trying to free unused MSI#%lu\n", irq);
- else
- clear_bit(irq, chip->used);
-
- mutex_unlock(&chip->lock);
-}
-
-static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
-{
- struct tegra_pcie *pcie = data;
- struct device *dev = pcie->dev;
+ struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct tegra_msi *msi = &pcie->msi;
- unsigned int i, processed = 0;
+ struct device *dev = pcie->dev;
+ unsigned int i;
+
+ chained_irq_enter(chip, desc);
for (i = 0; i < 8; i++) {
- unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+ unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));
while (reg) {
unsigned int offset = find_first_bit(&reg, 32);
unsigned int index = i * 32 + offset;
unsigned int irq;
- /* clear the interrupt */
- afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
-
- irq = irq_find_mapping(msi->domain, index);
+ irq = irq_find_mapping(msi->domain->parent, index);
if (irq) {
- if (test_bit(index, msi->used))
- generic_handle_irq(irq);
- else
- dev_info(dev, "unhandled MSI\n");
+ generic_handle_irq(irq);
} else {
/*
* That's weird, who triggered this?
* Just clear it.
*/
dev_info(dev, "unexpected MSI\n");
+ afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
}
/* see if there's any more pending in this vector */
- reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
-
- processed++;
+ reg = afi_readl(pcie, AFI_MSI_VEC(i));
}
}
- return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
+ chained_irq_exit(chip, desc);
}
-static int tegra_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev, struct msi_desc *desc)
+static void tegra_msi_top_irq_ack(struct irq_data *d)
{
- struct tegra_msi *msi = to_tegra_msi(chip);
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
+ irq_chip_ack_parent(d);
+}
- hwirq = tegra_msi_alloc(msi);
- if (hwirq < 0)
- return hwirq;
+static void tegra_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
- irq = irq_create_mapping(msi->domain, hwirq);
- if (!irq) {
- tegra_msi_free(msi, hwirq);
- return -EINVAL;
- }
+static void tegra_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip tegra_msi_top_chip = {
+ .name = "Tegra PCIe MSI",
+ .irq_ack = tegra_msi_top_irq_ack,
+ .irq_mask = tegra_msi_top_irq_mask,
+ .irq_unmask = tegra_msi_top_irq_unmask,
+};
- irq_set_msi_desc(irq, desc);
+static void tegra_msi_irq_ack(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
- msg.address_lo = lower_32_bits(msi->phys);
- msg.address_hi = upper_32_bits(msi->phys);
- msg.data = hwirq;
+ /* clear the interrupt */
+ afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
+}
- pci_write_msi_msg(irq, &msg);
+static void tegra_msi_irq_mask(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+ unsigned long flags;
+ u32 value;
- return 0;
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static void tegra_msi_teardown_irq(struct msi_controller *chip,
- unsigned int irq)
+static void tegra_msi_irq_unmask(struct irq_data *d)
{
- struct tegra_msi *msi = to_tegra_msi(chip);
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+ unsigned long flags;
+ u32 value;
- irq_dispose_mapping(irq);
- tegra_msi_free(msi, hwirq);
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static struct irq_chip tegra_msi_irq_chip = {
- .name = "Tegra PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
+
+ msg->address_lo = lower_32_bits(msi->phys);
+ msg->address_hi = upper_32_bits(msi->phys);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip tegra_msi_bottom_chip = {
+ .name = "Tegra MSI",
+ .irq_ack = tegra_msi_irq_ack,
+ .irq_mask = tegra_msi_irq_mask,
+ .irq_unmask = tegra_msi_irq_unmask,
+ .irq_set_affinity = tegra_msi_set_affinity,
+ .irq_compose_msi_msg = tegra_compose_msi_msg,
};
-static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
- irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct tegra_msi *msi = domain->host_data;
+ unsigned int i;
+ int hwirq;
+
+ mutex_lock(&msi->map_lock);
+
+ hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &tegra_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
tegra_cpuidle_pcie_irqs_in_use();
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = tegra_msi_map,
+static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct tegra_msi *msi = domain->host_data;
+
+ mutex_lock(&msi->map_lock);
+
+ bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+}
+
+static const struct irq_domain_ops tegra_msi_domain_ops = {
+ .alloc = tegra_msi_domain_alloc,
+ .free = tegra_msi_domain_free,
+};
+
+static struct msi_domain_info tegra_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &tegra_msi_top_chip,
};
+static int tegra_allocate_domains(struct tegra_msi *msi)
+{
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
+ &tegra_msi_domain_ops, msi);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ if (!msi->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
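
From an endpoint driver's point of view nothing changes with the hierarchical rework: vectors requested through the PCI core end up in tegra_msi_domain_alloc() above. A minimal consumer sketch, with my_isr and priv as invented placeholder names:

    int nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
    if (nvec < 0)
    	return nvec;

    /* each allocated vector now owns one hwirq bit in msi->used */
    err = request_irq(pci_irq_vector(pdev, 0), my_isr, 0, "my-dev", priv);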
+
+static void tegra_free_domains(struct tegra_msi *msi)
+{
+ struct irq_domain *parent = msi->domain->parent;
+
+ irq_domain_remove(msi->domain);
+ irq_domain_remove(parent);
+}
+
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct platform_device *pdev = to_platform_device(pcie->dev);
struct tegra_msi *msi = &pcie->msi;
struct device *dev = pcie->dev;
int err;
- mutex_init(&msi->lock);
-
- msi->chip.dev = dev;
- msi->chip.setup_irq = tegra_msi_setup_irq;
- msi->chip.teardown_irq = tegra_msi_teardown_irq;
+ mutex_init(&msi->map_lock);
+ spin_lock_init(&msi->mask_lock);
- msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
- &msi_domain_ops, &msi->chip);
- if (!msi->domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = tegra_allocate_domains(msi);
+ if (err)
+ return err;
}
err = platform_get_irq_byname(pdev, "msi");
@@ -1714,12 +1766,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
msi->irq = err;
- err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
- tegra_msi_irq_chip.name, pcie);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ: %d\n", err);
- goto free_irq_domain;
- }
+ irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
/* Though the PCIe controller can address >32-bit address space, to
* facilitate endpoints that support only 32-bit MSI target address,
@@ -1740,14 +1787,14 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
goto free_irq;
}
- host->msi = &msi->chip;
-
return 0;
free_irq:
- free_irq(msi->irq, pcie);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
free_irq_domain:
- irq_domain_remove(msi->domain);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_free_domains(msi);
+
return err;
}
@@ -1755,22 +1802,18 @@ static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
- u32 reg;
+ u32 reg, msi_state[INT_PCI_MSI_NR / 32];
+ int i;
afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
/* this register is in 4K increments */
afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
- /* enable all MSI vectors */
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
+ /* Restore the MSI allocation state */
+ bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
+ for (i = 0; i < ARRAY_SIZE(msi_state); i++)
+ afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
/* and unmask the MSI interrupt */
reg = afi_readl(pcie, AFI_INTR_MASK);
@@ -1786,16 +1829,16 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
DMA_ATTR_NO_KERNEL_MAPPING);
- if (msi->irq > 0)
- free_irq(msi->irq, pcie);
-
for (i = 0; i < INT_PCI_MSI_NR; i++) {
irq = irq_find_mapping(msi->domain, i);
if (irq > 0)
- irq_dispose_mapping(irq);
+ irq_domain_free_irqs(irq, 1);
}
- irq_domain_remove(msi->domain);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_free_domains(msi);
}
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
@@ -1807,16 +1850,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
value &= ~AFI_INTR_MASK_MSI_MASK;
afi_writel(pcie, value, AFI_INTR_MASK);
- /* disable all MSI vectors */
- afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
-
return 0;
}
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index f964fd26f7e0..ffd84656544f 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
- node_bits = (cfg->res.start >> 32) & (1 << 12);
+ node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 1a3f70ac61fc..0660b9da204f 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -12,6 +12,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
* structure here for the BAR.
*/
bar4_start = res_pem->start + 0xf00000;
- pem_pci->ea_entry[0] = (u32)bar4_start | 2;
- pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
- pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+ pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+ pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
cfg->priv = pem_pci;
return 0;
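
The switch to lower_32_bits()/upper_32_bits() drops the bare casts and avoids shifting a 32-bit quantity by 32, which is undefined behaviour. Paraphrasing their definitions from <linux/kernel.h>:

    #define lower_32_bits(n) ((u32)((n) & 0xffffffff))
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))	/* split shift: safe for 32-bit n */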
@@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-#define PEM_RES_BASE 0x87e0c0000000UL
-#define PEM_NODE_MASK GENMASK(45, 44)
-#define PEM_INDX_MASK GENMASK(26, 24)
+#define PEM_RES_BASE 0x87e0c0000000ULL
+#define PEM_NODE_MASK GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK GENMASK_ULL(26, 24)
#define PEM_MIN_DOM_IN_NODE 4
#define PEM_MAX_DOM_IN_NODE 10
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 2afdc865253e..7f503dd4ff81 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
- port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 42691dd8ebef..98aa1dccc6e6 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -236,10 +236,8 @@ static int altera_msi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"vector_slave");
msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(msi->vector_base)) {
- dev_err(&pdev->dev, "failed to map vector_slave memory\n");
+ if (IS_ERR(msi->vector_base))
return PTR_ERR(msi->vector_base);
- }
msi->vector_phy = res->start;
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e330e6811f0b..08bc788d9422 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
brcm_pcie_turn_off(pcie);
ret = brcm_phy_stop(pcie);
+ reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
base = pcie->base;
clk_prepare_enable(pcie->clk);
+ ret = reset_control_reset(pcie->rescal);
+ if (ret)
+ goto err_disable_clk;
+
ret = brcm_phy_start(pcie);
if (ret)
- goto err;
+ goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
pcie->bridge_sw_init_set(pcie, 0);
@@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
ret = brcm_pcie_setup(pcie);
if (ret)
- goto err;
+ goto err_reset;
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
-err:
+err_reset:
+ reset_control_rearm(pcie->rescal);
+err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
}
@@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
brcm_phy_stop(pcie);
- reset_control_assert(pcie->rescal);
+ reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk);
}
@@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->perst_reset);
}
- ret = reset_control_deassert(pcie->rescal);
+ ret = reset_control_reset(pcie->rescal);
if (ret)
dev_err(&pdev->dev, "failed to reset 'rescal'\n");
ret = brcm_phy_start(pcie);
if (ret) {
- reset_control_assert(pcie->rescal);
+ reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk);
return ret;
}
@@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
+ ret = -ENODEV;
goto fail;
}
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 908475d27e0e..eede4e8f3f75 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
NULL, NULL);
}
- return hwirq;
+ return 0;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
new file mode 100644
index 000000000000..3c5b97716d40
--- /dev/null
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Jianjun Wang <jianjun.wang@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+
+#define PCIE_SETTING_REG 0x80
+#define PCIE_PCI_IDS_1 0x9c
+#define PCI_CLASS(class) ((class) << 8)
+#define PCIE_RC_MODE BIT(0)
+
+#define PCIE_CFGNUM_REG 0x140
+#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
+#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
+#define PCIE_CFG_BYTE_EN(bytes) (((bytes) << 16) & GENMASK(19, 16))
+#define PCIE_CFG_FORCE_BYTE_EN BIT(20)
+#define PCIE_CFG_OFFSET_ADDR 0x1000
+#define PCIE_CFG_HEADER(bus, devfn) \
+ (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
+
+#define PCIE_RST_CTRL_REG 0x148
+#define PCIE_MAC_RSTB BIT(0)
+#define PCIE_PHY_RSTB BIT(1)
+#define PCIE_BRG_RSTB BIT(2)
+#define PCIE_PE_RSTB BIT(3)
+
+#define PCIE_LTSSM_STATUS_REG 0x150
+#define PCIE_LTSSM_STATE_MASK GENMASK(28, 24)
+#define PCIE_LTSSM_STATE(val) (((val) & PCIE_LTSSM_STATE_MASK) >> 24)
+#define PCIE_LTSSM_STATE_L2_IDLE 0x14
+
+#define PCIE_LINK_STATUS_REG 0x154
+#define PCIE_PORT_LINKUP BIT(8)
+
+#define PCIE_MSI_SET_NUM 8
+#define PCIE_MSI_IRQS_PER_SET 32
+#define PCIE_MSI_IRQS_NUM \
+ (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
+
+#define PCIE_INT_ENABLE_REG 0x180
+#define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
+#define PCIE_MSI_SHIFT 8
+#define PCIE_INTX_SHIFT 24
+#define PCIE_INTX_ENABLE \
+ GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
+
+#define PCIE_INT_STATUS_REG 0x184
+#define PCIE_MSI_SET_ENABLE_REG 0x190
+#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+
+#define PCIE_MSI_SET_BASE_REG 0xc00
+#define PCIE_MSI_SET_OFFSET 0x10
+#define PCIE_MSI_SET_STATUS_OFFSET 0x04
+#define PCIE_MSI_SET_ENABLE_OFFSET 0x08
+
+#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
+#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+
+#define PCIE_ICMD_PM_REG 0x198
+#define PCIE_TURN_OFF_LINK BIT(4)
+
+#define PCIE_TRANS_TABLE_BASE_REG 0x800
+#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
+#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
+#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc
+#define PCIE_ATR_TRSL_PARAM_OFFSET 0x10
+#define PCIE_ATR_TLB_SET_OFFSET 0x20
+
+#define PCIE_MAX_TRANS_TABLES 8
+#define PCIE_ATR_EN BIT(0)
+#define PCIE_ATR_SIZE(size) \
+ (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
+#define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0))
+#define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0)
+#define PCIE_ATR_TYPE_IO PCIE_ATR_ID(1)
+#define PCIE_ATR_TLP_TYPE(type) (((type) << 16) & GENMASK(18, 16))
+#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
+#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+
+/**
+ * struct mtk_msi_set - MSI information for each set
+ * @base: IO mapped register base
+ * @msg_addr: MSI message address
+ * @saved_irq_state: IRQ enable state saved at suspend time
+ */
+struct mtk_msi_set {
+ void __iomem *base;
+ phys_addr_t msg_addr;
+ u32 saved_irq_state;
+};
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @reg_base: physical register base
+ * @mac_reset: MAC reset control
+ * @phy_reset: PHY reset control
+ * @phy: PHY controller block
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count for this port
+ * @irq: PCIe controller interrupt number
+ * @saved_irq_state: IRQ enable state saved at suspend time
+ * @irq_lock: lock protecting IRQ register access
+ * @intx_domain: legacy INTx IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @msi_bottom_domain: MSI IRQ bottom domain
+ * @msi_sets: MSI sets information
+ * @lock: lock protecting IRQ bit map
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
+ */
+struct mtk_pcie_port {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t reg_base;
+ struct reset_control *mac_reset;
+ struct reset_control *phy_reset;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ int irq;
+ u32 saved_irq_state;
+ raw_spinlock_t irq_lock;
+ struct irq_domain *intx_domain;
+ struct irq_domain *msi_domain;
+ struct irq_domain *msi_bottom_domain;
+ struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
+ struct mutex lock;
+ DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+};
+
+/**
+ * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
+ * @bus: PCI bus to query
+ * @devfn: device/function number
+ * @where: offset in config space
+ * @size: data size in TLP header
+ *
+ * Set byte enable field and device information in configuration TLP header.
+ */
+static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
+ int where, int size)
+{
+ struct mtk_pcie_port *port = bus->sysdata;
+ int bytes;
+ u32 val;
+
+ bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
+
+ val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
+ PCIE_CFG_HEADER(bus->number, devfn);
+
+ writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct mtk_pcie_port *port = bus->sysdata;
+
+ return port->base + PCIE_CFG_OFFSET_ADDR + where;
+}
+
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+ return pci_generic_config_read32(bus, devfn, where, size, val);
+}
+
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+ if (size <= 2)
+ val <<= (where & 0x3) * 8;
+
+ return pci_generic_config_write32(bus, devfn, where, 4, val);
+}
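
The shift in mtk_pcie_config_write() exists because the hardware only performs 32-bit config writes: the payload has to sit on the byte lanes selected by the byte-enable field programmed in mtk_pcie_config_tlp_header(). Working through a 16-bit write at offset 0x06:

    /*
     * size = 2, where = 0x06:
     *   bytes = (GENMASK(1, 0) & 0xf) << (0x06 & 0x3) = 0x3 << 2 = 0xc
     *   val <<= (0x06 & 0x3) * 8, i.e. val << 16
     * so the two payload bytes line up with byte enables 3:2.
     */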
+
+static struct pci_ops mtk_pcie_ops = {
+ .map_bus = mtk_pcie_map_bus,
+ .read = mtk_pcie_config_read,
+ .write = mtk_pcie_config_write,
+};
+
+static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
+ resource_size_t cpu_addr,
+ resource_size_t pci_addr,
+ resource_size_t size,
+ unsigned long type, int num)
+{
+ void __iomem *table;
+ u32 val;
+
+ if (num >= PCIE_MAX_TRANS_TABLES) {
+ dev_err(port->dev, "not enough translation table entries for addr %#llx, limited to %d\n",
+ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+ return -ENODEV;
+ }
+
+ table = port->base + PCIE_TRANS_TABLE_BASE_REG +
+ num * PCIE_ATR_TLB_SET_OFFSET;
+
+ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
+ table);
+ writel_relaxed(upper_32_bits(cpu_addr),
+ table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+ writel_relaxed(lower_32_bits(pci_addr),
+ table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+ writel_relaxed(upper_32_bits(pci_addr),
+ table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+
+ if (type == IORESOURCE_IO)
+ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+ else
+ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+
+ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+
+ return 0;
+}
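
Callers hand PCIE_ATR_SIZE() the window's size order, fls(size) - 1, so only power-of-two windows can be described. For a 256 MiB memory window:

    /*
     * size = SZ_256M = 1 << 28, so fls(size) - 1 = 28
     * PCIE_ATR_SIZE(28) = (((28 - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN
     *                   = 0x36 | 0x1 = 0x37
     */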
+
+static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &port->msi_sets[i];
+
+ msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
+ i * PCIE_MSI_SET_OFFSET;
+ msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
+ i * PCIE_MSI_SET_OFFSET;
+
+ /* Configure the MSI capture address */
+ writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
+ writel_relaxed(upper_32_bits(msi_set->msg_addr),
+ port->base + PCIE_MSI_SET_ADDR_HI_BASE +
+ i * PCIE_MSI_SET_ADDR_HI_OFFSET);
+ }
+
+ val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
+ val |= PCIE_MSI_SET_ENABLE;
+ writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
+
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val |= PCIE_MSI_ENABLE;
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+}
+
+static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+{
+ struct resource_entry *entry;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+ unsigned int table_index = 0;
+ int err;
+ u32 val;
+
+ /* Set as RC mode */
+ val = readl_relaxed(port->base + PCIE_SETTING_REG);
+ val |= PCIE_RC_MODE;
+ writel_relaxed(val, port->base + PCIE_SETTING_REG);
+
+ /* Set class code */
+ val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
+ val &= ~GENMASK(31, 8);
+ val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
+ writel_relaxed(val, port->base + PCIE_PCI_IDS_1);
+
+ /* Mask all INTx interrupts */
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val &= ~PCIE_INTX_ENABLE;
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+
+ /* Assert all reset signals */
+ val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
+ writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
+ * and 2.2.1 (Initial Power-Up (G3 to S0)).
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(100);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
+ writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+
+ /* Check whether the link is up */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
+ !!(val & PCIE_PORT_LINKUP), 20,
+ PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
+ if (err) {
+ val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
+ dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ return err;
+ }
+
+ mtk_pcie_enable_msi(port);
+
+ /* Set PCIe translation windows */
+ resource_list_for_each_entry(entry, &host->windows) {
+ struct resource *res = entry->res;
+ unsigned long type = resource_type(res);
+ resource_size_t cpu_addr;
+ resource_size_t pci_addr;
+ resource_size_t size;
+ const char *range_type;
+
+ if (type == IORESOURCE_IO) {
+ cpu_addr = pci_pio_to_address(res->start);
+ range_type = "IO";
+ } else if (type == IORESOURCE_MEM) {
+ cpu_addr = res->start;
+ range_type = "MEM";
+ } else {
+ continue;
+ }
+
+ pci_addr = res->start - entry->offset;
+ size = resource_size(res);
+ err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
+ type, table_index);
+ if (err)
+ return err;
+
+ dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ range_type, table_index, (unsigned long long)cpu_addr,
+ (unsigned long long)pci_addr, (unsigned long long)size);
+
+ table_index++;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void mtk_pcie_msi_irq_mask(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+ irq_chip_mask_parent(data);
+}
+
+static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
+{
+ pci_msi_unmask_irq(data);
+ irq_chip_unmask_parent(data);
+}
+
+static struct irq_chip mtk_msi_irq_chip = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = mtk_pcie_msi_irq_mask,
+ .irq_unmask = mtk_pcie_msi_irq_unmask,
+ .name = "MSI",
+};
+
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_pcie_port *port = data->domain->host_data;
+ unsigned long hwirq;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ msg->address_hi = upper_32_bits(msi_set->msg_addr);
+ msg->address_lo = lower_32_bits(msi_set->msg_addr);
+ msg->data = hwirq;
+ dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
+ hwirq, msg->address_hi, msg->address_lo, msg->data);
+}
+
+static void mtk_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ unsigned long hwirq;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
+}
+
+static void mtk_msi_bottom_irq_mask(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_pcie_port *port = data->domain->host_data;
+ unsigned long hwirq, flags;
+ u32 val;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ val &= ~BIT(hwirq);
+ writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_pcie_port *port = data->domain->host_data;
+ unsigned long hwirq, flags;
+ u32 val;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ val |= BIT(hwirq);
+ writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .irq_ack = mtk_msi_bottom_irq_ack,
+ .irq_mask = mtk_msi_bottom_irq_mask,
+ .irq_unmask = mtk_msi_bottom_irq_unmask,
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_pcie_set_affinity,
+ .name = "MSI",
+};
+
+static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ struct mtk_msi_set *msi_set;
+ int i, hwirq, set_idx;
+
+ mutex_lock(&port->lock);
+
+ hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&port->lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
+ msi_set = &port->msi_sets[set_idx];
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &mtk_msi_bottom_irq_chip, msi_set,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&port->lock);
+
+ bitmap_release_region(port->msi_irq_in_use, data->hwirq,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
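
The power-of-two, naturally aligned bitmap regions are what make the MSI_FLAG_MULTI_PCI_MSI support above work: multi-message MSI encodes the vector index in the low bits of the MSI data, so a device granted N vectors must own an N-aligned block of hwirqs. A quick example of the arithmetic:

    /*
     * nr_irqs = 3  ->  order_base_2(3) = 2
     * bitmap_find_free_region(map, PCIE_MSI_IRQS_NUM, 2) then returns a
     * 4-bit region whose first hwirq is a multiple of 4.
     */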
+
+static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
+ .alloc = mtk_msi_bottom_domain_alloc,
+ .free = mtk_msi_bottom_domain_free,
+};
+
+static void mtk_intx_mask(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static void mtk_intx_unmask(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+/**
+ * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
+ * @data: pointer to chip specific data
+ *
+ * As an emulated level IRQ, its interrupt status will remain
+ * until the corresponding de-assert message is received; hence the
+ * status can only be cleared when the interrupt has been serviced.
+ */
+static void mtk_intx_eoi(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long hwirq;
+
+ hwirq = data->hwirq + PCIE_INTX_SHIFT;
+ writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
+}
+
+static struct irq_chip mtk_intx_irq_chip = {
+ .irq_mask = mtk_intx_mask,
+ .irq_unmask = mtk_intx_unmask,
+ .irq_eoi = mtk_intx_eoi,
+ .irq_set_affinity = mtk_pcie_set_affinity,
+ .name = "INTx",
+};
+
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
+ handle_fasteoi_irq, "INTx");
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mtk_pcie_intx_map,
+};
+
+static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *intc_node, *node = dev->of_node;
+ int ret;
+
+ raw_spin_lock_init(&port->irq_lock);
+
+ /* Setup INTx */
+ intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!intc_node) {
+ dev_err(dev, "missing interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to create INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ /* Setup MSI */
+ mutex_init(&port->lock);
+
+ port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, port);
+ if (!port->msi_bottom_domain) {
+ dev_err(dev, "failed to create MSI bottom domain\n");
+ ret = -ENODEV;
+ goto err_msi_bottom_domain;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
+ &mtk_msi_domain_info,
+ port->msi_bottom_domain);
+ if (!port->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ ret = -ENODEV;
+ goto err_msi_domain;
+ }
+
+ return 0;
+
+err_msi_domain:
+ irq_domain_remove(port->msi_bottom_domain);
+err_msi_bottom_domain:
+ irq_domain_remove(port->intx_domain);
+
+ return ret;
+}
+
+static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
+{
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ if (port->intx_domain)
+ irq_domain_remove(port->intx_domain);
+
+ if (port->msi_domain)
+ irq_domain_remove(port->msi_domain);
+
+ if (port->msi_bottom_domain)
+ irq_domain_remove(port->msi_bottom_domain);
+
+ irq_dispose_mapping(port->irq);
+}
+
+static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
+{
+ struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
+ unsigned long msi_enable, msi_status;
+ unsigned int virq;
+ irq_hw_number_t bit, hwirq;
+
+ msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+
+ do {
+ msi_status = readl_relaxed(msi_set->base +
+ PCIE_MSI_SET_STATUS_OFFSET);
+ msi_status &= msi_enable;
+ if (!msi_status)
+ break;
+
+ for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
+ hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
+ virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
+ generic_handle_irq(virq);
+ }
+ } while (true);
+}
+
+static void mtk_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long status;
+ unsigned int virq;
+ irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
+ for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
+ PCIE_INTX_SHIFT) {
+ virq = irq_find_mapping(port->intx_domain,
+ irq_bit - PCIE_INTX_SHIFT);
+ generic_handle_irq(virq);
+ }
+
+ irq_bit = PCIE_MSI_SHIFT;
+ for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
+ PCIE_MSI_SHIFT) {
+ mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);
+
+ writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ err = mtk_pcie_init_irq_domains(port);
+ if (err)
+ return err;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
+
+ return 0;
+}
+
+static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
+ int ret;
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
+ if (!regs)
+ return -EINVAL;
+ port->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(port->base)) {
+ dev_err(dev, "failed to map register base\n");
+ return PTR_ERR(port->base);
+ }
+
+ port->reg_base = regs->start;
+
+ port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(port->phy_reset)) {
+ ret = PTR_ERR(port->phy_reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get PHY reset\n");
+
+ return ret;
+ }
+
+ port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
+ if (IS_ERR(port->mac_reset)) {
+ ret = PTR_ERR(port->mac_reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get MAC reset\n");
+
+ return ret;
+ }
+
+ port->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(port->phy)) {
+ ret = PTR_ERR(port->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get PHY\n");
+
+ return ret;
+ }
+
+ port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
+ if (port->num_clks < 0) {
+ dev_err(dev, "failed to get clocks\n");
+ return port->num_clks;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_power_up(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ int err;
+
+ /* PHY power on and enable pipe clock */
+ reset_control_deassert(port->phy_reset);
+
+ err = phy_init(port->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ goto err_phy_init;
+ }
+
+ err = phy_power_on(port->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ /* MAC power on and enable transaction layer clocks */
+ reset_control_deassert(port->mac_reset);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = clk_bulk_prepare_enable(port->num_clks, port->clks);
+ if (err) {
+ dev_err(dev, "failed to enable clocks\n");
+ goto err_clk_init;
+ }
+
+ return 0;
+
+err_clk_init:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_assert(port->mac_reset);
+ phy_power_off(port->phy);
+err_phy_on:
+ phy_exit(port->phy);
+err_phy_init:
+ reset_control_assert(port->phy_reset);
+
+ return err;
+}
+
+static void mtk_pcie_power_down(struct mtk_pcie_port *port)
+{
+ clk_bulk_disable_unprepare(port->num_clks, port->clks);
+
+ pm_runtime_put_sync(port->dev);
+ pm_runtime_disable(port->dev);
+ reset_control_assert(port->mac_reset);
+
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ reset_control_assert(port->phy_reset);
+}
+
+static int mtk_pcie_setup(struct mtk_pcie_port *port)
+{
+ int err;
+
+ err = mtk_pcie_parse_port(port);
+ if (err)
+ return err;
+
+ /* Don't touch the hardware registers before power up */
+ err = mtk_pcie_power_up(port);
+ if (err)
+ return err;
+
+ /* Try link up */
+ err = mtk_pcie_startup_port(port);
+ if (err)
+ goto err_setup;
+
+ err = mtk_pcie_setup_irq(port);
+ if (err)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ mtk_pcie_power_down(port);
+
+ return err;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_pcie_port *port;
+ struct pci_host_bridge *host;
+ int err;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!host)
+ return -ENOMEM;
+
+ port = pci_host_bridge_priv(host);
+
+ port->dev = dev;
+ platform_set_drvdata(pdev, port);
+
+ err = mtk_pcie_setup(port);
+ if (err)
+ return err;
+
+ host->ops = &mtk_pcie_ops;
+ host->sysdata = port;
+
+ err = pci_host_probe(host);
+ if (err) {
+ mtk_pcie_irq_teardown(port);
+ mtk_pcie_power_down(port);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_pcie_port *port = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ pci_unlock_rescan_remove();
+
+ mtk_pcie_irq_teardown(port);
+ mtk_pcie_power_down(port);
+
+ return 0;
+}
+
+static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
+{
+ int i;
+
+ raw_spin_lock(&port->irq_lock);
+
+ port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &port->msi_sets[i];
+
+ msi_set->saved_irq_state = readl_relaxed(msi_set->base +
+ PCIE_MSI_SET_ENABLE_OFFSET);
+ }
+
+ raw_spin_unlock(&port->irq_lock);
+}
+
+static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
+{
+ int i;
+
+ raw_spin_lock(&port->irq_lock);
+
+ writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &port->msi_sets[i];
+
+ writel_relaxed(msi_set->saved_irq_state,
+ msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ }
+
+ raw_spin_unlock(&port->irq_lock);
+}
+
+static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
+{
+ u32 val;
+
+ val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
+ val |= PCIE_TURN_OFF_LINK;
+ writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
+
+ /* Wait for the link to settle in the L2 idle state */
+ return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
+ (PCIE_LTSSM_STATE(val) ==
+ PCIE_LTSSM_STATE_L2_IDLE), 20,
+ 50 * USEC_PER_MSEC);
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ /* Trigger link to L2 state */
+ err = mtk_pcie_turn_off_link(port);
+ if (err) {
+ dev_err(port->dev, "cannot enter L2 state\n");
+ return err;
+ }
+
+ /* Pull down the PERST# pin */
+ val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+
+ dev_dbg(port->dev, "entered L2 state successfully\n");
+
+ mtk_pcie_irq_save(port);
+ mtk_pcie_power_down(port);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ int err;
+
+ err = mtk_pcie_power_up(port);
+ if (err)
+ return err;
+
+ err = mtk_pcie_startup_port(port);
+ if (err) {
+ mtk_pcie_power_down(port);
+ return err;
+ }
+
+ mtk_pcie_irq_restore(port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
+static const struct of_device_id mtk_pcie_of_match[] = {
+ { .compatible = "mediatek,mt8192-pcie" },
+ {},
+};
+
+static struct platform_driver mtk_pcie_driver = {
+ .probe = mtk_pcie_probe,
+ .remove = mtk_pcie_remove,
+ .driver = {
+ .name = "mtk-pcie",
+ .of_match_table = mtk_pcie_of_match,
+ .pm = &mtk_pcie_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 23548b517e4b..62a042e75d9a 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -143,6 +143,7 @@ struct mtk_pcie_port;
* struct mtk_pcie_soc - differentiate between host generations
* @need_fix_class_id: whether this host's class ID needed to be fixed or not
* @need_fix_device_id: whether this host's device ID needed to be fixed or not
+ * @no_msi: Bridge has no MSI support, and relies on an external block
* @device_id: device ID which this host need to be fixed
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
@@ -151,6 +152,7 @@ struct mtk_pcie_port;
struct mtk_pcie_soc {
bool need_fix_class_id;
bool need_fix_device_id;
+ bool no_msi;
unsigned int device_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
@@ -760,7 +762,7 @@ static struct pci_ops mtk_pcie_ops = {
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- u32 func = PCI_FUNC(port->slot << 3);
+ u32 func = PCI_FUNC(port->slot);
u32 slot = PCI_SLOT(port->slot << 3);
u32 val;
int err;
@@ -1087,6 +1089,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
host->ops = pcie->soc->ops;
host->sysdata = pcie;
+ host->msi_domain = pcie->soc->no_msi;
err = pci_host_probe(host);
if (err)
@@ -1176,6 +1179,7 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
+ .no_msi = true,
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
};
@@ -1210,6 +1214,7 @@ static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
{},
};
+MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 04c19ff81aff..89c68c56d93b 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -301,27 +301,27 @@ static const struct cause event_cause[NUM_EVENTS] = {
LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
};
-struct event_map pcie_event_to_event[] = {
+static struct event_map pcie_event_to_event[] = {
PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
};
-struct event_map sec_error_to_event[] = {
+static struct event_map sec_error_to_event[] = {
SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
};
-struct event_map ded_error_to_event[] = {
+static struct event_map ded_error_to_event[] = {
DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
};
-struct event_map local_status_to_event[] = {
+static struct event_map local_status_to_event[] = {
LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
@@ -1023,10 +1023,8 @@ static int mc_platform_init(struct pci_config_window *cfg)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "unable to request IRQ%d\n", irq);
+ if (irq < 0)
return -ENODEV;
- }
for (i = 0; i < NUM_EVENTS; i++) {
event_irq = irq_create_mapping(port->event_domain, i);
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index a728e8f9ad3c..765cf2b45e24 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -35,18 +35,12 @@
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- struct msi_controller chip;
- unsigned long pages;
- struct mutex lock;
+ struct mutex map_lock;
+ spinlock_t mask_lock;
int irq1;
int irq2;
};
-static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
-{
- return container_of(chip, struct rcar_msi, chip);
-}
-
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -56,6 +50,11 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
+static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
+{
+ return container_of(msi, struct rcar_pcie_host, msi);
+}
+
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
unsigned int shift = BITS_PER_BYTE * (where & 3);
@@ -292,8 +291,6 @@ static int rcar_pcie_enable(struct rcar_pcie_host *host)
bridge->sysdata = host;
bridge->ops = &rcar_pcie_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- bridge->msi = &host->msi.chip;
return pci_host_probe(bridge);
}
@@ -473,42 +470,6 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
return err;
}
-static int rcar_msi_alloc(struct rcar_msi *chip)
-{
- int msi;
-
- mutex_lock(&chip->lock);
-
- msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
- if (msi < INT_PCI_MSI_NR)
- set_bit(msi, chip->used);
- else
- msi = -ENOSPC;
-
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
-{
- int msi;
-
- mutex_lock(&chip->lock);
- msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
- order_base_2(no_irqs));
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
-{
- mutex_lock(&chip->lock);
- clear_bit(irq, chip->used);
- mutex_unlock(&chip->lock);
-}
-
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
struct rcar_pcie_host *host = data;
@@ -527,18 +488,13 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
unsigned int msi_irq;
- /* clear the interrupt */
- rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
-
- msi_irq = irq_find_mapping(msi->domain, index);
+ msi_irq = irq_find_mapping(msi->domain->parent, index);
if (msi_irq) {
- if (test_bit(index, msi->used))
- generic_handle_irq(msi_irq);
- else
- dev_info(dev, "unhandled MSI\n");
+ generic_handle_irq(msi_irq);
} else {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
+ rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
}
/* see if there's any more pending in this vector */
@@ -548,149 +504,169 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
- struct msi_desc *desc)
+static void rcar_msi_top_irq_ack(struct irq_data *d)
{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
- msi.chip);
- struct rcar_pcie *pcie = &host->pcie;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
+ irq_chip_ack_parent(d);
+}
- hwirq = rcar_msi_alloc(msi);
- if (hwirq < 0)
- return hwirq;
+static void rcar_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq) {
- rcar_msi_free(msi, hwirq);
- return -EINVAL;
- }
+static void rcar_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
- irq_set_msi_desc(irq, desc);
+static struct irq_chip rcar_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_ack = rcar_msi_top_irq_ack,
+ .irq_mask = rcar_msi_top_irq_mask,
+ .irq_unmask = rcar_msi_top_irq_unmask,
+};
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
+static void rcar_msi_irq_ack(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- pci_write_msi_msg(irq, &msg);
+ /* clear the interrupt */
+ rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
+}
- return 0;
+static void rcar_msi_irq_mask(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int rcar_msi_setup_irqs(struct msi_controller *chip,
- struct pci_dev *pdev, int nvec, int type)
+static void rcar_msi_irq_unmask(struct irq_data *d)
{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
- msi.chip);
- struct rcar_pcie *pcie = &host->pcie;
- struct msi_desc *desc;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- int i;
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
+}
- /* MSI-X interrupts are not supported */
- if (type == PCI_CAP_ID_MSIX)
- return -EINVAL;
+static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
- WARN_ON(!list_is_singular(&pdev->dev.msi_list));
- desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- hwirq = rcar_msi_alloc_region(msi, nvec);
- if (hwirq < 0)
- return -ENOSPC;
+ msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg->data = data->hwirq;
+}
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq)
- return -ENOSPC;
+static struct irq_chip rcar_msi_bottom_chip = {
+ .name = "Rcar MSI",
+ .irq_ack = rcar_msi_irq_ack,
+ .irq_mask = rcar_msi_irq_mask,
+ .irq_unmask = rcar_msi_irq_unmask,
+ .irq_set_affinity = rcar_msi_set_affinity,
+ .irq_compose_msi_msg = rcar_compose_msi_msg,
+};
- for (i = 0; i < nvec; i++) {
- /*
- * irq_create_mapping() called from rcar_pcie_probe() pre-
- * allocates descs, so there is no need to allocate descs here.
- * We can therefore assume that if irq_find_mapping() above
- * returns non-zero, then the descs are also successfully
- * allocated.
- */
- if (irq_set_msi_desc_off(irq, i, desc)) {
- /* TODO: clear */
- return -EINVAL;
- }
- }
+static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct rcar_msi *msi = domain->host_data;
+ unsigned int i;
+ int hwirq;
- desc->nvec_used = nvec;
- desc->msi_attrib.multiple = order_base_2(nvec);
+ mutex_lock(&msi->map_lock);
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
+ hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
- pci_write_msi_msg(irq, &msg);
+ mutex_unlock(&msi->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &rcar_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
return 0;
}
-static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct irq_data *d = irq_get_irq_data(irq);
-
- rcar_msi_free(msi, d->hwirq);
-}
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct rcar_msi *msi = domain->host_data;
-static struct irq_chip rcar_msi_irq_chip = {
- .name = "R-Car PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+ mutex_lock(&msi->map_lock);
-static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
- return 0;
+ mutex_unlock(&msi->map_lock);
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = rcar_msi_map,
+static const struct irq_domain_ops rcar_msi_domain_ops = {
+ .alloc = rcar_msi_domain_alloc,
+ .free = rcar_msi_domain_free,
+};
+
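/*
 * Not part of this patch: the alloc/free pair above is the standard
 * bitmap-backed hwirq allocator needed for multi-MSI.
 * bitmap_find_free_region() only returns naturally aligned power-of-two
 * blocks, so a device granted N vectors can encode the vector index in the
 * low bits of the MSI data. A minimal usage sketch:
 */
static int demo_alloc_vector_pair(struct rcar_msi *msi)
{
	int hwirq;

	mutex_lock(&msi->map_lock);
	/* order_base_2(2) == 1: request an aligned block of two hwirqs */
	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR,
					order_base_2(2));
	mutex_unlock(&msi->map_lock);

	return hwirq;	/* hwirq and hwirq + 1 now belong to the caller */
}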
+static struct msi_domain_info rcar_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &rcar_msi_top_chip,
};
-static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
+static int rcar_allocate_domains(struct rcar_msi *msi)
{
- struct rcar_msi *msi = &host->msi;
- int i, irq;
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
+ &rcar_msi_domain_ops, msi);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
- for (i = 0; i < INT_PCI_MSI_NR; i++) {
- irq = irq_find_mapping(msi->domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
+ msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ if (!msi->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
}
- irq_domain_remove(msi->domain);
+ return 0;
}
-static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
+static void rcar_free_domains(struct rcar_msi *msi)
{
- struct rcar_pcie *pcie = &host->pcie;
- struct rcar_msi *msi = &host->msi;
- unsigned long base;
-
- /* setup MSI data target */
- base = virt_to_phys((void *)msi->pages);
+ struct irq_domain *parent = msi->domain->parent;
- rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
-
- /* enable all MSI interrupts */
- rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+ irq_domain_remove(msi->domain);
+ irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
@@ -698,29 +674,24 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
struct rcar_pcie *pcie = &host->pcie;
struct device *dev = pcie->dev;
struct rcar_msi *msi = &host->msi;
- int err, i;
-
- mutex_init(&msi->lock);
+ struct resource res;
+ int err;
- msi->chip.dev = dev;
- msi->chip.setup_irq = rcar_msi_setup_irq;
- msi->chip.setup_irqs = rcar_msi_setup_irqs;
- msi->chip.teardown_irq = rcar_msi_teardown_irq;
+ mutex_init(&msi->map_lock);
+ spin_lock_init(&msi->mask_lock);
- msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
- &msi_domain_ops, &msi->chip);
- if (!msi->domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
- for (i = 0; i < INT_PCI_MSI_NR; i++)
- irq_create_mapping(msi->domain, i);
+ err = rcar_allocate_domains(msi);
+ if (err)
+ return err;
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, host);
+ rcar_msi_bottom_chip.name, host);
if (err < 0) {
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
@@ -728,27 +699,32 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, host);
+ rcar_msi_bottom_chip.name, host);
if (err < 0) {
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
- /* setup MSI data target */
- msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
- rcar_pcie_hw_enable_msi(host);
+ /* disable all MSIs */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /*
+ * Setup MSI data target using RC base address address, which
+ * is guaranteed to be in the low 32bit range on any RCar HW.
+ */
+ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
return 0;
err:
- rcar_pcie_unmap_msi(host);
+ rcar_free_domains(msi);
return err;
}
static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
{
struct rcar_pcie *pcie = &host->pcie;
- struct rcar_msi *msi = &host->msi;
/* Disable all MSI interrupts */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
@@ -756,9 +732,7 @@ static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
/* Disable address decoding of the MSI interrupt, MSIFE */
rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
- free_pages(msi->pages, 0);
-
- rcar_pcie_unmap_msi(host);
+ rcar_free_domains(&host->msi);
}
static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
@@ -1011,8 +985,17 @@ static int __maybe_unused rcar_pcie_resume(struct device *dev)
dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
/* Enable MSI */
- if (IS_ENABLED(CONFIG_PCI_MSI))
- rcar_pcie_hw_enable_msi(host);
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct resource res;
+ u32 val;
+
+ of_address_to_resource(dev->of_node, 0, &res);
+ rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
+ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
+
+ bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
+ rcar_pci_write_reg(pcie, val, PCIEMSIIER);
+ }
rcar_pcie_hw_enable(host);
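/*
 * Not part of this patch: bitmap_to_arr32() in the resume path above packs
 * the kernel bitmap (stored as unsigned longs) into u32 words so that only
 * the vectors allocated before suspend are re-enabled. With INT_PCI_MSI_NR
 * equal to 32, as in this driver, a single word suffices:
 */
static u32 demo_used_vector_mask(struct rcar_msi *msi)
{
	u32 mask;

	bitmap_to_arr32(&mask, msi->used, INT_PCI_MSI_NR);
	return mask;	/* bit n set <=> hwirq n is allocated */
}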
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 07e36661bbc2..8689311c5ef6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -26,6 +26,7 @@
/* Bridge core config registers */
#define BRCFG_PCIE_RX0 0x00000000
+#define BRCFG_PCIE_RX1 0x00000004
#define BRCFG_INTERRUPT 0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020
@@ -128,6 +129,7 @@
#define NWL_ECAM_VALUE_DEFAULT 12
#define CFG_DMA_REG_BAR GENMASK(2, 0)
+#define CFG_PCIE_CACHE GENMASK(7, 0)
#define INT_PCI_MSI_NR (2 * 32)
@@ -675,6 +677,11 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
BRCFG_PCIE_RX_MSG_FILTER);
+ /* Route the PCIe DMA traffic through the CCI path */
+ if (of_dma_is_coherent(dev->of_node))
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX1) |
+ CFG_PCIE_CACHE, BRCFG_PCIE_RX1);
+
err = nwl_wait_for_link(pcie);
if (err)
return err;
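/*
 * Not part of this patch: the CCI routing added above is a plain
 * read-modify-write of BRCFG_PCIE_RX1, gated on the "dma-coherent" DT
 * property that of_dma_is_coherent() tests. Split into a helper for
 * illustration:
 */
static void demo_nwl_route_via_cci(struct nwl_pcie *pcie, struct device *dev)
{
	u32 val;

	if (!of_dma_is_coherent(dev->of_node))
		return;		/* keep the non-coherent default */

	val = nwl_bridge_readl(pcie, BRCFG_PCIE_RX1);
	nwl_bridge_writel(pcie, val | CFG_PCIE_CACHE, BRCFG_PCIE_RX1);
}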
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index fa5baeb82653..14001febf59a 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -93,25 +93,23 @@
/**
* struct xilinx_pcie_port - PCIe port information
* @reg_base: IO Mapped Register Base
- * @irq: Interrupt number
- * @msi_pages: MSI pages
* @dev: Device pointer
+ * @msi_map: Bitmap of allocated MSIs
+ * @map_lock: Mutex protecting the MSI allocation
* @msi_domain: MSI IRQ domain pointer
* @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources
*/
struct xilinx_pcie_port {
void __iomem *reg_base;
- u32 irq;
- unsigned long msi_pages;
struct device *dev;
+ unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)];
+ struct mutex map_lock;
struct irq_domain *msi_domain;
struct irq_domain *leg_domain;
struct list_head resources;
};
-static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
-
static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
return readl(port->reg_base + reg);
@@ -196,151 +194,118 @@ static struct pci_ops xilinx_pcie_ops = {
/* MSI functions */
-/**
- * xilinx_pcie_destroy_msi - Free MSI number
- * @irq: IRQ to be freed
- */
-static void xilinx_pcie_destroy_msi(unsigned int irq)
+static void xilinx_msi_top_irq_ack(struct irq_data *d)
{
- struct msi_desc *msi;
- struct xilinx_pcie_port *port;
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
-
- if (!test_bit(hwirq, msi_irq_in_use)) {
- msi = irq_get_msi_desc(irq);
- port = msi_desc_to_pci_sysdata(msi);
- dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
- } else {
- clear_bit(hwirq, msi_irq_in_use);
- }
+ /*
+ * xilinx_pcie_intr_handler() will have performed the Ack.
+ * Eventually, this should be fixed and the Ack moved into
+ * the respective callbacks for INTx and MSI.
+ */
}
-/**
- * xilinx_pcie_assign_msi - Allocate MSI number
- *
- * Return: A valid IRQ on success and error value on failure.
- */
-static int xilinx_pcie_assign_msi(void)
-{
- int pos;
-
- pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
- if (pos < XILINX_NUM_MSI_IRQS)
- set_bit(pos, msi_irq_in_use);
- else
- return -ENOSPC;
+static struct irq_chip xilinx_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_ack = xilinx_msi_top_irq_ack,
+};
- return pos;
+static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
}
-/**
- * xilinx_msi_teardown_irq - Destroy the MSI
- * @chip: MSI Chip descriptor
- * @irq: MSI IRQ to destroy
- */
-static void xilinx_msi_teardown_irq(struct msi_controller *chip,
- unsigned int irq)
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- xilinx_pcie_destroy_msi(irq);
- irq_dispose_mapping(irq);
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
+
+ msg->address_lo = lower_32_bits(pa);
+ msg->address_hi = upper_32_bits(pa);
+ msg->data = data->hwirq;
}
-/**
- * xilinx_pcie_msi_setup_irq - Setup MSI request
- * @chip: MSI chip pointer
- * @pdev: PCIe device pointer
- * @desc: MSI descriptor pointer
- *
- * Return: '0' on success and error value on failure
- */
-static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev,
- struct msi_desc *desc)
-{
- struct xilinx_pcie_port *port = pdev->bus->sysdata;
- unsigned int irq;
- int hwirq;
- struct msi_msg msg;
- phys_addr_t msg_addr;
+static struct irq_chip xilinx_msi_bottom_chip = {
+ .name = "Xilinx MSI",
+ .irq_set_affinity = xilinx_msi_set_affinity,
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+};
- hwirq = xilinx_pcie_assign_msi();
- if (hwirq < 0)
- return hwirq;
+static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xilinx_pcie_port *port = domain->host_data;
+ int hwirq, i;
- irq = irq_create_mapping(port->msi_domain, hwirq);
- if (!irq)
- return -EINVAL;
+ mutex_lock(&port->map_lock);
- irq_set_msi_desc(irq, desc);
+ hwirq = bitmap_find_free_region(port->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
- msg_addr = virt_to_phys((void *)port->msi_pages);
+ mutex_unlock(&port->map_lock);
- msg.address_hi = 0;
- msg.address_lo = msg_addr;
- msg.data = irq;
+ if (hwirq < 0)
+ return -ENOSPC;
- pci_write_msi_msg(irq, &msg);
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &xilinx_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
return 0;
}
-/* MSI Chip Descriptor */
-static struct msi_controller xilinx_pcie_msi_chip = {
- .setup_irq = xilinx_pcie_msi_setup_irq,
- .teardown_irq = xilinx_msi_teardown_irq,
-};
+static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct xilinx_pcie_port *port = domain->host_data;
-/* HW Interrupt Chip Descriptor */
-static struct irq_chip xilinx_msi_irq_chip = {
- .name = "Xilinx PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+ mutex_lock(&port->map_lock);
-/**
- * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
- * @domain: IRQ domain
- * @irq: Virtual IRQ number
- * @hwirq: HW interrupt number
- *
- * Return: Always returns 0.
- */
-static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ bitmap_release_region(port->msi_map, d->hwirq, order_base_2(nr_irqs));
- return 0;
+ mutex_unlock(&port->map_lock);
}
-/* IRQ Domain operations */
-static const struct irq_domain_ops msi_domain_ops = {
- .map = xilinx_pcie_msi_map,
+static const struct irq_domain_ops xilinx_msi_domain_ops = {
+ .alloc = xilinx_msi_domain_alloc,
+ .free = xilinx_msi_domain_free,
};
-/**
- * xilinx_pcie_enable_msi - Enable MSI support
- * @port: PCIe port information
- */
-static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+static struct msi_domain_info xilinx_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .chip = &xilinx_msi_top_chip,
+};
+
+static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie)
{
- phys_addr_t msg_addr;
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
- port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
- if (!port->msi_pages)
+ parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
+ &xilinx_msi_domain_ops, pcie);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
- msg_addr = virt_to_phys((void *)port->msi_pages);
- pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
- pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+ pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ if (!pcie->msi_domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
return 0;
}
+static void xilinx_free_msi_domains(struct xilinx_pcie_port *pcie)
+{
+ struct irq_domain *parent = pcie->msi_domain->parent;
+
+ irq_domain_remove(pcie->msi_domain);
+ irq_domain_remove(parent);
+}
+
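/*
 * Not part of this patch: like the R-Car conversion above, this driver no
 * longer allocates a page just to have an MSI target address. The root port
 * matches inbound writes against MSIBASE1/MSIBASE2 and raises the interrupt
 * itself, so the address only has to be stable and 4K-aligned -- here the
 * physical page backing the port structure is reused:
 */
static phys_addr_t demo_msi_doorbell(struct xilinx_pcie_port *port)
{
	return ALIGN_DOWN(virt_to_phys(port), SZ_4K);
}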
/* INTx Functions */
/**
@@ -420,6 +385,8 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
+ unsigned int irq;
+
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
/* Check whether interrupt valid */
@@ -432,20 +399,19 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
+ irq = irq_find_mapping(port->msi_domain->parent, val);
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- val = irq_find_mapping(port->leg_domain, val);
+ irq = irq_find_mapping(port->leg_domain, val);
}
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
- /* Handle the interrupt */
- if (IS_ENABLED(CONFIG_PCI_MSI) ||
- !(val & XILINX_PCIE_RPIFR1_MSI_INTR))
- generic_handle_irq(val);
+ if (irq)
+ generic_handle_irq(irq);
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
@@ -491,12 +457,11 @@ error:
static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
int ret;
/* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
+ pcie_intc_node = of_get_next_child(dev->of_node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
return -ENODEV;
@@ -513,18 +478,14 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->msi_domain = irq_domain_add_linear(node,
- XILINX_NUM_MSI_IRQS,
- &msi_domain_ops,
- &xilinx_pcie_msi_chip);
- if (!port->msi_domain) {
- dev_err(dev, "Failed to get a MSI IRQ domain\n");
- return -ENODEV;
- }
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(port), SZ_4K);
- ret = xilinx_pcie_enable_msi(port);
+ ret = xilinx_allocate_msi_domains(port);
if (ret)
return ret;
+
+ pcie_write(port, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
}
return 0;
@@ -572,6 +533,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource regs;
+ unsigned int irq;
int err;
err = of_address_to_resource(node, 0, &regs);
@@ -584,12 +546,12 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
if (IS_ERR(port->reg_base))
return PTR_ERR(port->reg_base);
- port->irq = irq_of_parse_and_map(node, 0);
- err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+ irq = irq_of_parse_and_map(node, 0);
+ err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"xilinx-pcie", port);
if (err) {
- dev_err(dev, "unable to request irq %d\n", port->irq);
+ dev_err(dev, "unable to request irq %d\n", irq);
return err;
}
@@ -617,7 +579,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return -ENODEV;
port = pci_host_bridge_priv(bridge);
-
+ mutex_init(&port->map_lock);
port->dev = dev;
err = xilinx_pcie_parse_dt(port);
@@ -637,11 +599,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
bridge->sysdata = port;
bridge->ops = &xilinx_pcie_ops;
-#ifdef CONFIG_PCI_MSI
- xilinx_pcie_msi_chip.dev = dev;
- bridge->msi = &xilinx_pcie_msi_chip;
-#endif
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (err)
+ xilinx_free_msi_domains(port);
+
+ return err;
}
static const struct of_device_id xilinx_pcie_of_match[] = {
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 5e80f28f0119..e3fcdfec58b3 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -28,6 +28,7 @@
#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
#define PCI_REG_VMCONFIG 0x44
#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
+#define VMCONFIG_MSI_REMAP 0x2
#define PCI_REG_VMLOCK 0x70
#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
@@ -59,6 +60,13 @@ enum vmd_features {
* be used for MSI remapping
*/
VMD_FEAT_OFFSET_FIRST_VECTOR = (1 << 3),
+
+ /*
+ * Device can bypass remapping MSI-X transactions into its MSI-X table,
+ * avoiding the requirement of a VMD MSI domain for child device
+ * interrupt handling.
+ */
+ VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4),
};
/*
@@ -306,6 +314,16 @@ static struct msi_domain_info vmd_msi_domain_info = {
.chip = &vmd_msi_controller,
};
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
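/*
 * Not part of this patch: note the inverted polarity -- a set
 * VMCONFIG_MSI_REMAP bit *disables* remapping, so the helper above clears
 * the bit for enable = true. The bypass decision in vmd_enable_domain()
 * below boils down to:
 */
static bool demo_vmd_can_bypass(unsigned long features,
				resource_size_t *offset)
{
	/* bypass only if the HW supports it and no shadow offsets apply */
	return (features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) &&
	       !offset[0] && !offset[1];
}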
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
struct fwnode_handle *fn;
@@ -325,6 +343,13 @@ static int vmd_create_irq_domain(struct vmd_dev *vmd)
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
+ /*
+ * Some production BIOS won't enable remapping between soft reboots.
+ * Ensure remapping is restored before unloading the driver.
+ */
+ if (!vmd->msix_count)
+ vmd_set_msi_remapping(vmd, true);
+
if (vmd->irq_domain) {
struct fwnode_handle *fn = vmd->irq_domain->fwnode;
@@ -679,15 +704,32 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
sd->node = pcibus_to_node(vmd->dev->bus);
- ret = vmd_create_irq_domain(vmd);
- if (ret)
- return ret;
-
/*
- * Override the irq domain bus token so the domain can be distinguished
- * from a regular PCI/MSI domain.
+ * Currently MSI remapping must be enabled in guest passthrough mode
+ * due to some missing interrupt remapping plumbing. This is probably
+ * acceptable because the guest is usually CPU-limited and MSI
+ * remapping doesn't become a performance bottleneck.
*/
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
+ if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
+ offset[0] || offset[1]) {
+ ret = vmd_alloc_irqs(vmd);
+ if (ret)
+ return ret;
+
+ vmd_set_msi_remapping(vmd, true);
+
+ ret = vmd_create_irq_domain(vmd);
+ if (ret)
+ return ret;
+
+ /*
+ * Override the IRQ domain bus token so the domain can be
+ * distinguished from a regular PCI/MSI domain.
+ */
+ irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
+ } else {
+ vmd_set_msi_remapping(vmd, false);
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -753,10 +795,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- err = vmd_alloc_irqs(vmd);
- if (err)
- return err;
-
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
@@ -825,7 +863,8 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
- VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 338148cf56f5..bce274d02dcf 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Endpoint Function Driver to implement Non-Transparent Bridge functionality
*
* Copyright (C) 2020 Texas Instruments
@@ -696,7 +696,8 @@ reset_handler:
/**
* epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
*
*+-----------------+------->+------------------+ +-----------------+
*| BAR0 | | CONFIG REGION | | BAR0 |
@@ -740,6 +741,7 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
/**
* epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
*
*+-----------------+------->+------------------+ +-----------------+
*| BAR0 | | CONFIG REGION | | BAR0 |
@@ -808,7 +810,8 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
/**
* epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
@@ -851,7 +854,8 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
/**
* epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
@@ -1312,6 +1316,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
/**
* epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space
+ * @dev: The PCI device.
* @ntb_epc: EPC associated with one of the HOST whose BAR holds peer's outbound
* address
* @bar: BAR of @ntb_epc in for which memory has to be allocated (could be
@@ -1660,7 +1665,6 @@ static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
* epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
* constructs (scratchpad region, doorbell, memorywindow)
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
- * @type: PRIMARY interface or SECONDARY interface
*
* Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs
* to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
@@ -2037,6 +2041,8 @@ static const struct config_item_type ntb_group_type = {
/**
* epf_ntb_add_cfs() - Add configfs directory specific to NTB
* @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
*
* Add configfs directory specific to NTB. This directory will hold
* NTB specific properties like db_count, spad_count, num_mws etc.,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index c0ac4e9cbe72..d2708ca4bece 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Test driver to test endpoint functionality
*
* Copyright (C) 2017 Texas Instruments
@@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EINVAL;
epc_features = pci_epc_get_features(epc, epf->func_no);
- if (epc_features) {
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
- test_reg_bar = pci_epc_get_first_free_bar(epc_features);
- if (test_reg_bar < 0)
- return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
+ if (!epc_features) {
+ dev_err(&epf->dev, "epc_features not implemented\n");
+ return -EOPNOTSUPP;
}
+ linkup_notifier = epc_features->linkup_notifier;
+ core_init_notifier = epc_features->core_init_notifier;
+ test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+ if (test_reg_bar < 0)
+ return -EINVAL;
+ pci_epf_configure_bar(epf, epc_features);
+
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
ret = pci_epf_register_driver(&test_driver);
if (ret) {
+ destroy_workqueue(kpcitest_workqueue);
pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
@@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
static void __exit pci_epf_test_exit(void)
{
+ if (kpcitest_workqueue)
+ destroy_workqueue(kpcitest_workqueue);
pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index cc8f9eb2b177..adec9bee72cf 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -594,6 +594,8 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
* pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
* @epc: the EPC device from which the endpoint function should be removed
* @epf: the endpoint function to be removed
+ * @type: identifies if the EPC is connected to the primary or secondary
+ * interface of EPF
*
* Invoke to remove PCI endpoint function from the endpoint controller.
*/
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 7646c8660d42..e9289d10f822 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
enum pci_epc_interface_type type)
{
- struct device *dev = epf->epc->dev.parent;
+ struct device *dev;
struct pci_epf_bar *epf_bar;
struct pci_epc *epc;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2750a64cecd3..4fedebf2f8c1 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -157,7 +157,7 @@ static int pcihp_is_ejectable(acpi_handle handle)
}
/**
- * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot
+ * acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot
* @pbus: the PCI bus of the PCI slot corresponding to 'handle'
* @handle: ACPI handle to check
*
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index a74b274a8c45..1f8ab4377ad8 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -148,8 +148,7 @@ static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_h
* ACPI has no generic method of setting/getting attention status
* this allows for device specific driver registration
*/
-struct acpiphp_attention_info
-{
+struct acpiphp_attention_info {
int (*set_attn)(struct hotplug_slot *slot, u8 status);
int (*get_attn)(struct hotplug_slot *slot, u8 *status);
struct module *owner;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3365c93abf0e..f031302ad401 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
slot->flags &= ~SLOT_ENABLED;
continue;
}
+ pci_dev_put(dev);
}
}
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 00cd2b43364f..7a65d427ac11 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -80,7 +80,7 @@ static u8 evbuffer[1024];
static void __iomem *compaq_int15_entry_point;
/* lock for ordering int15_bios_call() */
-static spinlock_t int15_lock;
+static DEFINE_SPINLOCK(int15_lock);
/* This is a series of functions that deal with
@@ -415,9 +415,6 @@ void compaq_nvram_init(void __iomem *rom_start)
compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
dbg("int15 entry = %p\n", compaq_int15_entry_point);
-
- /* initialize our int15 lock */
- spin_lock_init(&int15_lock);
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index f8f056be71b7..014868752cd4 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -35,7 +35,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return rc;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- return zpci_configure_device(zdev, zdev->fh);
+ return zpci_scan_configured_device(zdev, zdev->fh);
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index db047284c291..9e3b27744305 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -174,11 +174,6 @@ static inline u8 shpc_readb(struct controller *ctrl, int reg)
return readb(ctrl->creg + reg);
}
-static inline void shpc_writeb(struct controller *ctrl, int reg, u8 val)
-{
- writeb(val, ctrl->creg + reg);
-}
-
static inline u16 shpc_readw(struct controller *ctrl, int reg)
{
return readw(ctrl->creg + reg);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4afd4ee4f7f0..afc06e6ce115 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -31,6 +31,7 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
return (dev->devfn + dev->sriov->offset +
dev->sriov->stride * vf_id) & 0xff;
}
+EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);
/*
* Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
@@ -157,6 +158,92 @@ failed:
return rc;
}
+#ifdef CONFIG_PCI_MSI
+static ssize_t sriov_vf_total_msix_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 vf_total_msix = 0;
+
+ device_lock(dev);
+ if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
+ goto unlock;
+
+ vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
+unlock:
+ device_unlock(dev);
+ return sysfs_emit(buf, "%u\n", vf_total_msix);
+}
+static DEVICE_ATTR_RO(sriov_vf_total_msix);
+
+static ssize_t sriov_vf_msix_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *vf_dev = to_pci_dev(dev);
+ struct pci_dev *pdev = pci_physfn(vf_dev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val < 0)
+ return -EINVAL;
+
+ device_lock(&pdev->dev);
+ if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
+ ret = -EOPNOTSUPP;
+ goto err_pdev;
+ }
+
+ device_lock(&vf_dev->dev);
+ if (vf_dev->driver) {
+ /*
+ * A driver is already attached to this VF and has configured
+ * itself based on the current MSI-X vector count. Changing
+ * the vector size could mess up the driver, so block it.
+ */
+ ret = -EBUSY;
+ goto err_dev;
+ }
+
+ ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);
+
+err_dev:
+ device_unlock(&vf_dev->dev);
+err_pdev:
+ device_unlock(&pdev->dev);
+ return ret ? : count;
+}
+static DEVICE_ATTR_WO(sriov_vf_msix_count);
+#endif
+
+static struct attribute *sriov_vf_dev_attrs[] = {
+#ifdef CONFIG_PCI_MSI
+ &dev_attr_sriov_vf_msix_count.attr,
+#endif
+ NULL,
+};
+
+static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->is_virtfn)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group sriov_vf_dev_attr_group = {
+ .attrs = sriov_vf_dev_attrs,
+ .is_visible = sriov_vf_attrs_are_visible,
+};
+
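/*
 * Not part of this patch: a PF driver opts into these sysfs knobs through
 * the two pci_driver callbacks referenced above. The driver below is purely
 * hypothetical, with a fixed pool of 64 vectors:
 */
static u32 demo_get_vf_total_msix(struct pci_dev *pf)
{
	return 64;	/* total MSI-X vectors the PF can hand out */
}

static int demo_set_msix_vec_count(struct pci_dev *vf, int count)
{
	/* validate @count against the remaining pool, then program the VF */
	return 0;
}

static struct pci_driver demo_pf_driver = {
	.name = "demo-pf",
	.sriov_get_vf_total_msix = demo_get_vf_total_msix,
	.sriov_set_msix_vec_count = demo_set_msix_vec_count,
};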
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
@@ -400,18 +487,21 @@ static DEVICE_ATTR_RO(sriov_stride);
static DEVICE_ATTR_RO(sriov_vf_device);
static DEVICE_ATTR_RW(sriov_drivers_autoprobe);
-static struct attribute *sriov_dev_attrs[] = {
+static struct attribute *sriov_pf_dev_attrs[] = {
&dev_attr_sriov_totalvfs.attr,
&dev_attr_sriov_numvfs.attr,
&dev_attr_sriov_offset.attr,
&dev_attr_sriov_stride.attr,
&dev_attr_sriov_vf_device.attr,
&dev_attr_sriov_drivers_autoprobe.attr,
+#ifdef CONFIG_PCI_MSI
+ &dev_attr_sriov_vf_total_msix.attr,
+#endif
NULL,
};
-static umode_t sriov_attrs_are_visible(struct kobject *kobj,
- struct attribute *a, int n)
+static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
@@ -421,9 +511,9 @@ static umode_t sriov_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group sriov_dev_attr_group = {
- .attrs = sriov_dev_attrs,
- .is_visible = sriov_attrs_are_visible,
+const struct attribute_group sriov_pf_dev_attr_group = {
+ .attrs = sriov_pf_dev_attrs,
+ .is_visible = sriov_pf_attrs_are_visible,
};
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3162f88fe940..217dc9f0231f 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -64,39 +64,18 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
/* Arch hooks */
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
- struct msi_controller *chip = dev->bus->msi;
- int err;
-
- if (!chip || !chip->setup_irq)
- return -EINVAL;
-
- err = chip->setup_irq(chip, dev, desc);
- if (err < 0)
- return err;
-
- irq_set_chip_data(desc->irq, chip);
-
- return 0;
+ return -EINVAL;
}
void __weak arch_teardown_msi_irq(unsigned int irq)
{
- struct msi_controller *chip = irq_get_chip_data(irq);
-
- if (!chip || !chip->teardown_irq)
- return;
-
- chip->teardown_irq(chip, irq);
}
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- struct msi_controller *chip = dev->bus->msi;
struct msi_desc *entry;
int ret;
- if (chip && chip->setup_irqs)
- return chip->setup_irqs(chip, dev, nvec, type);
/*
* If an architecture wants to support multiple MSI, it needs to
* override arch_setup_msi_irqs()
@@ -115,11 +94,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 0;
}
-/*
- * We have a default implementation available as a separate non-weak
- * function, as it is used by the Xen x86 PCI code
- */
-void default_teardown_msi_irqs(struct pci_dev *dev)
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
int i;
struct msi_desc *entry;
@@ -129,11 +104,6 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
for (i = 0; i < entry->nvec_used; i++)
arch_teardown_msi_irq(entry->irq + i);
}
-
-void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- return default_teardown_msi_irqs(dev);
-}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
static void default_restore_msi_irq(struct pci_dev *dev, int irq)
@@ -901,8 +871,15 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec)
* Any bridge which does NOT route MSI transactions from its
* secondary bus to its primary bus must set NO_MSI flag on
* the secondary pci_bus.
- * We expect only arch-specific PCI host bus controller driver
- * or quirks for specific PCI bridges to be setting NO_MSI.
+ *
+ * The NO_MSI flag can either be set directly by:
+ * - arch-specific PCI host bus controller drivers (deprecated)
+ * - quirks for specific PCI bridges
+ *
+ * or indirectly by platform-specific PCI host bridge drivers by
+ * advertising the 'msi_domain' property, which results in
+ * the NO_MSI flag when no MSI domain is found for this bridge
+ * at probe time.
*/
for (bus = dev->bus; bus; bus = bus->parent)
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
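/*
 * Not part of this patch: the indirect path described in the comment above
 * works roughly like this in the PCI core -- a bridge that advertises
 * msi_domain but for which no MSI domain is found ends up with the NO_MSI
 * bus flag, which the loop above then sees for every child device:
 */
static void demo_apply_bridge_msi_policy(struct pci_host_bridge *bridge,
					 struct pci_bus *bus)
{
	if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
		bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}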
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 5ea472ae22ac..da5b414d585a 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -190,10 +190,18 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
- * This function will try to obtain the host bridge domain number by
- * finding a property called "linux,pci-domain" of the given device node.
+ * of_get_pci_domain_nr - Find the host bridge domain number
+ * of the given device node.
+ * @node: Device tree node with the domain information.
*
- * @node: device tree node with the domain information
+ * This function will try to obtain the host bridge domain number by finding
+ * a property called "linux,pci-domain" of the given device node.
+ *
+ * Return:
+ * * > 0 - On success, an associated domain number.
+ * * -EINVAL - The property "linux,pci-domain" does not exist.
+ * * -ENODATA - The "linux,pci-domain" property does not have a value.
+ * * -EOVERFLOW - Invalid "linux,pci-domain" property value.
*
* Returns the associated domain number from DT in the range [0-0xffff], or
* a negative value if the required property is not found.
@@ -585,10 +593,16 @@ int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
#endif /* CONFIG_PCI */
/**
+ * of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
+ * @node: Device tree node with the maximum link speed information.
+ *
* This function will try to find the limitation of link speed by finding
* a property called "max-link-speed" of the given device node.
*
- * @node: device tree node with the max link speed information
+ * Return:
+ * * > 0 - On success, a maximum link speed.
+ * * -EINVAL - Invalid "max-link-speed" property value, or failure to access
+ * the property of the device tree node.
*
* Returns the associated max link speed from DT, or a negative value if the
* required property is not found or is invalid.
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 53502a751914..36bc23e21759 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1021,7 +1021,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (!error)
pci_dbg(dev, "power state changed by ACPI to %s\n",
- acpi_power_state_string(state_conv[state]));
+ acpi_power_state_string(adev->power.state));
return error;
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 781e45cf60d1..c32f3b7540e8 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -33,6 +33,21 @@
#include <linux/pci-acpi.h>
#include "pci.h"
+static bool device_has_acpi_name(struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_handle handle = ACPI_HANDLE(dev);
+
+ if (!handle)
+ return false;
+
+ return acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
+ 1 << DSM_PCI_DEVICE_NAME);
+#else
+ return false;
+#endif
+}
+
#ifdef CONFIG_DMI
enum smbios_attr_enum {
SMBIOS_ATTR_NONE = 0,
@@ -45,13 +60,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
{
const struct dmi_device *dmi;
struct dmi_dev_onboard *donboard;
- int domain_nr;
- int bus;
- int devfn;
-
- domain_nr = pci_domain_nr(pdev->bus);
- bus = pdev->bus->number;
- devfn = pdev->devfn;
+ int domain_nr = pci_domain_nr(pdev->bus);
+ int bus = pdev->bus->number;
+ int devfn = pdev->devfn;
dmi = NULL;
while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
@@ -62,13 +73,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
donboard->devfn == devfn) {
if (buf) {
if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
- return scnprintf(buf, PAGE_SIZE,
- "%d\n",
- donboard->instance);
+ return sysfs_emit(buf, "%d\n",
+ donboard->instance);
else if (attribute == SMBIOS_ATTR_LABEL_SHOW)
- return scnprintf(buf, PAGE_SIZE,
- "%s\n",
- dmi->name);
+ return sysfs_emit(buf, "%s\n",
+ dmi->name);
}
return strlen(dmi->name);
}
@@ -76,78 +85,52 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static umode_t smbios_instance_string_exist(struct kobject *kobj,
- struct attribute *attr, int n)
+static ssize_t smbios_label_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct device *dev;
- struct pci_dev *pdev;
-
- dev = kobj_to_dev(kobj);
- pdev = to_pci_dev(dev);
-
- return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ?
- S_IRUGO : 0;
-}
-
-static ssize_t smbioslabel_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev;
- pdev = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
return find_smbios_instance_string(pdev, buf,
SMBIOS_ATTR_LABEL_SHOW);
}
+static struct device_attribute dev_attr_smbios_label = __ATTR(label, 0444,
+ smbios_label_show, NULL);
-static ssize_t smbiosinstance_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t index_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct pci_dev *pdev;
- pdev = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
return find_smbios_instance_string(pdev, buf,
SMBIOS_ATTR_INSTANCE_SHOW);
}
+static DEVICE_ATTR_RO(index);
-static struct device_attribute smbios_attr_label = {
- .attr = {.name = "label", .mode = 0444},
- .show = smbioslabel_show,
-};
-
-static struct device_attribute smbios_attr_instance = {
- .attr = {.name = "index", .mode = 0444},
- .show = smbiosinstance_show,
-};
-
-static struct attribute *smbios_attributes[] = {
- &smbios_attr_label.attr,
- &smbios_attr_instance.attr,
+static struct attribute *smbios_attrs[] = {
+ &dev_attr_smbios_label.attr,
+ &dev_attr_index.attr,
NULL,
};
-static const struct attribute_group smbios_attr_group = {
- .attrs = smbios_attributes,
- .is_visible = smbios_instance_string_exist,
-};
-
-static int pci_create_smbiosname_file(struct pci_dev *pdev)
+static umode_t smbios_attr_is_visible(struct kobject *kobj, struct attribute *a,
+ int n)
{
- return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
-static void pci_remove_smbiosname_file(struct pci_dev *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
-}
-#else
-static inline int pci_create_smbiosname_file(struct pci_dev *pdev)
-{
- return -1;
-}
+ if (device_has_acpi_name(dev))
+ return 0;
-static inline void pci_remove_smbiosname_file(struct pci_dev *pdev)
-{
+ if (!find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE))
+ return 0;
+
+ return a->mode;
}
+
+const struct attribute_group pci_dev_smbios_attr_group = {
+ .attrs = smbios_attrs,
+ .is_visible = smbios_attr_is_visible,
+};
#endif
#ifdef CONFIG_ACPI
@@ -169,11 +152,10 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
static int dsm_get_label(struct device *dev, char *buf,
enum acpi_attr_enum attr)
{
- acpi_handle handle;
+ acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object *obj, *tmp;
int len = -1;
- handle = ACPI_HANDLE(dev);
if (!handle)
return -1;
@@ -209,103 +191,39 @@ static int dsm_get_label(struct device *dev, char *buf,
return len;
}
-static bool device_has_dsm(struct device *dev)
-{
- acpi_handle handle;
-
- handle = ACPI_HANDLE(dev);
- if (!handle)
- return false;
-
- return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
- 1 << DSM_PCI_DEVICE_NAME);
-}
-
-static umode_t acpi_index_string_exist(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev;
-
- dev = kobj_to_dev(kobj);
-
- if (device_has_dsm(dev))
- return S_IRUGO;
-
- return 0;
-}
-
-static ssize_t acpilabel_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
}
+static DEVICE_ATTR_RO(label);
-static ssize_t acpiindex_show(struct device *dev,
+static ssize_t acpi_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
}
+static DEVICE_ATTR_RO(acpi_index);
-static struct device_attribute acpi_attr_label = {
- .attr = {.name = "label", .mode = 0444},
- .show = acpilabel_show,
-};
-
-static struct device_attribute acpi_attr_index = {
- .attr = {.name = "acpi_index", .mode = 0444},
- .show = acpiindex_show,
-};
-
-static struct attribute *acpi_attributes[] = {
- &acpi_attr_label.attr,
- &acpi_attr_index.attr,
+static struct attribute *acpi_attrs[] = {
+ &dev_attr_label.attr,
+ &dev_attr_acpi_index.attr,
NULL,
};
-static const struct attribute_group acpi_attr_group = {
- .attrs = acpi_attributes,
- .is_visible = acpi_index_string_exist,
-};
-
-static int pci_create_acpi_index_label_files(struct pci_dev *pdev)
+static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *a,
+ int n)
{
- return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
-static int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
- return 0;
-}
-#else
-static inline int pci_create_acpi_index_label_files(struct pci_dev *pdev)
-{
- return -1;
-}
+ if (!device_has_acpi_name(dev))
+ return 0;
-static inline int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
-{
- return -1;
+ return a->mode;
}
-static inline bool device_has_dsm(struct device *dev)
-{
- return false;
-}
+const struct attribute_group pci_dev_acpi_attr_group = {
+ .attrs = acpi_attrs,
+ .is_visible = acpi_attr_is_visible,
+};
#endif
-
-void pci_create_firmware_label_files(struct pci_dev *pdev)
-{
- if (device_has_dsm(&pdev->dev))
- pci_create_acpi_index_label_files(pdev);
- else
- pci_create_smbiosname_file(pdev);
-}
-
-void pci_remove_firmware_label_files(struct pci_dev *pdev)
-{
- if (device_has_dsm(&pdev->dev))
- pci_remove_acpi_index_label_files(pdev);
- else
- pci_remove_smbiosname_file(pdev);
-}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index f8afd54ca3e1..beb8d1f4fafe 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -39,7 +39,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
struct pci_dev *pdev; \
\
pdev = to_pci_dev(dev); \
- return sprintf(buf, format_string, pdev->field); \
+ return sysfs_emit(buf, format_string, pdev->field); \
} \
static DEVICE_ATTR_RO(field)
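The sprintf()-to-sysfs_emit() conversions that follow all take the same shape: sysfs_emit() bounds its output at PAGE_SIZE and warns if the buffer is not page aligned, so a show() callback can no longer overrun the sysfs buffer. A minimal sketch of the converted pattern, using a hypothetical attribute name:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	/* sysfs_emit() caps the output at PAGE_SIZE internally */
	return sysfs_emit(buf, "%u\n", 42);
}
static DEVICE_ATTR_RO(foo);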
@@ -56,7 +56,7 @@ static ssize_t broken_parity_status_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->broken_parity_status);
+ return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}
static ssize_t broken_parity_status_store(struct device *dev,
@@ -129,7 +129,7 @@ static ssize_t power_state_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n", pci_power_name(pdev->current_state));
+ return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);
@@ -138,10 +138,10 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- char *str = buf;
int i;
int max;
resource_size_t start, end;
+ size_t len = 0;
if (pci_dev->subordinate)
max = DEVICE_COUNT_RESOURCE;
@@ -151,12 +151,12 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
pci_resource_to_user(pci_dev, i, res, &start, &end);
- str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
- (unsigned long long)start,
- (unsigned long long)end,
- (unsigned long long)res->flags);
+ len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
+ (unsigned long long)start,
+ (unsigned long long)end,
+ (unsigned long long)res->flags);
}
- return (str - buf);
+ return len;
}
static DEVICE_ATTR_RO(resource);
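Multi-record attributes such as resource cannot use plain sysfs_emit(), which always writes from offset 0; sysfs_emit_at() takes an explicit offset and returns the byte count written, so the loop above can accumulate len safely. A minimal sketch, again with a hypothetical attribute:

static ssize_t lines_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	size_t len = 0;
	int i;

	/* each call appends at offset 'len', still bounded by PAGE_SIZE */
	for (i = 0; i < 3; i++)
		len += sysfs_emit_at(buf, len, "line %d\n", i);

	return len;
}
static DEVICE_ATTR_RO(lines);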
@@ -165,8 +165,8 @@ static ssize_t max_link_speed_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n",
- pci_speed_string(pcie_get_speed_cap(pdev)));
+ return sysfs_emit(buf, "%s\n",
+ pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);
@@ -175,7 +175,7 @@ static ssize_t max_link_width_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
+ return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);
@@ -193,7 +193,7 @@ static ssize_t current_link_speed_show(struct device *dev,
speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
- return sprintf(buf, "%s\n", pci_speed_string(speed));
+ return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);
@@ -208,7 +208,7 @@ static ssize_t current_link_width_show(struct device *dev,
if (err)
return -EINVAL;
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);
@@ -225,7 +225,7 @@ static ssize_t secondary_bus_number_show(struct device *dev,
if (err)
return -EINVAL;
- return sprintf(buf, "%u\n", sec_bus);
+ return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);
@@ -241,7 +241,7 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
if (err)
return -EINVAL;
- return sprintf(buf, "%u\n", sub_bus);
+ return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);
@@ -251,7 +251,7 @@ static ssize_t ari_enabled_show(struct device *dev,
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
+ return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);
@@ -260,11 +260,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
- pci_dev->vendor, pci_dev->device,
- pci_dev->subsystem_vendor, pci_dev->subsystem_device,
- (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
- (u8)(pci_dev->class));
+ return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+ (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);
@@ -302,7 +302,7 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
+ return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);
@@ -338,7 +338,7 @@ static ssize_t numa_node_store(struct device *dev,
static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev->numa_node);
+ return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif
@@ -348,7 +348,7 @@ static ssize_t dma_mask_bits_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
+ return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);
@@ -356,7 +356,7 @@ static ssize_t consistent_dma_mask_bits_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
+ return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);
@@ -366,9 +366,9 @@ static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_bus *subordinate = pdev->subordinate;
- return sprintf(buf, "%u\n", subordinate ?
- !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
- : !pdev->no_msi);
+ return sysfs_emit(buf, "%u\n", subordinate ?
+ !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+ : !pdev->no_msi);
}
static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
@@ -523,7 +523,7 @@ static ssize_t d3cold_allowed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->d3cold_allowed);
+ return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif
@@ -537,7 +537,7 @@ static ssize_t devspec_show(struct device *dev,
if (np == NULL)
return 0;
- return sprintf(buf, "%pOF", np);
+ return sysfs_emit(buf, "%pOF", np);
}
static DEVICE_ATTR_RO(devspec);
#endif
@@ -583,7 +583,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = scnprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ len = sysfs_emit(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
@@ -658,11 +658,11 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
struct pci_dev *vga_dev = vga_default_device();
if (vga_dev)
- return sprintf(buf, "%u\n", (pdev == vga_dev));
+ return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
- return sprintf(buf, "%u\n",
- !!(pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW));
+ return sysfs_emit(buf, "%u\n",
+ !!(pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);
@@ -808,6 +808,29 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
+static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+
+static struct bin_attribute *pci_dev_config_attrs[] = {
+ &bin_attr_config,
+ NULL,
+};
+
+static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ a->size = PCI_CFG_SPACE_SIZE;
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ a->size = PCI_CFG_SPACE_EXP_SIZE;
+
+ return a->attr.mode;
+}
+
+static const struct attribute_group pci_dev_config_attr_group = {
+ .bin_attrs = pci_dev_config_attrs,
+ .is_bin_visible = pci_dev_config_attr_is_visible,
+};
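BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0) declares the structure that replaces the two hand-rolled pci_config_attr/pcie_config_attr definitions removed further down; roughly, the macro expands to:

struct bin_attribute bin_attr_config = {
	.attr	= { .name = "config", .mode = 0644 },
	.read	= pci_read_config,
	.write	= pci_write_config,
	.size	= 0,	/* fixed up in pci_dev_config_attr_is_visible() */
};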
#ifdef HAVE_PCI_LEGACY
/**
@@ -1283,25 +1306,32 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
+static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static const struct bin_attribute pci_config_attr = {
- .attr = {
- .name = "config",
- .mode = 0644,
- },
- .size = PCI_CFG_SPACE_SIZE,
- .read = pci_read_config,
- .write = pci_write_config,
+static struct bin_attribute *pci_dev_rom_attrs[] = {
+ &bin_attr_rom,
+ NULL,
};
-static const struct bin_attribute pcie_config_attr = {
- .attr = {
- .name = "config",
- .mode = 0644,
- },
- .size = PCI_CFG_SPACE_EXP_SIZE,
- .read = pci_read_config,
- .write = pci_write_config,
+static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+ size_t rom_size;
+
+ /* If the device has a ROM, try to expose it in sysfs. */
+ rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ if (!rom_size)
+ return 0;
+
+ a->size = rom_size;
+
+ return a->attr.mode;
+}
+
+static const struct attribute_group pci_dev_rom_attr_group = {
+ .bin_attrs = pci_dev_rom_attrs,
+ .is_bin_visible = pci_dev_rom_attr_is_visible,
};
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
@@ -1325,102 +1355,35 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_WO(reset);
-static DEVICE_ATTR(reset, 0200, NULL, reset_store);
+static struct attribute *pci_dev_reset_attrs[] = {
+ &dev_attr_reset.attr,
+ NULL,
+};
-static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- int retval;
-
- pcie_vpd_create_sysfs_dev_files(dev);
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (dev->reset_fn) {
- retval = device_create_file(&dev->dev, &dev_attr_reset);
- if (retval)
- goto error;
- }
- return 0;
+ if (!pdev->reset_fn)
+ return 0;
-error:
- pcie_vpd_remove_sysfs_dev_files(dev);
- return retval;
+ return a->mode;
}
+static const struct attribute_group pci_dev_reset_attr_group = {
+ .attrs = pci_dev_reset_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
- int retval;
- int rom_size;
- struct bin_attribute *attr;
-
if (!sysfs_initialized)
return -EACCES;
- if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
- else
- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
- if (retval)
- goto err;
-
- retval = pci_create_resource_files(pdev);
- if (retval)
- goto err_config_file;
-
- /* If the device has a ROM, try to expose it in sysfs. */
- rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- if (rom_size) {
- attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
- if (!attr) {
- retval = -ENOMEM;
- goto err_resource_files;
- }
- sysfs_bin_attr_init(attr);
- attr->size = rom_size;
- attr->attr.name = "rom";
- attr->attr.mode = 0600;
- attr->read = pci_read_rom;
- attr->write = pci_write_rom;
- retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
- if (retval) {
- kfree(attr);
- goto err_resource_files;
- }
- pdev->rom_attr = attr;
- }
-
- /* add sysfs entries for various capabilities */
- retval = pci_create_capabilities_sysfs(pdev);
- if (retval)
- goto err_rom_file;
-
- pci_create_firmware_label_files(pdev);
-
- return 0;
-
-err_rom_file:
- if (pdev->rom_attr) {
- sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
- kfree(pdev->rom_attr);
- pdev->rom_attr = NULL;
- }
-err_resource_files:
- pci_remove_resource_files(pdev);
-err_config_file:
- if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
- else
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
-err:
- return retval;
-}
-
-static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
-{
- pcie_vpd_remove_sysfs_dev_files(dev);
- if (dev->reset_fn) {
- device_remove_file(&dev->dev, &dev_attr_reset);
- dev->reset_fn = 0;
- }
+ return pci_create_resource_files(pdev);
}
/**
@@ -1434,22 +1397,7 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return;
- pci_remove_capabilities_sysfs(pdev);
-
- if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
- else
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
-
pci_remove_resource_files(pdev);
-
- if (pdev->rom_attr) {
- sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
- kfree(pdev->rom_attr);
- pdev->rom_attr = NULL;
- }
-
- pci_remove_firmware_label_files(pdev);
}
static int __init pci_sysfs_init(void)
@@ -1540,6 +1488,16 @@ static const struct attribute_group pci_dev_group = {
const struct attribute_group *pci_dev_groups[] = {
&pci_dev_group,
+ &pci_dev_config_attr_group,
+ &pci_dev_rom_attr_group,
+ &pci_dev_reset_attr_group,
+ &pci_dev_vpd_attr_group,
+#ifdef CONFIG_DMI
+ &pci_dev_smbios_attr_group,
+#endif
+#ifdef CONFIG_ACPI
+ &pci_dev_acpi_attr_group,
+#endif
NULL,
};
@@ -1567,7 +1525,8 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
- &sriov_dev_attr_group,
+ &sriov_pf_dev_attr_group,
+ &sriov_vf_dev_attr_group,
#endif
&pci_bridge_attr_group,
&pcie_dev_attr_group,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e4d4e399004b..b717680377a9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -693,6 +693,36 @@ u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
+ * pci_find_vsec_capability - Find a vendor-specific extended capability
+ * @dev: PCI device to query
+ * @vendor: Vendor ID for which capability is defined
+ * @cap: Vendor-specific capability ID
+ *
+ * If @dev has Vendor ID @vendor, search for a VSEC capability with
+ * VSEC ID @cap. If found, return the capability offset in
+ * config space; otherwise return 0.
+ */
+u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor != dev->vendor)
+ return 0;
+
+ while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+ if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+ &header) == PCIBIOS_SUCCESSFUL &&
+ PCI_VNDR_HEADER_ID(header) == cap)
+ return vsec;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
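A hedged usage sketch for the new helper; MY_VENDOR_ID and MY_VSEC_ID are hypothetical driver constants, not part of this patch:

static void my_driver_find_vsec(struct pci_dev *pdev)
{
	u32 hdr;
	u16 off;

	off = pci_find_vsec_capability(pdev, MY_VENDOR_ID, MY_VSEC_ID);
	if (!off)
		return;		/* capability absent or vendor mismatch */

	/* vendor-defined registers follow the VSEC header */
	pci_read_config_dword(pdev, off + PCI_VNDR_HEADER, &hdr);
}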
+
+/**
* pci_find_parent_resource - return resource region of parent bus of given
* region
* @dev: PCI device structure contains resources to be searched
@@ -4042,6 +4072,7 @@ phys_addr_t pci_pio_to_address(unsigned long pio)
return address;
}
+EXPORT_SYMBOL_GPL(pci_pio_to_address);
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
@@ -4102,7 +4133,7 @@ void pci_unmap_iospace(struct resource *res)
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
- unmap_kernel_range(vaddr, resource_size(res));
+ vunmap_range(vaddr, vaddr + resource_size(res));
#endif
}
EXPORT_SYMBOL(pci_unmap_iospace);
@@ -4444,6 +4475,23 @@ void pci_clear_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_clear_mwi);
/**
+ * pci_disable_parity - disable parity checking for device
+ * @dev: the PCI device to operate on
+ *
+ * Disable parity checking for device @dev
+ */
+void pci_disable_parity(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_PARITY) {
+ cmd &= ~PCI_COMMAND_PARITY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+}
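Because the helper takes a bare struct pci_dev *, it can be registered directly as a fixup, which is exactly what the Mellanox Tavor change in quirks.c below does; a sketch with hypothetical IDs:

DECLARE_PCI_FIXUP_FINAL(MY_VENDOR_ID, MY_DEVICE_ID, pci_disable_parity);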
+
+/**
* pci_intx - enables/disables PCI INTx for device dev
* @pdev: the PCI device to operate on
* @enable: boolean: whether to enable or disable PCI INTx
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index ef7c4661314f..37c913bbc6e1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -21,16 +21,10 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
int pci_create_sysfs_dev_files(struct pci_dev *pdev);
void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
-static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
-{ return; }
-static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
-{ return; }
-#else
-void pci_create_firmware_label_files(struct pci_dev *pdev);
-void pci_remove_firmware_label_files(struct pci_dev *pdev);
-#endif
void pci_cleanup_rom(struct pci_dev *dev);
+#ifdef CONFIG_DMI
+extern const struct attribute_group pci_dev_smbios_attr_group;
+#endif
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
@@ -141,10 +135,9 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
type == PCI_EXP_TYPE_PCIE_BRIDGE;
}
-int pci_vpd_init(struct pci_dev *dev);
+void pci_vpd_init(struct pci_dev *dev);
void pci_vpd_release(struct pci_dev *dev);
-void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev);
-void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev);
+extern const struct attribute_group pci_dev_vpd_attr_group;
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
@@ -501,7 +494,8 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
-extern const struct attribute_group sriov_dev_attr_group;
+extern const struct attribute_group sriov_pf_dev_attr_group;
+extern const struct attribute_group sriov_vf_dev_attr_group;
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
@@ -624,6 +618,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
+#else
+static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
+ u16 segment, struct resource *res)
+{
+ return -ENODEV;
+}
#endif
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
@@ -696,6 +696,7 @@ static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL
#ifdef CONFIG_ACPI
int pci_acpi_program_hp_params(struct pci_dev *dev);
+extern const struct attribute_group pci_dev_acpi_attr_group;
#else
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index ba22388342d1..ec943cee5ecc 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -129,7 +129,7 @@ static const char * const ecrc_policy_str[] = {
};
/**
- * enable_ercr_checking - enable PCIe ECRC checking for a device
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
@@ -153,7 +153,7 @@ static int enable_ecrc_checking(struct pci_dev *dev)
}
/**
- * disable_ercr_checking - disables PCIe ECRC checking for a device
+ * disable_ecrc_checking - disables PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
@@ -1442,7 +1442,7 @@ static struct pcie_port_service_driver aerdriver = {
};
/**
- * aer_service_init - register AER root service driver
+ * pcie_aer_init - register AER root service driver
*
* Invoked when AER root service driver is loaded.
*/
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 3fc08488d65f..1d0dd77fed3a 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -463,7 +463,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
};
/**
- * pcie_pme_service_init - Register the PCIe PME service driver.
+ * pcie_pme_init - Register the PCIe PME service driver.
*/
int __init pcie_pme_init(void)
{
diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
index 2c5c552994e4..d0bcd141ac9c 100644
--- a/drivers/pci/pcie/rcec.c
+++ b/drivers/pci/pcie/rcec.c
@@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
/* Same bus, so check bitmap */
for_each_set_bit(devn, &bitmap, 32)
- if (devn == rciep->devfn)
+ if (devn == PCI_SLOT(rciep->devfn))
return true;
return false;
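The PCI_SLOT() fix matters because devfn packs a 5-bit device number and a 3-bit function number, while the RCEC association bitmap is indexed by device number alone, so comparing the raw devfn only ever matched function 0. A small illustration with hypothetical values:

static bool rciep_slot_example(void)
{
	unsigned int devfn = PCI_DEVFN(2, 1);	/* device 2, function 1 = 0x11 */

	/* the bitmap is per device, so test PCI_SLOT(), not devfn */
	return PCI_SLOT(devfn) == 2 && PCI_FUNC(devfn) == 1;	/* true */
}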
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 953f15abc850..3a62d09b8869 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -895,7 +895,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
/* Temporarily move resources off the list */
list_splice_init(&bridge->windows, &resources);
bus->sysdata = bridge->sysdata;
- bus->msi = bridge->msi;
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
@@ -926,6 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
pci_set_bus_msi_domain(bus);
+ if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+ bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
if (!parent)
set_dev_node(bus->bridge, pcibus_to_node(bus));
@@ -1053,7 +1054,6 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
return NULL;
child->parent = parent;
- child->msi = parent->msi;
child->sysdata = parent->sysdata;
child->bus_flags = parent->bus_flags;
@@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
pci_set_of_node(dev);
if (pci_setup_device(dev)) {
+ pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 653660e3ba9e..dcb229de1acb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -206,16 +206,11 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
/*
- * The Mellanox Tavor device gives false positive parity errors. Mark this
- * device with a broken_parity_status to allow PCI scanning code to "skip"
- * this now blacklisted device.
+ * The Mellanox Tavor device gives false positive parity errors. Disable
+ * parity error reporting.
*/
-static void quirk_mellanox_tavor(struct pci_dev *dev)
-{
- dev->broken_parity_status = 1; /* This device gives false positives */
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, pci_disable_parity);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, pci_disable_parity);
/*
* Deal with broken BIOSes that neglect to enable passive release,
@@ -2585,10 +2580,8 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
- if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
- pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
- dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
- }
+ if (!msi_ht_cap_enabled(dev))
+ quirk_disable_msi(dev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
@@ -2601,9 +2594,6 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
- if (!dev->subordinate)
- return;
-
/*
* Check HT MSI cap on this chipset and the root one. A single one
* having MSI is enough to be sure that MSI is supported.
@@ -2611,10 +2601,8 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
return;
- if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
- pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
- dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
- }
+ if (!msi_ht_cap_enabled(pdev))
+ quirk_msi_ht_cap(dev);
pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
@@ -3922,6 +3910,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
+ { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 95dec03d9f2a..dd12c2fcc7dc 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,6 +19,8 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
+ dev->reset_fn = 0;
+
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 7915d10f9aa1..26bf7c877de5 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -16,12 +16,10 @@
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
- int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
const struct pci_vpd_ops *ops;
- struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */
struct mutex lock;
unsigned int len;
u16 flag;
@@ -30,6 +28,11 @@ struct pci_vpd {
unsigned int valid:1;
};
+static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
+{
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+}
+
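The new helper canonicalizes the "same slot, function 0" lookup that the f0 ops and the quirk below used to spell out. A worked example of the slot arithmetic, with a hypothetical devfn:

	/* PCI_DEVFN(3, 2) == 0x1a: device 3, function 2                  */
	/* PCI_DEVFN(PCI_SLOT(0x1a), 0) == 0x18: same device, function 0  */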
/**
* pci_read_vpd - Read one entry from Vital Product Data
* @dev: pci device struct
@@ -60,19 +63,6 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
}
EXPORT_SYMBOL(pci_write_vpd);
-/**
- * pci_set_vpd_size - Set size of Vital Product Data space
- * @dev: pci device struct
- * @len: size of vpd space
- */
-int pci_set_vpd_size(struct pci_dev *dev, size_t len)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->set_size(dev, len);
-}
-EXPORT_SYMBOL(pci_set_vpd_size);
-
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
/**
@@ -85,10 +75,14 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
size_t off = 0;
unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
- while (off < old_size &&
- pci_read_vpd(dev, off, 1, header) == 1) {
+ while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
unsigned char tag;
+ if (!header[0] && !off) {
+ pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
+ return 0;
+ }
+
if (header[0] & PCI_VPD_LRDT) {
/* Large Resource Data Type Tag */
tag = pci_vpd_lrdt_tag(header);
@@ -297,30 +291,15 @@ out:
return ret ? ret : count;
}
-static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_vpd *vpd = dev->vpd;
-
- if (len == 0 || len > PCI_VPD_MAX_SIZE)
- return -EIO;
-
- vpd->valid = 1;
- vpd->len = len;
-
- return 0;
-}
-
static const struct pci_vpd_ops pci_vpd_ops = {
.read = pci_vpd_read,
.write = pci_vpd_write,
- .set_size = pci_vpd_set_size,
};
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ struct pci_dev *tdev = pci_get_func0_dev(dev);
ssize_t ret;
if (!tdev)
@@ -334,8 +313,7 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ struct pci_dev *tdev = pci_get_func0_dev(dev);
ssize_t ret;
if (!tdev)
@@ -346,38 +324,23 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
return ret;
}
-static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- int ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_set_vpd_size(tdev, len);
- pci_dev_put(tdev);
- return ret;
-}
-
static const struct pci_vpd_ops pci_vpd_f0_ops = {
.read = pci_vpd_f0_read,
.write = pci_vpd_f0_write,
- .set_size = pci_vpd_f0_set_size,
};
-int pci_vpd_init(struct pci_dev *dev)
+void pci_vpd_init(struct pci_dev *dev)
{
struct pci_vpd *vpd;
u8 cap;
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
- return -ENODEV;
+ return;
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
- return -ENOMEM;
+ return;
vpd->len = PCI_VPD_MAX_SIZE;
if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
@@ -389,7 +352,6 @@ int pci_vpd_init(struct pci_dev *dev)
vpd->busy = 0;
vpd->valid = 0;
dev->vpd = vpd;
- return 0;
}
void pci_vpd_release(struct pci_dev *dev)
@@ -397,102 +359,56 @@ void pci_vpd_release(struct pci_dev *dev)
kfree(dev->vpd);
}
-static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
- if (bin_attr->size > 0) {
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
- }
-
return pci_read_vpd(dev, off, count, buf);
}
-static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
- if (bin_attr->size > 0) {
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
- }
-
return pci_write_vpd(dev, off, count, buf);
}
+static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev)
-{
- int retval;
- struct bin_attribute *attr;
-
- if (!dev->vpd)
- return;
+static struct bin_attribute *vpd_attrs[] = {
+ &bin_attr_vpd,
+ NULL,
+};
- attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
- if (!attr)
- return;
+static umode_t vpd_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- sysfs_bin_attr_init(attr);
- attr->size = 0;
- attr->attr.name = "vpd";
- attr->attr.mode = S_IRUSR | S_IWUSR;
- attr->read = read_vpd_attr;
- attr->write = write_vpd_attr;
- retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
- if (retval) {
- kfree(attr);
- return;
- }
+ if (!pdev->vpd)
+ return 0;
- dev->vpd->attr = attr;
+ return a->attr.mode;
}
-void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev)
-{
- if (dev->vpd && dev->vpd->attr) {
- sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
- kfree(dev->vpd->attr);
- }
-}
+const struct attribute_group pci_dev_vpd_attr_group = {
+ .bin_attrs = vpd_attrs,
+ .is_bin_visible = vpd_attr_is_visible,
+};
-int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
+int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
{
- int i;
+ int i = 0;
- for (i = off; i < len; ) {
- u8 val = buf[i];
-
- if (val & PCI_VPD_LRDT) {
- /* Don't return success of the tag isn't complete */
- if (i + PCI_VPD_LRDT_TAG_SIZE > len)
- break;
-
- if (val == rdt)
- return i;
-
- i += PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&buf[i]);
- } else {
- u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;
-
- if (tag == rdt)
- return i;
-
- if (tag == PCI_VPD_SRDT_END)
- break;
+ /* look for LRDT tags only; the end tag is the only SRDT tag */
+ while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
+ if (buf[i] == rdt)
+ return i;
- i += PCI_VPD_SRDT_TAG_SIZE +
- pci_vpd_srdt_size(&buf[i]);
- }
+ i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
}
return -ENOENT;
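The rewritten loop relies on the Large Resource Data Type layout that the pci_vpd_lrdt_*() helpers encode; each record looks like the sketch below, which is why a 3-byte header fitting inside len is the only bound the loop needs to check:

/*
 * byte 0     : tag with bit 7 (PCI_VPD_LRDT) set, e.g. 0x90 for
 *              read-only data (PCI_VPD_LRDT_RO_DATA)
 * bytes 1-2  : little-endian payload length (pci_vpd_lrdt_size())
 * bytes 3... : payload
 */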
@@ -530,7 +446,7 @@ static void quirk_f0_vpd_link(struct pci_dev *dev)
if (!PCI_FUNC(dev->devfn))
return;
- f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ f0 = pci_get_func0_dev(dev);
if (!f0)
return;
@@ -570,7 +486,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
/*
* The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
* device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
@@ -578,51 +493,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
-/*
- * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
- * VPD end tag will hang the device. This problem was initially
- * observed when a vpd entry was created in sysfs
- * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
- * will dump 32k of data. Reading a full 32k will cause an access
- * beyond the VPD end tag causing the device to hang. Once the device
- * is hung, the bnx2 driver will not be able to reset the device.
- * We believe that it is legal to read beyond the end tag and
- * therefore the solution is to limit the read/write length.
- */
-static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
+static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
- /*
- * Only disable the VPD capability for 5706, 5706S, 5708,
- * 5708S and 5709 rev. A
- */
- if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
- (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
- (dev->device == PCI_DEVICE_ID_NX2_5708) ||
- (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
- ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
- (dev->revision & 0xf0) == 0x0)) {
- if (dev->vpd)
- dev->vpd->len = 0x80;
- }
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
+ return;
+
+ vpd->valid = 1;
+ vpd->len = len;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709S,
- quirk_brcm_570x_limit_vpd);
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
@@ -642,9 +522,9 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
* limits.
*/
if (chip == 0x0 && prod >= 0x20)
- pci_set_vpd_size(dev, 8192);
+ pci_vpd_set_size(dev, 8192);
else if (chip >= 0x4 && func < 0x8)
- pci_set_vpd_size(dev, 2048);
+ pci_vpd_set_size(dev, 2048);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 2d7502648219..b7a8f3a1921f 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
spin_unlock(&pcifront_dev_lock);
- if (!err && !swiotlb_nr_tbl()) {
+ if (!err && !is_swiotlb_active()) {
err = pci_xen_swiotlb_init_late();
if (err)
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index e6939103991b..948b763dc451 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -75,7 +75,7 @@ void release_cis_mem(struct pcmcia_socket *s)
mutex_unlock(&s->ops_mutex);
}
-/**
+/*
* set_cis_map() - map the card memory at "card_offset" into virtual space.
*
* If flags & MAP_ATTRIB, map the attribute space, otherwise
@@ -126,7 +126,7 @@ static void __iomem *set_cis_map(struct pcmcia_socket *s,
#define IS_ATTR 1
#define IS_INDIRECT 8
-/**
+/*
* pcmcia_read_cis_mem() - low-level function to read CIS memory
*
* must be called with ops_mutex held
@@ -206,7 +206,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
}
-/**
+/*
* pcmcia_write_cis_mem() - low-level function to write CIS memory
*
* Probably only useful for writing one-byte registers. Must be called
@@ -277,7 +277,7 @@ int pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
}
-/**
+/*
* read_cis_cache() - read CIS memory or its associated cache
*
* This is a wrapper around read_cis_mem, with the same interface,
@@ -365,7 +365,7 @@ void destroy_cis_cache(struct pcmcia_socket *s)
}
}
-/**
+/*
* verify_cis_cache() - does the CIS match what is in the CIS cache?
*/
int verify_cis_cache(struct pcmcia_socket *s)
@@ -401,7 +401,7 @@ int verify_cis_cache(struct pcmcia_socket *s)
return 0;
}
-/**
+/*
* pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS
*
* For really bad cards, we provide a facility for uploading a
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 72114907c0e4..bd81aa64d011 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -83,7 +83,7 @@ struct pcmcia_dynid {
};
/**
- * pcmcia_store_new_id - add a new PCMCIA device ID to this driver and re-probe devices
+ * new_id_store() - add a new PCMCIA device ID to this driver and re-probe devices
* @driver: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
@@ -371,9 +371,6 @@ static int pcmcia_device_remove(struct device *dev)
pcmcia_card_remove(p_dev->socket, p_dev);
/* detach the "instance" */
- if (!p_drv)
- return 0;
-
if (p_drv->remove)
p_drv->remove(p_dev);
@@ -389,7 +386,7 @@ static int pcmcia_device_remove(struct device *dev)
"pcmcia: driver %s did not release window properly\n",
p_drv->name);
- /* references from pcmcia_probe_device */
+ /* references from pcmcia_device_probe */
pcmcia_put_dev(p_dev);
module_put(p_drv->owner);
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index e4c4daf92038..d2d0ed4b27c8 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -122,7 +122,7 @@ next_entry:
}
-/**
+/*
* pcmcia_io_cfg_data_width() - convert cfgtable to data path width parameter
*/
static int pcmcia_io_cfg_data_width(unsigned int flags)
@@ -143,7 +143,7 @@ struct pcmcia_cfg_mem {
cistpl_cftable_entry_t dflt;
};
-/**
+/*
* pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
*
* pcmcia_do_loop_config() is the internal callback for the call from
@@ -289,7 +289,7 @@ struct pcmcia_loop_mem {
void *priv_data);
};
-/**
+/*
* pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_config()
*
* pcmcia_do_loop_tuple() is the internal callback for the call from
@@ -337,7 +337,7 @@ struct pcmcia_loop_get {
cisdata_t **buf;
};
-/**
+/*
* pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
*
* pcmcia_do_get_tuple() is the internal callback for the call from
@@ -386,7 +386,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
EXPORT_SYMBOL(pcmcia_get_tuple);
-/**
+/*
* pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
*
* pcmcia_do_get_mac() is the internal callback for the call from
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index e3a6b6c8a5b0..c1c197292111 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -144,7 +144,7 @@ static int alloc_io_space(struct pcmcia_socket *s, struct resource *res,
}
-/**
+/*
* pcmcia_access_config() - read or write card configuration registers
*
* pcmcia_access_config() reads and writes configuration registers in
@@ -184,7 +184,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
}
-/**
+/*
* pcmcia_read_config_byte() - read a byte from a card configuration register
*
* pcmcia_read_config_byte() reads a byte from a configuration register in
@@ -197,7 +197,7 @@ int pcmcia_read_config_byte(struct pcmcia_device *p_dev, off_t where, u8 *val)
EXPORT_SYMBOL(pcmcia_read_config_byte);
-/**
+/*
* pcmcia_write_config_byte() - write a byte to a card configuration register
*
* pcmcia_write_config_byte() writes a byte to a configuration register in
@@ -720,7 +720,8 @@ static irqreturn_t test_action(int cpl, void *dev_id)
/**
* pcmcia_setup_isa_irq() - determine whether an ISA IRQ can be used
- * @p_dev - the associated PCMCIA device
+ * @p_dev: the associated PCMCIA device
+ * @type: IRQ type (flags)
*
* locking note: must be called with ops_mutex locked.
*/
@@ -785,7 +786,7 @@ void pcmcia_cleanup_irq(struct pcmcia_socket *s)
/**
* pcmcia_setup_irq() - determine IRQ to be used for device
- * @p_dev - the associated PCMCIA device
+ * @p_dev: the associated PCMCIA device
*
* locking note: must be called with ops_mutex locked.
*/
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 3b05760e69d6..bb15a8bdbaab 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -257,7 +257,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
/*======================================================================*/
-/**
+/*
* readable() - iomem validation function for cards with a valid CIS
*/
static int readable(struct pcmcia_socket *s, struct resource *res,
@@ -288,7 +288,7 @@ static int readable(struct pcmcia_socket *s, struct resource *res,
return 0;
}
-/**
+/*
* checksum() - iomem validation function for simple memory cards
*/
static int checksum(struct pcmcia_socket *s, struct resource *res,
@@ -343,9 +343,9 @@ static int checksum(struct pcmcia_socket *s, struct resource *res,
*/
static int do_validate_mem(struct pcmcia_socket *s,
unsigned long base, unsigned long size,
- int validate (struct pcmcia_socket *s,
- struct resource *res,
- unsigned int *value))
+ int (*validate)(struct pcmcia_socket *s,
+ struct resource *res,
+ unsigned int *value))
{
struct socket_data *s_data = s->resource_data;
struct resource *res1, *res2;
@@ -398,12 +398,12 @@ static int do_validate_mem(struct pcmcia_socket *s,
* function returns the size of the usable memory area.
*/
static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num,
- int validate (struct pcmcia_socket *s,
- struct resource *res,
- unsigned int *value),
- int fallback (struct pcmcia_socket *s,
- struct resource *res,
- unsigned int *value))
+ int (*validate)(struct pcmcia_socket *s,
+ struct resource *res,
+ unsigned int *value),
+ int (*fallback)(struct pcmcia_socket *s,
+ struct resource *res,
+ unsigned int *value))
{
struct socket_data *s_data = s->resource_data;
u_long i, j, bad, fail, step;
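The signature change above is purely declarative: a C parameter of function type adjusts to pointer-to-function, so both spellings declare the same parameter and the new form only makes the decay explicit. A minimal sketch:

/* equivalent declarations of the same callback parameter: */
int do_probe(int validate(struct pcmcia_socket *, struct resource *,
			  unsigned int *));
int do_probe(int (*validate)(struct pcmcia_socket *, struct resource *,
			     unsigned int *));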
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d10d84fb79c..d4f7f1f9cc77 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -581,33 +581,6 @@ static const struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
- if (!__oprofile_cpu_pmu)
- return NULL;
-
- return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
- int max_events = 0;
-
- if (__oprofile_cpu_pmu != NULL)
- max_events = __oprofile_cpu_pmu->num_events;
-
- return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
static int armpmu_count_irq_users(const int irq)
{
int cpu, count = 0;
@@ -979,9 +952,6 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
goto out_destroy;
- if (!__oprofile_cpu_pmu)
- __oprofile_cpu_pmu = pmu;
-
pr_info("enabled with %s PMU driver, %d counters available%s\n",
pmu->name, pmu->num_events,
has_nmi ? ", using NMIs" : "");
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 14e0551cd319..77fe65367ce5 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -12,8 +12,6 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
-#define PSEC_PER_SEC 1000000000000LL
-
/*
* Minimum D-PHY timings based on MIPI D-PHY specification. Derived
* from the valid ranges specified in Section 6.9, Table 14, Page 41
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 8af8c6c5cc02..347dc79a18c1 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -11,16 +11,16 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/time64.h>
+
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
-#include <linux/pm_runtime.h>
-#include <linux/mfd/syscon.h>
-
-#define PSEC_PER_SEC 1000000000000LL
#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b7675cce0027..c2c7e7963ed0 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -208,13 +208,18 @@ config PINCTRL_OXNAS
select MFD_SYSCON
config PINCTRL_ROCKCHIP
- bool
+ tristate "Rockchip gpio and pinctrl driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
+ select GPIOLIB
select PINMUX
select GENERIC_PINCONF
select GENERIC_IRQ_CHIP
select MFD_SYSCON
select OF_GPIO
+ default ARCH_ROCKCHIP
+ help
+ This supports the pinctrl and GPIO driver for Rockchip SoCs.
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
@@ -318,6 +323,20 @@ config PINCTRL_ZYNQ
help
This selects the pinctrl driver for Xilinx Zynq.
+config PINCTRL_ZYNQMP
+ tristate "Pinctrl driver for Xilinx ZynqMP"
+ depends on ZYNQMP_FIRMWARE
+ select PINMUX
+ select GENERIC_PINCONF
+ default ZYNQMP_FIRMWARE
+ help
+ This selects the pinctrl driver for the Xilinx ZynqMP platform.
+ This driver will query the pin information from the firmware
+ and allow configuring the pins.
+ Configuration can include the mux function to select for those
+ pin(s)/group(s), and various pin configuration parameters
+ such as pull-up, slew rate, etc.
+
config PINCTRL_INGENIC
bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
default MACH_INGENIC
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 8bf459c32a76..5ef5334a797f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 0ed14de0134c..c9c5efc92731 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -29,6 +29,68 @@ config PINCTRL_BCM2835
help
Say Y here to enable the Broadcom BCM2835 GPIO driver.
+config PINCTRL_BCM63XX
+ bool
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIO_REGMAP
+
+config PINCTRL_BCM6318
+ bool "Broadcom BCM6318 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6318 GPIO driver.
+
+config PINCTRL_BCM6328
+ bool "Broadcom BCM6328 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6328 GPIO driver.
+
+config PINCTRL_BCM6358
+ bool "Broadcom BCM6358 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6358 GPIO driver.
+
+config PINCTRL_BCM6362
+ bool "Broadcom BCM6362 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6362 GPIO driver.
+
+config PINCTRL_BCM6368
+ bool "Broadcom BCM6368 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6368 GPIO driver.
+
+config PINCTRL_BCM63268
+ bool "Broadcom BCM63268 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM63268 GPIO driver.
+
config PINCTRL_IPROC_GPIO
bool "Broadcom iProc GPIO (with PINCONF) driver"
depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST)
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 79d5e49fdd9a..00c7b7775e63 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -3,6 +3,13 @@
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
+obj-$(CONFIG_PINCTRL_BCM63XX) += pinctrl-bcm63xx.o
+obj-$(CONFIG_PINCTRL_BCM6318) += pinctrl-bcm6318.o
+obj-$(CONFIG_PINCTRL_BCM6328) += pinctrl-bcm6328.o
+obj-$(CONFIG_PINCTRL_BCM6358) += pinctrl-bcm6358.o
+obj-$(CONFIG_PINCTRL_BCM6362) += pinctrl-bcm6362.o
+obj-$(CONFIG_PINCTRL_BCM6368) += pinctrl-bcm6368.o
+obj-$(CONFIG_PINCTRL_BCM63268) += pinctrl-bcm63268.o
obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
obj-$(CONFIG_PINCTRL_NS) += pinctrl-ns.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6318.c b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
new file mode 100644
index 000000000000..77fd9b58067d
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6318 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6318_NUM_GPIOS 50
+#define BCM6318_NUM_MUX 48
+
+#define BCM6318_MODE_REG 0x18
+#define BCM6318_MUX_REG 0x1c
+#define BCM6328_MUX_MASK GENMASK(1, 0)
+#define BCM6318_PAD_REG 0x54
+#define BCM6328_PAD_MASK GENMASK(3, 0)
+
+struct bcm6318_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6318_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ unsigned mode_val:1;
+ unsigned mux_val:2;
+};
+
+static const struct pinctrl_pin_desc bcm6318_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+ PINCTRL_PIN(48, "gpio48"),
+ PINCTRL_PIN(49, "gpio49"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned gpio32_pins[] = { 32 };
+static unsigned gpio33_pins[] = { 33 };
+static unsigned gpio34_pins[] = { 34 };
+static unsigned gpio35_pins[] = { 35 };
+static unsigned gpio36_pins[] = { 36 };
+static unsigned gpio37_pins[] = { 37 };
+static unsigned gpio38_pins[] = { 38 };
+static unsigned gpio39_pins[] = { 39 };
+static unsigned gpio40_pins[] = { 40 };
+static unsigned gpio41_pins[] = { 41 };
+static unsigned gpio42_pins[] = { 42 };
+static unsigned gpio43_pins[] = { 43 };
+static unsigned gpio44_pins[] = { 44 };
+static unsigned gpio45_pins[] = { 45 };
+static unsigned gpio46_pins[] = { 46 };
+static unsigned gpio47_pins[] = { 47 };
+static unsigned gpio48_pins[] = { 48 };
+static unsigned gpio49_pins[] = { 49 };
+
+#define BCM6318_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
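Each entry in the table below comes from this helper; for example, BCM6318_GROUP(gpio0) expands, roughly, to:

{
	.name = "gpio0",
	.pins = gpio0_pins,
	.num_pins = 1,	/* ARRAY_SIZE(gpio0_pins) */
},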
+
+static struct bcm6318_pingroup bcm6318_groups[] = {
+ BCM6318_GROUP(gpio0),
+ BCM6318_GROUP(gpio1),
+ BCM6318_GROUP(gpio2),
+ BCM6318_GROUP(gpio3),
+ BCM6318_GROUP(gpio4),
+ BCM6318_GROUP(gpio5),
+ BCM6318_GROUP(gpio6),
+ BCM6318_GROUP(gpio7),
+ BCM6318_GROUP(gpio8),
+ BCM6318_GROUP(gpio9),
+ BCM6318_GROUP(gpio10),
+ BCM6318_GROUP(gpio11),
+ BCM6318_GROUP(gpio12),
+ BCM6318_GROUP(gpio13),
+ BCM6318_GROUP(gpio14),
+ BCM6318_GROUP(gpio15),
+ BCM6318_GROUP(gpio16),
+ BCM6318_GROUP(gpio17),
+ BCM6318_GROUP(gpio18),
+ BCM6318_GROUP(gpio19),
+ BCM6318_GROUP(gpio20),
+ BCM6318_GROUP(gpio21),
+ BCM6318_GROUP(gpio22),
+ BCM6318_GROUP(gpio23),
+ BCM6318_GROUP(gpio24),
+ BCM6318_GROUP(gpio25),
+ BCM6318_GROUP(gpio26),
+ BCM6318_GROUP(gpio27),
+ BCM6318_GROUP(gpio28),
+ BCM6318_GROUP(gpio29),
+ BCM6318_GROUP(gpio30),
+ BCM6318_GROUP(gpio31),
+ BCM6318_GROUP(gpio32),
+ BCM6318_GROUP(gpio33),
+ BCM6318_GROUP(gpio34),
+ BCM6318_GROUP(gpio35),
+ BCM6318_GROUP(gpio36),
+ BCM6318_GROUP(gpio37),
+ BCM6318_GROUP(gpio38),
+ BCM6318_GROUP(gpio39),
+ BCM6318_GROUP(gpio40),
+ BCM6318_GROUP(gpio41),
+ BCM6318_GROUP(gpio42),
+ BCM6318_GROUP(gpio43),
+ BCM6318_GROUP(gpio44),
+ BCM6318_GROUP(gpio45),
+ BCM6318_GROUP(gpio46),
+ BCM6318_GROUP(gpio47),
+ BCM6318_GROUP(gpio48),
+ BCM6318_GROUP(gpio49),
+};
+
+/* GPIO_MODE */
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+/* PINMUX_SEL */
+static const char * const ephy0_spd_led_groups[] = {
+ "gpio0",
+};
+
+static const char * const ephy1_spd_led_groups[] = {
+ "gpio1",
+};
+
+static const char * const ephy2_spd_led_groups[] = {
+ "gpio2",
+};
+
+static const char * const ephy3_spd_led_groups[] = {
+ "gpio3",
+};
+
+static const char * const ephy0_act_led_groups[] = {
+ "gpio4",
+};
+
+static const char * const ephy1_act_led_groups[] = {
+ "gpio5",
+};
+
+static const char * const ephy2_act_led_groups[] = {
+ "gpio6",
+};
+
+static const char * const ephy3_act_led_groups[] = {
+ "gpio7",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio6",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio7",
+};
+
+static const char * const inet_act_led_groups[] = {
+ "gpio8",
+};
+
+static const char * const inet_fail_led_groups[] = {
+ "gpio9",
+};
+
+static const char * const dsl_led_groups[] = {
+ "gpio10",
+};
+
+static const char * const post_fail_led_groups[] = {
+ "gpio11",
+};
+
+static const char * const wlan_wps_led_groups[] = {
+ "gpio12",
+};
+
+static const char * const usb_pwron_groups[] = {
+ "gpio13",
+};
+
+static const char * const usb_device_led_groups[] = {
+ "gpio13",
+};
+
+static const char * const usb_active_groups[] = {
+ "gpio40",
+};
+
+#define BCM6318_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mode_val = 1, \
+ }
+
+#define BCM6318_MUX_FUN(n, mux) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mux_val = mux, \
+ }
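+
+/*
+ * Illustration: BCM6318_MUX_FUN(serial_led_data, 3) yields a function
+ * named "serial_led_data" covering serial_led_data_groups with
+ * .mux_val = 3 and .mode_val left at 0; BCM6318_MODE_FUN() is the
+ * converse and only sets .mode_val = 1.
+ */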
+
+static const struct bcm6318_function bcm6318_funcs[] = {
+ BCM6318_MODE_FUN(led),
+ BCM6318_MUX_FUN(ephy0_spd_led, 1),
+ BCM6318_MUX_FUN(ephy1_spd_led, 1),
+ BCM6318_MUX_FUN(ephy2_spd_led, 1),
+ BCM6318_MUX_FUN(ephy3_spd_led, 1),
+ BCM6318_MUX_FUN(ephy0_act_led, 1),
+ BCM6318_MUX_FUN(ephy1_act_led, 1),
+ BCM6318_MUX_FUN(ephy2_act_led, 1),
+ BCM6318_MUX_FUN(ephy3_act_led, 1),
+ BCM6318_MUX_FUN(serial_led_data, 3),
+ BCM6318_MUX_FUN(serial_led_clk, 3),
+ BCM6318_MUX_FUN(inet_act_led, 1),
+ BCM6318_MUX_FUN(inet_fail_led, 1),
+ BCM6318_MUX_FUN(dsl_led, 1),
+ BCM6318_MUX_FUN(post_fail_led, 1),
+ BCM6318_MUX_FUN(wlan_wps_led, 1),
+ BCM6318_MUX_FUN(usb_pwron, 1),
+ BCM6318_MUX_FUN(usb_device_led, 2),
+ BCM6318_MUX_FUN(usb_active, 2),
+};
+
+static inline unsigned int bcm6318_mux_off(unsigned int pin)
+{
+ return BCM6318_MUX_REG + (pin / 16) * 4;
+}
+
+static inline unsigned int bcm6318_pad_off(unsigned int pin)
+{
+ return BCM6318_PAD_REG + (pin / 8) * 4;
+}
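+
+/*
+ * Worked example: pin 20 selects its mux in the second 32-bit word,
+ * bcm6318_mux_off(20) = BCM6318_MUX_REG + 4, as a 2-bit field shifted
+ * by (20 % 16) * 2 = 8; its pad value sits in the third pad word,
+ * bcm6318_pad_off(20) = BCM6318_PAD_REG + 8, as a 4-bit field shifted
+ * by (20 % 8) * 4 = 16.
+ */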
+
+static int bcm6318_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6318_groups);
+}
+
+static const char *bcm6318_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6318_groups[group].name;
+}
+
+static int bcm6318_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6318_groups[group].pins;
+ *num_pins = bcm6318_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6318_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6318_funcs);
+}
+
+static const char *bcm6318_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6318_funcs[selector].name;
+}
+
+static int bcm6318_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6318_funcs[selector].groups;
+ *num_groups = bcm6318_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static inline void bcm6318_rmw_mux(struct bcm63xx_pinctrl *pc, unsigned pin,
+ unsigned int mode, unsigned int mux)
+{
+ if (pin < BCM63XX_BANK_GPIOS)
+ regmap_update_bits(pc->regs, BCM6318_MODE_REG, BIT(pin),
+ mode ? BIT(pin) : 0);
+
+ if (pin < BCM6318_NUM_MUX)
+ regmap_update_bits(pc->regs,
+ bcm6318_mux_off(pin),
+ BCM6328_MUX_MASK << ((pin % 16) * 2),
+ mux << ((pin % 16) * 2));
+}
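+
+/*
+ * Example flow: muxing usb_device_led onto gpio13 ends up in
+ * bcm6318_rmw_mux(pc, 13, 0, 2), which clears MODE bit 13 and writes 2
+ * into the 2-bit field at bcm6318_mux_off(13), shifted by
+ * (13 % 16) * 2 = 26.
+ */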
+
+static inline void bcm6318_set_pad(struct bcm63xx_pinctrl *pc, unsigned pin,
+ uint8_t val)
+{
+ regmap_update_bits(pc->regs, bcm6318_pad_off(pin),
+ BCM6328_PAD_MASK << ((pin % 8) * 4),
+ val << ((pin % 8) * 4));
+}
+
+static int bcm6318_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm6318_pingroup *pg = &bcm6318_groups[group];
+ const struct bcm6318_function *f = &bcm6318_funcs[selector];
+
+ bcm6318_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
+
+ return 0;
+}
+
+static int bcm6318_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ if (offset < 13) {
+ /* GPIOs 0-12 use mux 0 as GPIO function */
+ bcm6318_rmw_mux(pc, offset, 0, 0);
+ } else if (offset < 42) {
+ /* GPIOs 13-41 use mux 3 as GPIO function */
+ bcm6318_rmw_mux(pc, offset, 0, 3);
+
+ bcm6318_set_pad(pc, offset, 0);
+ }
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6318_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6318_pinctrl_get_group_name,
+ .get_group_pins = bcm6318_pinctrl_get_group_pins,
+ .get_groups_count = bcm6318_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6318_pmx_ops = {
+ .get_function_groups = bcm6318_pinctrl_get_groups,
+ .get_function_name = bcm6318_pinctrl_get_func_name,
+ .get_functions_count = bcm6318_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6318_gpio_request_enable,
+ .set_mux = bcm6318_pinctrl_set_mux,
+ .strict = true,
+};
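+
+/*
+ * Hypothetical device-tree usage (node and label names invented for
+ * illustration): with the generic pin DT parser hooked up above, a node
+ * such as
+ *
+ *   serial_led: serial_led-pins {
+ *       function = "serial_led_data";
+ *       pins = "gpio6";
+ *   };
+ *
+ * maps straight onto bcm6318_funcs[] and bcm6318_groups[] entries.
+ */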
+
+static const struct bcm63xx_pinctrl_soc bcm6318_soc = {
+ .ngpios = BCM6318_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6318_pins),
+ .pctl_ops = &bcm6318_pctl_ops,
+ .pins = bcm6318_pins,
+ .pmx_ops = &bcm6318_pmx_ops,
+};
+
+static int bcm6318_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm6318_soc, NULL);
+}
+
+static const struct of_device_id bcm6318_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6318-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6318_pinctrl_driver = {
+ .probe = bcm6318_pinctrl_probe,
+ .driver = {
+ .name = "bcm6318-pinctrl",
+ .of_match_table = bcm6318_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6318_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63268.c b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
new file mode 100644
index 000000000000..d4c5fad7fb7d
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM63268 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM63268_NUM_GPIOS 52
+#define BCM63268_NUM_LEDS 24
+
+#define BCM63268_LED_REG 0x10
+#define BCM63268_MODE_REG 0x18
+#define BCM63268_CTRL_REG 0x1c
+#define BCM63268_BASEMODE_REG 0x38
+#define BCM63268_BASEMODE_NAND BIT(2) /* GPIOs 2-7, 24-31 */
+#define BCM63268_BASEMODE_GPIO35 BIT(4) /* GPIO 35 */
+#define BCM63268_BASEMODE_DECTPD BIT(5) /* GPIOs 8/9 */
+#define BCM63268_BASEMODE_VDSL_PHY_0 BIT(6) /* GPIOs 10/11 */
+#define BCM63268_BASEMODE_VDSL_PHY_1 BIT(7) /* GPIOs 12/13 */
+#define BCM63268_BASEMODE_VDSL_PHY_2 BIT(8) /* GPIOs 24/25 */
+#define BCM63268_BASEMODE_VDSL_PHY_3 BIT(9) /* GPIOs 26/27 */
+
+enum bcm63268_pinctrl_reg {
+ BCM63268_LEDCTRL,
+ BCM63268_MODE,
+ BCM63268_CTRL,
+ BCM63268_BASEMODE,
+};
+
+struct bcm63268_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm63268_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ enum bcm63268_pinctrl_reg reg;
+ uint32_t mask;
+};
+
+#define BCM63268_PIN(a, b, basemode) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)(basemode) \
+ }
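+
+/*
+ * drv_data doubles as the mask of BASEMODE bits that must be cleared
+ * before the pin can act as a GPIO or per-pin function; e.g.
+ * BCM63268_PIN(8, "gpio8", BCM63268_BASEMODE_DECTPD) stores
+ * (void *)BIT(5).
+ */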
+
+static const struct pinctrl_pin_desc bcm63268_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ BCM63268_PIN(2, "gpio2", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(3, "gpio3", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(4, "gpio4", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(5, "gpio5", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(6, "gpio6", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(7, "gpio7", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(8, "gpio8", BCM63268_BASEMODE_DECTPD),
+ BCM63268_PIN(9, "gpio9", BCM63268_BASEMODE_DECTPD),
+ BCM63268_PIN(10, "gpio10", BCM63268_BASEMODE_VDSL_PHY_0),
+ BCM63268_PIN(11, "gpio11", BCM63268_BASEMODE_VDSL_PHY_0),
+ BCM63268_PIN(12, "gpio12", BCM63268_BASEMODE_VDSL_PHY_1),
+ BCM63268_PIN(13, "gpio13", BCM63268_BASEMODE_VDSL_PHY_1),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ BCM63268_PIN(24, "gpio24",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_2),
+ BCM63268_PIN(25, "gpio25",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_2),
+ BCM63268_PIN(26, "gpio26",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_3),
+ BCM63268_PIN(27, "gpio27",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_3),
+ BCM63268_PIN(28, "gpio28", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(29, "gpio29", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(30, "gpio30", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(31, "gpio31", BCM63268_BASEMODE_NAND),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+ PINCTRL_PIN(48, "gpio48"),
+ PINCTRL_PIN(49, "gpio49"),
+ PINCTRL_PIN(50, "gpio50"),
+ PINCTRL_PIN(51, "gpio51"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned gpio32_pins[] = { 32 };
+static unsigned gpio33_pins[] = { 33 };
+static unsigned gpio34_pins[] = { 34 };
+static unsigned gpio35_pins[] = { 35 };
+static unsigned gpio36_pins[] = { 36 };
+static unsigned gpio37_pins[] = { 37 };
+static unsigned gpio38_pins[] = { 38 };
+static unsigned gpio39_pins[] = { 39 };
+static unsigned gpio40_pins[] = { 40 };
+static unsigned gpio41_pins[] = { 41 };
+static unsigned gpio42_pins[] = { 42 };
+static unsigned gpio43_pins[] = { 43 };
+static unsigned gpio44_pins[] = { 44 };
+static unsigned gpio45_pins[] = { 45 };
+static unsigned gpio46_pins[] = { 46 };
+static unsigned gpio47_pins[] = { 47 };
+static unsigned gpio48_pins[] = { 48 };
+static unsigned gpio49_pins[] = { 49 };
+static unsigned gpio50_pins[] = { 50 };
+static unsigned gpio51_pins[] = { 51 };
+
+static unsigned nand_grp_pins[] = {
+ 2, 3, 4, 5, 6, 7, 24,
+ 25, 26, 27, 28, 29, 30, 31,
+};
+
+static unsigned dectpd_grp_pins[] = { 8, 9 };
+static unsigned vdsl_phy0_grp_pins[] = { 10, 11 };
+static unsigned vdsl_phy1_grp_pins[] = { 12, 13 };
+static unsigned vdsl_phy2_grp_pins[] = { 24, 25 };
+static unsigned vdsl_phy3_grp_pins[] = { 26, 27 };
+
+#define BCM63268_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm63268_pingroup bcm63268_groups[] = {
+ BCM63268_GROUP(gpio0),
+ BCM63268_GROUP(gpio1),
+ BCM63268_GROUP(gpio2),
+ BCM63268_GROUP(gpio3),
+ BCM63268_GROUP(gpio4),
+ BCM63268_GROUP(gpio5),
+ BCM63268_GROUP(gpio6),
+ BCM63268_GROUP(gpio7),
+ BCM63268_GROUP(gpio8),
+ BCM63268_GROUP(gpio9),
+ BCM63268_GROUP(gpio10),
+ BCM63268_GROUP(gpio11),
+ BCM63268_GROUP(gpio12),
+ BCM63268_GROUP(gpio13),
+ BCM63268_GROUP(gpio14),
+ BCM63268_GROUP(gpio15),
+ BCM63268_GROUP(gpio16),
+ BCM63268_GROUP(gpio17),
+ BCM63268_GROUP(gpio18),
+ BCM63268_GROUP(gpio19),
+ BCM63268_GROUP(gpio20),
+ BCM63268_GROUP(gpio21),
+ BCM63268_GROUP(gpio22),
+ BCM63268_GROUP(gpio23),
+ BCM63268_GROUP(gpio24),
+ BCM63268_GROUP(gpio25),
+ BCM63268_GROUP(gpio26),
+ BCM63268_GROUP(gpio27),
+ BCM63268_GROUP(gpio28),
+ BCM63268_GROUP(gpio29),
+ BCM63268_GROUP(gpio30),
+ BCM63268_GROUP(gpio31),
+ BCM63268_GROUP(gpio32),
+ BCM63268_GROUP(gpio33),
+ BCM63268_GROUP(gpio34),
+ BCM63268_GROUP(gpio35),
+ BCM63268_GROUP(gpio36),
+ BCM63268_GROUP(gpio37),
+ BCM63268_GROUP(gpio38),
+ BCM63268_GROUP(gpio39),
+ BCM63268_GROUP(gpio40),
+ BCM63268_GROUP(gpio41),
+ BCM63268_GROUP(gpio42),
+ BCM63268_GROUP(gpio43),
+ BCM63268_GROUP(gpio44),
+ BCM63268_GROUP(gpio45),
+ BCM63268_GROUP(gpio46),
+ BCM63268_GROUP(gpio47),
+ BCM63268_GROUP(gpio48),
+ BCM63268_GROUP(gpio49),
+ BCM63268_GROUP(gpio50),
+ BCM63268_GROUP(gpio51),
+
+ /* multi pin groups */
+ BCM63268_GROUP(nand_grp),
+ BCM63268_GROUP(dectpd_grp),
+ BCM63268_GROUP(vdsl_phy0_grp),
+ BCM63268_GROUP(vdsl_phy1_grp),
+ BCM63268_GROUP(vdsl_phy2_grp),
+ BCM63268_GROUP(vdsl_phy3_grp),
+};
+
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio0",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio1",
+};
+
+static const char * const hsspi_cs4_groups[] = {
+ "gpio16",
+};
+
+static const char * const hsspi_cs5_groups[] = {
+ "gpio17",
+};
+
+static const char * const hsspi_cs6_groups[] = {
+ "gpio8",
+};
+
+static const char * const hsspi_cs7_groups[] = {
+ "gpio9",
+};
+
+static const char * const uart1_scts_groups[] = {
+ "gpio10",
+ "gpio24",
+};
+
+static const char * const uart1_srts_groups[] = {
+ "gpio11",
+ "gpio25",
+};
+
+static const char * const uart1_sdin_groups[] = {
+ "gpio12",
+ "gpio26",
+};
+
+static const char * const uart1_sdout_groups[] = {
+ "gpio13",
+ "gpio27",
+};
+
+static const char * const ntr_pulse_in_groups[] = {
+ "gpio14",
+ "gpio28",
+};
+
+static const char * const dsl_ntr_pulse_out_groups[] = {
+ "gpio15",
+ "gpio29",
+};
+
+static const char * const adsl_spi_miso_groups[] = {
+ "gpio18",
+};
+
+static const char * const adsl_spi_mosi_groups[] = {
+ "gpio19",
+};
+
+static const char * const vreg_clk_groups[] = {
+ "gpio22",
+};
+
+static const char * const pcie_clkreq_b_groups[] = {
+ "gpio23",
+};
+
+static const char * const switch_led_clk_groups[] = {
+ "gpio30",
+};
+
+static const char * const switch_led_data_groups[] = {
+ "gpio31",
+};
+
+static const char * const wifi_groups[] = {
+ "gpio32",
+ "gpio33",
+ "gpio34",
+ "gpio35",
+ "gpio36",
+ "gpio37",
+ "gpio38",
+ "gpio39",
+ "gpio40",
+ "gpio41",
+ "gpio42",
+ "gpio43",
+ "gpio44",
+ "gpio45",
+ "gpio46",
+ "gpio47",
+ "gpio48",
+ "gpio49",
+ "gpio50",
+ "gpio51",
+};
+
+static const char * const nand_groups[] = {
+ "nand_grp",
+};
+
+static const char * const dectpd_groups[] = {
+ "dectpd_grp",
+};
+
+static const char * const vdsl_phy_override_0_groups[] = {
+ "vdsl_phy_override_0_grp",
+};
+
+static const char * const vdsl_phy_override_1_groups[] = {
+ "vdsl_phy_override_1_grp",
+};
+
+static const char * const vdsl_phy_override_2_groups[] = {
+ "vdsl_phy_override_2_grp",
+};
+
+static const char * const vdsl_phy_override_3_groups[] = {
+ "vdsl_phy_override_3_grp",
+};
+
+#define BCM63268_LED_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_LEDCTRL, \
+ }
+
+#define BCM63268_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_MODE, \
+ }
+
+#define BCM63268_CTRL_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_CTRL, \
+ }
+
+#define BCM63268_BASEMODE_FUN(n, val) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_BASEMODE, \
+ .mask = val, \
+ }
+
+static const struct bcm63268_function bcm63268_funcs[] = {
+ BCM63268_LED_FUN(led),
+ BCM63268_MODE_FUN(serial_led_clk),
+ BCM63268_MODE_FUN(serial_led_data),
+ BCM63268_MODE_FUN(hsspi_cs6),
+ BCM63268_MODE_FUN(hsspi_cs7),
+ BCM63268_MODE_FUN(uart1_scts),
+ BCM63268_MODE_FUN(uart1_srts),
+ BCM63268_MODE_FUN(uart1_sdin),
+ BCM63268_MODE_FUN(uart1_sdout),
+ BCM63268_MODE_FUN(ntr_pulse_in),
+ BCM63268_MODE_FUN(dsl_ntr_pulse_out),
+ BCM63268_MODE_FUN(hsspi_cs4),
+ BCM63268_MODE_FUN(hsspi_cs5),
+ BCM63268_MODE_FUN(adsl_spi_miso),
+ BCM63268_MODE_FUN(adsl_spi_mosi),
+ BCM63268_MODE_FUN(vreg_clk),
+ BCM63268_MODE_FUN(pcie_clkreq_b),
+ BCM63268_MODE_FUN(switch_led_clk),
+ BCM63268_MODE_FUN(switch_led_data),
+ BCM63268_CTRL_FUN(wifi),
+ BCM63268_BASEMODE_FUN(nand, BCM63268_BASEMODE_NAND),
+ BCM63268_BASEMODE_FUN(dectpd, BCM63268_BASEMODE_DECTPD),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_0,
+ BCM63268_BASEMODE_VDSL_PHY_0),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_1,
+ BCM63268_BASEMODE_VDSL_PHY_1),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_2,
+ BCM63268_BASEMODE_VDSL_PHY_2),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_3,
+ BCM63268_BASEMODE_VDSL_PHY_3),
+};
+
+static int bcm63268_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm63268_groups);
+}
+
+static const char *bcm63268_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm63268_groups[group].name;
+}
+
+static int bcm63268_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm63268_groups[group].pins;
+ *num_pins = bcm63268_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm63268_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm63268_funcs);
+}
+
+static const char *bcm63268_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm63268_funcs[selector].name;
+}
+
+static int bcm63268_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm63268_funcs[selector].groups;
+ *num_groups = bcm63268_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static void bcm63268_set_gpio(struct bcm63xx_pinctrl *pc, unsigned pin)
+{
+ const struct pinctrl_pin_desc *desc = &bcm63268_pins[pin];
+ unsigned int basemode = (unsigned long) desc->drv_data;
+ unsigned int mask = BIT(bcm63xx_bank_pin(pin));
+
+ if (basemode)
+ regmap_update_bits(pc->regs, BCM63268_BASEMODE_REG, basemode,
+ 0);
+
+ if (pin < BCM63XX_BANK_GPIOS) {
+ /* base mode: 0 => gpio, 1 => mux function */
+ regmap_update_bits(pc->regs, BCM63268_MODE_REG, mask, 0);
+
+ /* pins 0-23 might be muxed to led */
+ if (pin < BCM63268_NUM_LEDS)
+ regmap_update_bits(pc->regs, BCM63268_LED_REG, mask,
+ 0);
+ } else if (pin < BCM63268_NUM_GPIOS) {
+ /* ctrl reg: 0 => wifi function, 1 => gpio */
+ regmap_update_bits(pc->regs, BCM63268_CTRL_REG, mask, mask);
+ }
+}
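+
+/*
+ * Worked example (assuming the usual 32-pin register bank): gpio40 has
+ * no basemode bits to clear, bcm63xx_bank_pin() yields bit 8 of the
+ * second bank, and since 40 >= BCM63XX_BANK_GPIOS only the CTRL
+ * register is written, with the bit set to pick GPIO over wifi.
+ */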
+
+static int bcm63268_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm63268_pingroup *pg = &bcm63268_groups[group];
+ const struct bcm63268_function *f = &bcm63268_funcs[selector];
+ unsigned i;
+ unsigned int reg;
+ unsigned int val, mask;
+
+ for (i = 0; i < pg->num_pins; i++)
+ bcm63268_set_gpio(pc, pg->pins[i]);
+
+ switch (f->reg) {
+ case BCM63268_LEDCTRL:
+ reg = BCM63268_LED_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM63268_MODE:
+ reg = BCM63268_MODE_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM63268_CTRL:
+ reg = BCM63268_CTRL_REG;
+ mask = BIT(pg->pins[0]);
+ val = 0;
+ break;
+ case BCM63268_BASEMODE:
+ reg = BCM63268_BASEMODE_REG;
+ mask = f->mask;
+ val = f->mask;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pc->regs, reg, mask, val);
+
+ return 0;
+}
+
+static int bcm63268_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ bcm63268_set_gpio(pc, offset);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm63268_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm63268_pinctrl_get_group_name,
+ .get_group_pins = bcm63268_pinctrl_get_group_pins,
+ .get_groups_count = bcm63268_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm63268_pmx_ops = {
+ .get_function_groups = bcm63268_pinctrl_get_groups,
+ .get_function_name = bcm63268_pinctrl_get_func_name,
+ .get_functions_count = bcm63268_pinctrl_get_func_count,
+ .gpio_request_enable = bcm63268_gpio_request_enable,
+ .set_mux = bcm63268_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm63268_soc = {
+ .ngpios = BCM63268_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm63268_pins),
+ .pctl_ops = &bcm63268_pctl_ops,
+ .pins = bcm63268_pins,
+ .pmx_ops = &bcm63268_pmx_ops,
+};
+
+static int bcm63268_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm63268_soc, NULL);
+}
+
+static const struct of_device_id bcm63268_pinctrl_match[] = {
+ { .compatible = "brcm,bcm63268-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm63268_pinctrl_driver = {
+ .probe = bcm63268_pinctrl_probe,
+ .driver = {
+ .name = "bcm63268-pinctrl",
+ .of_match_table = bcm63268_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm63268_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6328.c b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
new file mode 100644
index 000000000000..c9efce600550
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6328 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6328_NUM_GPIOS 32
+
+#define BCM6328_MODE_REG 0x18
+#define BCM6328_MUX_HI_REG 0x1c
+#define BCM6328_MUX_LO_REG 0x20
+#define BCM6328_MUX_OTHER_REG 0x24
+#define BCM6328_MUX_MASK GENMASK(1, 0)
+
+struct bcm6328_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6328_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ unsigned mode_val:1;
+ unsigned mux_val:2;
+};
+
+static const unsigned int bcm6328_mux[] = {
+ BCM6328_MUX_LO_REG,
+ BCM6328_MUX_HI_REG,
+ BCM6328_MUX_OTHER_REG
+};
+
+static const struct pinctrl_pin_desc bcm6328_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+
+ /*
+ * No idea where they really are, so let's put them according
+ * to their mux offsets.
+ */
+ PINCTRL_PIN(36, "hsspi_cs1"),
+ PINCTRL_PIN(38, "usb_p2"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+
+static unsigned hsspi_cs1_pins[] = { 36 };
+static unsigned usb_port1_pins[] = { 38 };
+
+#define BCM6328_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6328_pingroup bcm6328_groups[] = {
+ BCM6328_GROUP(gpio0),
+ BCM6328_GROUP(gpio1),
+ BCM6328_GROUP(gpio2),
+ BCM6328_GROUP(gpio3),
+ BCM6328_GROUP(gpio4),
+ BCM6328_GROUP(gpio5),
+ BCM6328_GROUP(gpio6),
+ BCM6328_GROUP(gpio7),
+ BCM6328_GROUP(gpio8),
+ BCM6328_GROUP(gpio9),
+ BCM6328_GROUP(gpio10),
+ BCM6328_GROUP(gpio11),
+ BCM6328_GROUP(gpio12),
+ BCM6328_GROUP(gpio13),
+ BCM6328_GROUP(gpio14),
+ BCM6328_GROUP(gpio15),
+ BCM6328_GROUP(gpio16),
+ BCM6328_GROUP(gpio17),
+ BCM6328_GROUP(gpio18),
+ BCM6328_GROUP(gpio19),
+ BCM6328_GROUP(gpio20),
+ BCM6328_GROUP(gpio21),
+ BCM6328_GROUP(gpio22),
+ BCM6328_GROUP(gpio23),
+ BCM6328_GROUP(gpio24),
+ BCM6328_GROUP(gpio25),
+ BCM6328_GROUP(gpio26),
+ BCM6328_GROUP(gpio27),
+ BCM6328_GROUP(gpio28),
+ BCM6328_GROUP(gpio29),
+ BCM6328_GROUP(gpio30),
+ BCM6328_GROUP(gpio31),
+
+ BCM6328_GROUP(hsspi_cs1),
+ BCM6328_GROUP(usb_port1),
+};
+
+/* GPIO_MODE */
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+/* PINMUX_SEL */
+static const char * const serial_led_data_groups[] = {
+ "gpio6",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio7",
+};
+
+static const char * const inet_act_led_groups[] = {
+ "gpio11",
+};
+
+static const char * const pcie_clkreq_groups[] = {
+ "gpio16",
+};
+
+static const char * const ephy0_act_led_groups[] = {
+ "gpio25",
+};
+
+static const char * const ephy1_act_led_groups[] = {
+ "gpio26",
+};
+
+static const char * const ephy2_act_led_groups[] = {
+ "gpio27",
+};
+
+static const char * const ephy3_act_led_groups[] = {
+ "gpio28",
+};
+
+static const char * const hsspi_cs1_groups[] = {
+ "hsspi_cs1"
+};
+
+static const char * const usb_host_port_groups[] = {
+ "usb_port1",
+};
+
+static const char * const usb_device_port_groups[] = {
+ "usb_port1",
+};
+
+#define BCM6328_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mode_val = 1, \
+ }
+
+#define BCM6328_MUX_FUN(n, mux) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mux_val = mux, \
+ }
+
+static const struct bcm6328_function bcm6328_funcs[] = {
+ BCM6328_MODE_FUN(led),
+ BCM6328_MUX_FUN(serial_led_data, 2),
+ BCM6328_MUX_FUN(serial_led_clk, 2),
+ BCM6328_MUX_FUN(inet_act_led, 1),
+ BCM6328_MUX_FUN(pcie_clkreq, 2),
+ BCM6328_MUX_FUN(ephy0_act_led, 1),
+ BCM6328_MUX_FUN(ephy1_act_led, 1),
+ BCM6328_MUX_FUN(ephy2_act_led, 1),
+ BCM6328_MUX_FUN(ephy3_act_led, 1),
+ BCM6328_MUX_FUN(hsspi_cs1, 2),
+ BCM6328_MUX_FUN(usb_host_port, 1),
+ BCM6328_MUX_FUN(usb_device_port, 2),
+};
+
+static inline unsigned int bcm6328_mux_off(unsigned int pin)
+{
+ return bcm6328_mux[pin / 16];
+}
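+
+/*
+ * Worked example: pins 0-15 select their function in BCM6328_MUX_LO_REG,
+ * pins 16-31 in BCM6328_MUX_HI_REG and pins 32-47 in
+ * BCM6328_MUX_OTHER_REG, so the virtual hsspi_cs1 pin 36 maps to
+ * bcm6328_mux[2] with its 2-bit field shifted by (36 % 16) * 2 = 8.
+ */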
+
+static int bcm6328_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6328_groups);
+}
+
+static const char *bcm6328_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6328_groups[group].name;
+}
+
+static int bcm6328_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6328_groups[group].pins;
+ *num_pins = bcm6328_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6328_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6328_funcs);
+}
+
+static const char *bcm6328_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6328_funcs[selector].name;
+}
+
+static int bcm6328_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6328_funcs[selector].groups;
+ *num_groups = bcm6328_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static void bcm6328_rmw_mux(struct bcm63xx_pinctrl *pc, unsigned pin,
+ unsigned int mode, unsigned int mux)
+{
+ if (pin < BCM6328_NUM_GPIOS)
+ regmap_update_bits(pc->regs, BCM6328_MODE_REG, BIT(pin),
+ mode ? BIT(pin) : 0);
+
+ regmap_update_bits(pc->regs, bcm6328_mux_off(pin),
+ BCM6328_MUX_MASK << ((pin % 16) * 2),
+ mux << ((pin % 16) * 2));
+}
+
+static int bcm6328_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm6328_pingroup *pg = &bcm6328_groups[group];
+ const struct bcm6328_function *f = &bcm6328_funcs[selector];
+
+ bcm6328_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
+
+ return 0;
+}
+
+static int bcm6328_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ bcm6328_rmw_mux(pc, offset, 0, 0);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6328_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6328_pinctrl_get_group_name,
+ .get_group_pins = bcm6328_pinctrl_get_group_pins,
+ .get_groups_count = bcm6328_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6328_pmx_ops = {
+ .get_function_groups = bcm6328_pinctrl_get_groups,
+ .get_function_name = bcm6328_pinctrl_get_func_name,
+ .get_functions_count = bcm6328_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6328_gpio_request_enable,
+ .set_mux = bcm6328_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6328_soc = {
+ .ngpios = BCM6328_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6328_pins),
+ .pctl_ops = &bcm6328_pctl_ops,
+ .pins = bcm6328_pins,
+ .pmx_ops = &bcm6328_pmx_ops,
+};
+
+static int bcm6328_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm6328_soc, NULL);
+}
+
+static const struct of_device_id bcm6328_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6328-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6328_pinctrl_driver = {
+ .probe = bcm6328_pinctrl_probe,
+ .driver = {
+ .name = "bcm6328-pinctrl",
+ .of_match_table = bcm6328_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6328_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6358.c b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
new file mode 100644
index 000000000000..d638578727f3
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6358 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6358_NUM_GPIOS 40
+
+#define BCM6358_MODE_REG 0x18
+#define BCM6358_MODE_MUX_NONE 0
+#define BCM6358_MODE_MUX_EBI_CS BIT(5)
+#define BCM6358_MODE_MUX_UART1 BIT(6)
+#define BCM6358_MODE_MUX_SPI_CS BIT(7)
+#define BCM6358_MODE_MUX_ASYNC_MODEM BIT(8)
+#define BCM6358_MODE_MUX_LEGACY_LED BIT(9)
+#define BCM6358_MODE_MUX_SERIAL_LED BIT(10)
+#define BCM6358_MODE_MUX_LED BIT(11)
+#define BCM6358_MODE_MUX_UTOPIA BIT(12)
+#define BCM6358_MODE_MUX_CLKRST BIT(13)
+#define BCM6358_MODE_MUX_PWM_SYN_CLK BIT(14)
+#define BCM6358_MODE_MUX_SYS_IRQ BIT(15)
+
+struct bcm6358_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+
+ const uint16_t mode_val;
+
+ /* non-GPIO function muxes require the gpio direction to be set */
+ const uint16_t direction;
+};
+
+struct bcm6358_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+};
+
+struct bcm6358_priv {
+ struct regmap_field *overlays;
+};
+
+#define BCM6358_GPIO_PIN(a, b, bit1, bit2, bit3) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)(BCM6358_MODE_MUX_##bit1 | \
+ BCM6358_MODE_MUX_##bit2 | \
+ BCM6358_MODE_MUX_##bit3), \
+ }
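+
+/*
+ * drv_data collects up to three overlay mux bits that conflict with the
+ * pin; e.g. BCM6358_GPIO_PIN(6, "gpio6", SERIAL_LED, NONE, NONE) stores
+ * BCM6358_MODE_MUX_SERIAL_LED (BIT(10)), the NONE slots ORing in 0.
+ */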
+
+static const struct pinctrl_pin_desc bcm6358_pins[] = {
+ BCM6358_GPIO_PIN(0, "gpio0", LED, NONE, NONE),
+ BCM6358_GPIO_PIN(1, "gpio1", LED, NONE, NONE),
+ BCM6358_GPIO_PIN(2, "gpio2", LED, NONE, NONE),
+ BCM6358_GPIO_PIN(3, "gpio3", LED, NONE, NONE),
+ PINCTRL_PIN(4, "gpio4"),
+ BCM6358_GPIO_PIN(5, "gpio5", SYS_IRQ, NONE, NONE),
+ BCM6358_GPIO_PIN(6, "gpio6", SERIAL_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(7, "gpio7", SERIAL_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(8, "gpio8", PWM_SYN_CLK, NONE, NONE),
+ BCM6358_GPIO_PIN(9, "gpio09", LEGACY_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(10, "gpio10", LEGACY_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(11, "gpio11", LEGACY_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(12, "gpio12", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ BCM6358_GPIO_PIN(13, "gpio13", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ BCM6358_GPIO_PIN(14, "gpio14", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ BCM6358_GPIO_PIN(15, "gpio15", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ BCM6358_GPIO_PIN(22, "gpio22", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(23, "gpio23", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(24, "gpio24", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(25, "gpio25", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(26, "gpio26", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(27, "gpio27", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(28, "gpio28", UTOPIA, UART1, NONE),
+ BCM6358_GPIO_PIN(29, "gpio29", UTOPIA, UART1, NONE),
+ BCM6358_GPIO_PIN(30, "gpio30", UTOPIA, UART1, EBI_CS),
+ BCM6358_GPIO_PIN(31, "gpio31", UTOPIA, UART1, EBI_CS),
+ BCM6358_GPIO_PIN(32, "gpio32", SPI_CS, NONE, NONE),
+ BCM6358_GPIO_PIN(33, "gpio33", SPI_CS, NONE, NONE),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+};
+
+static unsigned ebi_cs_grp_pins[] = { 30, 31 };
+
+static unsigned uart1_grp_pins[] = { 28, 29, 30, 31 };
+
+static unsigned spi_cs_grp_pins[] = { 32, 33 };
+
+static unsigned async_modem_grp_pins[] = { 12, 13, 14, 15 };
+
+static unsigned serial_led_grp_pins[] = { 6, 7 };
+
+static unsigned legacy_led_grp_pins[] = { 9, 10, 11, 12, 13, 14, 15 };
+
+static unsigned led_grp_pins[] = { 0, 1, 2, 3 };
+
+static unsigned utopia_grp_pins[] = {
+ 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static unsigned pwm_syn_clk_grp_pins[] = { 8 };
+
+static unsigned sys_irq_grp_pins[] = { 5 };
+
+#define BCM6358_GPIO_MUX_GROUP(n, bit, dir) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ .mode_val = BCM6358_MODE_MUX_##bit, \
+ .direction = dir, \
+ }
+
+static const struct bcm6358_pingroup bcm6358_groups[] = {
+ BCM6358_GPIO_MUX_GROUP(ebi_cs_grp, EBI_CS, 0x3),
+ BCM6358_GPIO_MUX_GROUP(uart1_grp, UART1, 0x2),
+ BCM6358_GPIO_MUX_GROUP(spi_cs_grp, SPI_CS, 0x6),
+ BCM6358_GPIO_MUX_GROUP(async_modem_grp, ASYNC_MODEM, 0x6),
+ BCM6358_GPIO_MUX_GROUP(legacy_led_grp, LEGACY_LED, 0x7f),
+ BCM6358_GPIO_MUX_GROUP(serial_led_grp, SERIAL_LED, 0x3),
+ BCM6358_GPIO_MUX_GROUP(led_grp, LED, 0xf),
+ BCM6358_GPIO_MUX_GROUP(utopia_grp, UTOPIA, 0x000f),
+ BCM6358_GPIO_MUX_GROUP(pwm_syn_clk_grp, PWM_SYN_CLK, 0x1),
+ BCM6358_GPIO_MUX_GROUP(sys_irq_grp, SYS_IRQ, 0x1),
+};
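+
+/*
+ * The direction bitmap is indexed by position within the group, not by
+ * GPIO number: uart1_grp's 0x2 drives pg->pins[1] (gpio29) as an output
+ * and leaves gpio28/30/31 as inputs when the mux is applied in
+ * bcm6358_pinctrl_set_mux() below.
+ */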
+
+static const char * const ebi_cs_groups[] = {
+ "ebi_cs_grp"
+};
+
+static const char * const uart1_groups[] = {
+ "uart1_grp"
+};
+
+static const char * const spi_cs_2_3_groups[] = {
+ "spi_cs_2_3_grp"
+};
+
+static const char * const async_modem_groups[] = {
+ "async_modem_grp"
+};
+
+static const char * const legacy_led_groups[] = {
+ "legacy_led_grp",
+};
+
+static const char * const serial_led_groups[] = {
+ "serial_led_grp",
+};
+
+static const char * const led_groups[] = {
+ "led_grp",
+};
+
+static const char * const clkrst_groups[] = {
+ "clkrst_grp",
+};
+
+static const char * const pwm_syn_clk_groups[] = {
+ "pwm_syn_clk_grp",
+};
+
+static const char * const sys_irq_groups[] = {
+ "sys_irq_grp",
+};
+
+#define BCM6358_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ }
+
+static const struct bcm6358_function bcm6358_funcs[] = {
+ BCM6358_FUN(ebi_cs),
+ BCM6358_FUN(uart1),
+ BCM6358_FUN(spi_cs_2_3),
+ BCM6358_FUN(async_modem),
+ BCM6358_FUN(legacy_led),
+ BCM6358_FUN(serial_led),
+ BCM6358_FUN(led),
+ BCM6358_FUN(clkrst),
+ BCM6358_FUN(pwm_syn_clk),
+ BCM6358_FUN(sys_irq),
+};
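+
+/*
+ * Unlike the other bcm63xx variants, the register data lives in the
+ * groups (mode_val/direction) rather than in the functions, since each
+ * overlay function here drives exactly one fixed pin group.
+ */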
+
+static int bcm6358_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6358_groups);
+}
+
+static const char *bcm6358_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6358_groups[group].name;
+}
+
+static int bcm6358_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6358_groups[group].pins;
+ *num_pins = bcm6358_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6358_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6358_funcs);
+}
+
+static const char *bcm6358_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6358_funcs[selector].name;
+}
+
+static int bcm6358_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6358_funcs[selector].groups;
+ *num_groups = bcm6358_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static int bcm6358_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6358_priv *priv = pc->driver_data;
+ const struct bcm6358_pingroup *pg = &bcm6358_groups[group];
+ unsigned int val = pg->mode_val;
+ unsigned int mask = val;
+ unsigned pin;
+
+ for (pin = 0; pin < pg->num_pins; pin++)
+ mask |= (unsigned long)bcm6358_pins[pg->pins[pin]].drv_data;
+
+ regmap_field_update_bits(priv->overlays, mask, val);
+
+ for (pin = 0; pin < pg->num_pins; pin++) {
+ struct pinctrl_gpio_range *range;
+ unsigned int hw_gpio = pg->pins[pin];
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, hw_gpio);
+ if (range) {
+ struct gpio_chip *gc = range->gc;
+
+ if (pg->direction & BIT(pin))
+ gc->direction_output(gc, hw_gpio, 0);
+ else
+ gc->direction_input(gc, hw_gpio);
+ }
+ }
+
+ return 0;
+}
+
+static int bcm6358_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6358_priv *priv = pc->driver_data;
+ unsigned int mask;
+
+ mask = (unsigned long) bcm6358_pins[offset].drv_data;
+ if (!mask)
+ return 0;
+
+ /* disable all functions using this pin */
+ return regmap_field_update_bits(priv->overlays, mask, 0);
+}
+
+static struct pinctrl_ops bcm6358_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6358_pinctrl_get_group_name,
+ .get_group_pins = bcm6358_pinctrl_get_group_pins,
+ .get_groups_count = bcm6358_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6358_pmx_ops = {
+ .get_function_groups = bcm6358_pinctrl_get_groups,
+ .get_function_name = bcm6358_pinctrl_get_func_name,
+ .get_functions_count = bcm6358_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6358_gpio_request_enable,
+ .set_mux = bcm6358_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6358_soc = {
+ .ngpios = BCM6358_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6358_pins),
+ .pctl_ops = &bcm6358_pctl_ops,
+ .pins = bcm6358_pins,
+ .pmx_ops = &bcm6358_pmx_ops,
+};
+
+static int bcm6358_pinctrl_probe(struct platform_device *pdev)
+{
+ struct reg_field overlays = REG_FIELD(BCM6358_MODE_REG, 0, 15);
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_pinctrl *pc;
+ struct bcm6358_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = bcm63xx_pinctrl_probe(pdev, &bcm6358_soc, (void *) priv);
+ if (err)
+ return err;
+
+ pc = platform_get_drvdata(pdev);
+
+ priv->overlays = devm_regmap_field_alloc(dev, pc->regs, overlays);
+ if (IS_ERR(priv->overlays))
+ return PTR_ERR(priv->overlays);
+
+ return 0;
+}
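+
+/*
+ * REG_FIELD(BCM6358_MODE_REG, 0, 15) confines the regmap_field to
+ * mode-register bits 0-15, so the regmap_field_update_bits() calls
+ * above can only ever touch the BCM6358_MODE_MUX_* overlay bits and
+ * leave the rest of the register alone.
+ */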
+
+static const struct of_device_id bcm6358_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6358-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6358_pinctrl_driver = {
+ .probe = bcm6358_pinctrl_probe,
+ .driver = {
+ .name = "bcm6358-pinctrl",
+ .of_match_table = bcm6358_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6358_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6362.c b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
new file mode 100644
index 000000000000..40ef495b6301
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6362 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6362_BANK_GPIOS 32
+#define BCM6362_NUM_GPIOS 48
+#define BCM6362_NUM_LEDS 24
+
+#define BCM6362_LED_REG 0x10
+#define BCM6362_MODE_REG 0x18
+#define BCM6362_CTRL_REG 0x1c
+#define BCM6362_BASEMODE_REG 0x38
+#define BASEMODE_NAND BIT(2)
+
+enum bcm6362_pinctrl_reg {
+ BCM6362_LEDCTRL,
+ BCM6362_MODE,
+ BCM6362_CTRL,
+ BCM6362_BASEMODE,
+};
+
+struct bcm6362_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6362_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ enum bcm6362_pinctrl_reg reg;
+ uint32_t basemode_mask;
+};
+
+#define BCM6362_PIN(a, b, mask) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)(mask), \
+ }
+
+static const struct pinctrl_pin_desc bcm6362_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ BCM6362_PIN(8, "gpio8", BASEMODE_NAND),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ BCM6362_PIN(12, "gpio12", BASEMODE_NAND),
+ BCM6362_PIN(13, "gpio13", BASEMODE_NAND),
+ BCM6362_PIN(14, "gpio14", BASEMODE_NAND),
+ BCM6362_PIN(15, "gpio15", BASEMODE_NAND),
+ BCM6362_PIN(16, "gpio16", BASEMODE_NAND),
+ BCM6362_PIN(17, "gpio17", BASEMODE_NAND),
+ BCM6362_PIN(18, "gpio18", BASEMODE_NAND),
+ BCM6362_PIN(19, "gpio19", BASEMODE_NAND),
+ BCM6362_PIN(20, "gpio20", BASEMODE_NAND),
+ BCM6362_PIN(21, "gpio21", BASEMODE_NAND),
+ BCM6362_PIN(22, "gpio22", BASEMODE_NAND),
+ BCM6362_PIN(23, "gpio23", BASEMODE_NAND),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ BCM6362_PIN(27, "gpio27", BASEMODE_NAND),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned gpio32_pins[] = { 32 };
+static unsigned gpio33_pins[] = { 33 };
+static unsigned gpio34_pins[] = { 34 };
+static unsigned gpio35_pins[] = { 35 };
+static unsigned gpio36_pins[] = { 36 };
+static unsigned gpio37_pins[] = { 37 };
+static unsigned gpio38_pins[] = { 38 };
+static unsigned gpio39_pins[] = { 39 };
+static unsigned gpio40_pins[] = { 40 };
+static unsigned gpio41_pins[] = { 41 };
+static unsigned gpio42_pins[] = { 42 };
+static unsigned gpio43_pins[] = { 43 };
+static unsigned gpio44_pins[] = { 44 };
+static unsigned gpio45_pins[] = { 45 };
+static unsigned gpio46_pins[] = { 46 };
+static unsigned gpio47_pins[] = { 47 };
+
+static unsigned nand_grp_pins[] = {
+ 8, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 27,
+};
+
+#define BCM6362_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6362_pingroup bcm6362_groups[] = {
+ BCM6362_GROUP(gpio0),
+ BCM6362_GROUP(gpio1),
+ BCM6362_GROUP(gpio2),
+ BCM6362_GROUP(gpio3),
+ BCM6362_GROUP(gpio4),
+ BCM6362_GROUP(gpio5),
+ BCM6362_GROUP(gpio6),
+ BCM6362_GROUP(gpio7),
+ BCM6362_GROUP(gpio8),
+ BCM6362_GROUP(gpio9),
+ BCM6362_GROUP(gpio10),
+ BCM6362_GROUP(gpio11),
+ BCM6362_GROUP(gpio12),
+ BCM6362_GROUP(gpio13),
+ BCM6362_GROUP(gpio14),
+ BCM6362_GROUP(gpio15),
+ BCM6362_GROUP(gpio16),
+ BCM6362_GROUP(gpio17),
+ BCM6362_GROUP(gpio18),
+ BCM6362_GROUP(gpio19),
+ BCM6362_GROUP(gpio20),
+ BCM6362_GROUP(gpio21),
+ BCM6362_GROUP(gpio22),
+ BCM6362_GROUP(gpio23),
+ BCM6362_GROUP(gpio24),
+ BCM6362_GROUP(gpio25),
+ BCM6362_GROUP(gpio26),
+ BCM6362_GROUP(gpio27),
+ BCM6362_GROUP(gpio28),
+ BCM6362_GROUP(gpio29),
+ BCM6362_GROUP(gpio30),
+ BCM6362_GROUP(gpio31),
+ BCM6362_GROUP(gpio32),
+ BCM6362_GROUP(gpio33),
+ BCM6362_GROUP(gpio34),
+ BCM6362_GROUP(gpio35),
+ BCM6362_GROUP(gpio36),
+ BCM6362_GROUP(gpio37),
+ BCM6362_GROUP(gpio38),
+ BCM6362_GROUP(gpio39),
+ BCM6362_GROUP(gpio40),
+ BCM6362_GROUP(gpio41),
+ BCM6362_GROUP(gpio42),
+ BCM6362_GROUP(gpio43),
+ BCM6362_GROUP(gpio44),
+ BCM6362_GROUP(gpio45),
+ BCM6362_GROUP(gpio46),
+ BCM6362_GROUP(gpio47),
+ BCM6362_GROUP(nand_grp),
+};
+
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+static const char * const usb_device_led_groups[] = {
+ "gpio0",
+};
+
+static const char * const sys_irq_groups[] = {
+ "gpio1",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio2",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio3",
+};
+
+static const char * const robosw_led_data_groups[] = {
+ "gpio4",
+};
+
+static const char * const robosw_led_clk_groups[] = {
+ "gpio5",
+};
+
+static const char * const robosw_led0_groups[] = {
+ "gpio6",
+};
+
+static const char * const robosw_led1_groups[] = {
+ "gpio7",
+};
+
+static const char * const inet_led_groups[] = {
+ "gpio8",
+};
+
+static const char * const spi_cs2_groups[] = {
+ "gpio9",
+};
+
+static const char * const spi_cs3_groups[] = {
+ "gpio10",
+};
+
+static const char * const ntr_pulse_groups[] = {
+ "gpio11",
+};
+
+static const char * const uart1_scts_groups[] = {
+ "gpio12",
+};
+
+static const char * const uart1_srts_groups[] = {
+ "gpio13",
+};
+
+static const char * const uart1_sdin_groups[] = {
+ "gpio14",
+};
+
+static const char * const uart1_sdout_groups[] = {
+ "gpio15",
+};
+
+static const char * const adsl_spi_miso_groups[] = {
+ "gpio16",
+};
+
+static const char * const adsl_spi_mosi_groups[] = {
+ "gpio17",
+};
+
+static const char * const adsl_spi_clk_groups[] = {
+ "gpio18",
+};
+
+static const char * const adsl_spi_cs_groups[] = {
+ "gpio19",
+};
+
+static const char * const ephy0_led_groups[] = {
+ "gpio20",
+};
+
+static const char * const ephy1_led_groups[] = {
+ "gpio21",
+};
+
+static const char * const ephy2_led_groups[] = {
+ "gpio22",
+};
+
+static const char * const ephy3_led_groups[] = {
+ "gpio23",
+};
+
+static const char * const ext_irq0_groups[] = {
+ "gpio24",
+};
+
+static const char * const ext_irq1_groups[] = {
+ "gpio25",
+};
+
+static const char * const ext_irq2_groups[] = {
+ "gpio26",
+};
+
+static const char * const ext_irq3_groups[] = {
+ "gpio27",
+};
+
+static const char * const wifi_groups[] = {
+ "gpio32",
+ "gpio33",
+ "gpio34",
+ "gpio35",
+ "gpio36",
+ "gpio37",
+ "gpio38",
+ "gpio39",
+ "gpio40",
+ "gpio41",
+ "gpio42",
+ "gpio43",
+ "gpio44",
+ "gpio45",
+ "gpio46",
+ "gpio47",
+};
+
+static const char * const nand_groups[] = {
+ "nand_grp",
+};
+
+#define BCM6362_LED_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_LEDCTRL, \
+ }
+
+#define BCM6362_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_MODE, \
+ }
+
+#define BCM6362_CTRL_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_CTRL, \
+ }
+
+#define BCM6362_BASEMODE_FUN(n, mask) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_BASEMODE, \
+ .basemode_mask = (mask), \
+ }
+
+static const struct bcm6362_function bcm6362_funcs[] = {
+ BCM6362_LED_FUN(led),
+ BCM6362_MODE_FUN(usb_device_led),
+ BCM6362_MODE_FUN(sys_irq),
+ BCM6362_MODE_FUN(serial_led_clk),
+ BCM6362_MODE_FUN(serial_led_data),
+ BCM6362_MODE_FUN(robosw_led_data),
+ BCM6362_MODE_FUN(robosw_led_clk),
+ BCM6362_MODE_FUN(robosw_led0),
+ BCM6362_MODE_FUN(robosw_led1),
+ BCM6362_MODE_FUN(inet_led),
+ BCM6362_MODE_FUN(spi_cs2),
+ BCM6362_MODE_FUN(spi_cs3),
+ BCM6362_MODE_FUN(ntr_pulse),
+ BCM6362_MODE_FUN(uart1_scts),
+ BCM6362_MODE_FUN(uart1_srts),
+ BCM6362_MODE_FUN(uart1_sdin),
+ BCM6362_MODE_FUN(uart1_sdout),
+ BCM6362_MODE_FUN(adsl_spi_miso),
+ BCM6362_MODE_FUN(adsl_spi_mosi),
+ BCM6362_MODE_FUN(adsl_spi_clk),
+ BCM6362_MODE_FUN(adsl_spi_cs),
+ BCM6362_MODE_FUN(ephy0_led),
+ BCM6362_MODE_FUN(ephy1_led),
+ BCM6362_MODE_FUN(ephy2_led),
+ BCM6362_MODE_FUN(ephy3_led),
+ BCM6362_MODE_FUN(ext_irq0),
+ BCM6362_MODE_FUN(ext_irq1),
+ BCM6362_MODE_FUN(ext_irq2),
+ BCM6362_MODE_FUN(ext_irq3),
+ BCM6362_CTRL_FUN(wifi),
+ BCM6362_BASEMODE_FUN(nand, BASEMODE_NAND),
+};
+
+static int bcm6362_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6362_groups);
+}
+
+static const char *bcm6362_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6362_groups[group].name;
+}
+
+static int bcm6362_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6362_groups[group].pins;
+ *num_pins = bcm6362_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6362_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6362_funcs);
+}
+
+static const char *bcm6362_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6362_funcs[selector].name;
+}
+
+static int bcm6362_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6362_funcs[selector].groups;
+ *num_groups = bcm6362_funcs[selector].num_groups;
+
+ return 0;
+}
+
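+/* return a pin to plain GPIO by clearing every function that can claim it */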
+static void bcm6362_set_gpio(struct bcm63xx_pinctrl *pc, unsigned pin)
+{
+ const struct pinctrl_pin_desc *desc = &bcm6362_pins[pin];
+ unsigned int basemode = (uintptr_t)desc->drv_data;
+ unsigned int mask = BIT(bcm63xx_bank_pin(pin));
+
+ if (basemode)
+ regmap_update_bits(pc->regs, BCM6362_BASEMODE_REG, basemode, 0);
+
+ if (pin < BCM63XX_BANK_GPIOS) {
+ /* mode reg: 0 => gpio, 1 => mux function */
+ regmap_update_bits(pc->regs, BCM6362_MODE_REG, mask, 0);
+
+ /* pins 0-23 can additionally be muxed to the LED controller */
+ if (pin < BCM6362_NUM_LEDS)
+ regmap_update_bits(pc->regs, BCM6362_LED_REG, mask, 0);
+ } else {
+ /* ctrl reg: 0 => wifi function, 1 => gpio */
+ regmap_update_bits(pc->regs, BCM6362_CTRL_REG, mask, mask);
+ }
+}
+
+static int bcm6362_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm6362_pingroup *pg = &bcm6362_groups[group];
+ const struct bcm6362_function *f = &bcm6362_funcs[selector];
+ unsigned i;
+ unsigned int reg;
+ unsigned int val, mask;
+
+ for (i = 0; i < pg->num_pins; i++)
+ bcm6362_set_gpio(pc, pg->pins[i]);
+
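+ /* pick the register and bits that enable this function */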
+ switch (f->reg) {
+ case BCM6362_LEDCTRL:
+ reg = BCM6362_LED_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM6362_MODE:
+ reg = BCM6362_MODE_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM6362_CTRL:
+ reg = BCM6362_CTRL_REG;
+ mask = BIT(bcm63xx_bank_pin(pg->pins[0]));
+ val = 0;
+ break;
+ case BCM6362_BASEMODE:
+ reg = BCM6362_BASEMODE_REG;
+ mask = f->basemode_mask;
+ val = f->basemode_mask;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pc->regs, reg, mask, val);
+
+ return 0;
+}
+
+static int bcm6362_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ bcm6362_set_gpio(pc, offset);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6362_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6362_pinctrl_get_group_name,
+ .get_group_pins = bcm6362_pinctrl_get_group_pins,
+ .get_groups_count = bcm6362_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6362_pmx_ops = {
+ .get_function_groups = bcm6362_pinctrl_get_groups,
+ .get_function_name = bcm6362_pinctrl_get_func_name,
+ .get_functions_count = bcm6362_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6362_gpio_request_enable,
+ .set_mux = bcm6362_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6362_soc = {
+ .ngpios = BCM6362_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6362_pins),
+ .pctl_ops = &bcm6362_pctl_ops,
+ .pins = bcm6362_pins,
+ .pmx_ops = &bcm6362_pmx_ops,
+};
+
+static int bcm6362_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm6362_soc, NULL);
+}
+
+static const struct of_device_id bcm6362_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6362-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6362_pinctrl_driver = {
+ .probe = bcm6362_pinctrl_probe,
+ .driver = {
+ .name = "bcm6362-pinctrl",
+ .of_match_table = bcm6362_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6362_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6368.c b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
new file mode 100644
index 000000000000..838095f9e890
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6368 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6368_NUM_GPIOS 38
+
+#define BCM6368_MODE_REG 0x18
+#define BCM6368_BASEMODE_REG 0x38
+#define BCM6368_BASEMODE_MASK 0x7
+#define BCM6368_BASEMODE_GPIO 0x0
+#define BCM6368_BASEMODE_UART1 0x1
+
+struct bcm6368_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6368_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ unsigned dir_out:16;
+ unsigned basemode:3;
+};
+
+struct bcm6368_priv {
+ struct regmap_field *overlays;
+};
+
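+/* pins flagged with drv_data take part in the basemode overlay */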
+#define BCM6368_BASEMODE_PIN(a, b) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)true \
+ }
+
+static const struct pinctrl_pin_desc bcm6368_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ BCM6368_BASEMODE_PIN(30, "gpio30"),
+ BCM6368_BASEMODE_PIN(31, "gpio31"),
+ BCM6368_BASEMODE_PIN(32, "gpio32"),
+ BCM6368_BASEMODE_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned uart1_grp_pins[] = { 30, 31, 32, 33 };
+
+#define BCM6368_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6368_pingroup bcm6368_groups[] = {
+ BCM6368_GROUP(gpio0),
+ BCM6368_GROUP(gpio1),
+ BCM6368_GROUP(gpio2),
+ BCM6368_GROUP(gpio3),
+ BCM6368_GROUP(gpio4),
+ BCM6368_GROUP(gpio5),
+ BCM6368_GROUP(gpio6),
+ BCM6368_GROUP(gpio7),
+ BCM6368_GROUP(gpio8),
+ BCM6368_GROUP(gpio9),
+ BCM6368_GROUP(gpio10),
+ BCM6368_GROUP(gpio11),
+ BCM6368_GROUP(gpio12),
+ BCM6368_GROUP(gpio13),
+ BCM6368_GROUP(gpio14),
+ BCM6368_GROUP(gpio15),
+ BCM6368_GROUP(gpio16),
+ BCM6368_GROUP(gpio17),
+ BCM6368_GROUP(gpio18),
+ BCM6368_GROUP(gpio19),
+ BCM6368_GROUP(gpio20),
+ BCM6368_GROUP(gpio21),
+ BCM6368_GROUP(gpio22),
+ BCM6368_GROUP(gpio23),
+ BCM6368_GROUP(gpio24),
+ BCM6368_GROUP(gpio25),
+ BCM6368_GROUP(gpio26),
+ BCM6368_GROUP(gpio27),
+ BCM6368_GROUP(gpio28),
+ BCM6368_GROUP(gpio29),
+ BCM6368_GROUP(gpio30),
+ BCM6368_GROUP(gpio31),
+ BCM6368_GROUP(uart1_grp),
+};
+
+static const char * const analog_afe_0_groups[] = {
+ "gpio0",
+};
+
+static const char * const analog_afe_1_groups[] = {
+ "gpio1",
+};
+
+static const char * const sys_irq_groups[] = {
+ "gpio2",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio3",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio4",
+};
+
+static const char * const inet_led_groups[] = {
+ "gpio5",
+};
+
+static const char * const ephy0_led_groups[] = {
+ "gpio6",
+};
+
+static const char * const ephy1_led_groups[] = {
+ "gpio7",
+};
+
+static const char * const ephy2_led_groups[] = {
+ "gpio8",
+};
+
+static const char * const ephy3_led_groups[] = {
+ "gpio9",
+};
+
+static const char * const robosw_led_data_groups[] = {
+ "gpio10",
+};
+
+static const char * const robosw_led_clk_groups[] = {
+ "gpio11",
+};
+
+static const char * const robosw_led0_groups[] = {
+ "gpio12",
+};
+
+static const char * const robosw_led1_groups[] = {
+ "gpio13",
+};
+
+static const char * const usb_device_led_groups[] = {
+ "gpio14",
+};
+
+static const char * const pci_req1_groups[] = {
+ "gpio16",
+};
+
+static const char * const pci_gnt1_groups[] = {
+ "gpio17",
+};
+
+static const char * const pci_intb_groups[] = {
+ "gpio18",
+};
+
+static const char * const pci_req0_groups[] = {
+ "gpio19",
+};
+
+static const char * const pci_gnt0_groups[] = {
+ "gpio20",
+};
+
+static const char * const pcmcia_cd1_groups[] = {
+ "gpio22",
+};
+
+static const char * const pcmcia_cd2_groups[] = {
+ "gpio23",
+};
+
+static const char * const pcmcia_vs1_groups[] = {
+ "gpio24",
+};
+
+static const char * const pcmcia_vs2_groups[] = {
+ "gpio25",
+};
+
+static const char * const ebi_cs2_groups[] = {
+ "gpio26",
+};
+
+static const char * const ebi_cs3_groups[] = {
+ "gpio27",
+};
+
+static const char * const spi_cs2_groups[] = {
+ "gpio28",
+};
+
+static const char * const spi_cs3_groups[] = {
+ "gpio29",
+};
+
+static const char * const spi_cs4_groups[] = {
+ "gpio30",
+};
+
+static const char * const spi_cs5_groups[] = {
+ "gpio31",
+};
+
+static const char * const uart1_groups[] = {
+ "uart1_grp",
+};
+
+#define BCM6368_FUN(n, out) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .dir_out = out, \
+ }
+
+#define BCM6368_BASEMODE_FUN(n, val, out) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .basemode = BCM6368_BASEMODE_##val, \
+ .dir_out = out, \
+ }
+
+static const struct bcm6368_function bcm6368_funcs[] = {
+ BCM6368_FUN(analog_afe_0, 1),
+ BCM6368_FUN(analog_afe_1, 1),
+ BCM6368_FUN(sys_irq, 1),
+ BCM6368_FUN(serial_led_data, 1),
+ BCM6368_FUN(serial_led_clk, 1),
+ BCM6368_FUN(inet_led, 1),
+ BCM6368_FUN(ephy0_led, 1),
+ BCM6368_FUN(ephy1_led, 1),
+ BCM6368_FUN(ephy2_led, 1),
+ BCM6368_FUN(ephy3_led, 1),
+ BCM6368_FUN(robosw_led_data, 1),
+ BCM6368_FUN(robosw_led_clk, 1),
+ BCM6368_FUN(robosw_led0, 1),
+ BCM6368_FUN(robosw_led1, 1),
+ BCM6368_FUN(usb_device_led, 1),
+ BCM6368_FUN(pci_req1, 0),
+ BCM6368_FUN(pci_gnt1, 0),
+ BCM6368_FUN(pci_intb, 0),
+ BCM6368_FUN(pci_req0, 0),
+ BCM6368_FUN(pci_gnt0, 0),
+ BCM6368_FUN(pcmcia_cd1, 0),
+ BCM6368_FUN(pcmcia_cd2, 0),
+ BCM6368_FUN(pcmcia_vs1, 0),
+ BCM6368_FUN(pcmcia_vs2, 0),
+ BCM6368_FUN(ebi_cs2, 1),
+ BCM6368_FUN(ebi_cs3, 1),
+ BCM6368_FUN(spi_cs2, 1),
+ BCM6368_FUN(spi_cs3, 1),
+ BCM6368_FUN(spi_cs4, 1),
+ BCM6368_FUN(spi_cs5, 1),
+ BCM6368_BASEMODE_FUN(uart1, UART1, 0x6),
+};
+
+static int bcm6368_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6368_groups);
+}
+
+static const char *bcm6368_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6368_groups[group].name;
+}
+
+static int bcm6368_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6368_groups[group].pins;
+ *num_pins = bcm6368_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6368_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6368_funcs);
+}
+
+static const char *bcm6368_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6368_funcs[selector].name;
+}
+
+static int bcm6368_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6368_funcs[selector].groups;
+ *num_groups = bcm6368_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static int bcm6368_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6368_priv *priv = pc->driver_data;
+ const struct bcm6368_pingroup *pg = &bcm6368_groups[group];
+ const struct bcm6368_function *fun = &bcm6368_funcs[selector];
+ int i, pin;
+
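+ /* basemode functions overlay a pin range: clear the per-pin mux bits and select the function via the overlay field instead */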
+ if (fun->basemode) {
+ unsigned int mask = 0;
+
+ for (i = 0; i < pg->num_pins; i++) {
+ pin = pg->pins[i];
+ if (pin < BCM63XX_BANK_GPIOS)
+ mask |= BIT(pin);
+ }
+
+ regmap_update_bits(pc->regs, BCM6368_MODE_REG, mask, 0);
+ regmap_field_write(priv->overlays, fun->basemode);
+ } else {
+ pin = pg->pins[0];
+
+ if (bcm6368_pins[pin].drv_data)
+ regmap_field_write(priv->overlays,
+ BCM6368_BASEMODE_GPIO);
+
+ regmap_update_bits(pc->regs, BCM6368_MODE_REG, BIT(pin),
+ BIT(pin));
+ }
+
+ for (pin = 0; pin < pg->num_pins; pin++) {
+ struct pinctrl_gpio_range *range;
+ int hw_gpio = bcm6368_pins[pg->pins[pin]].number;
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, hw_gpio);
+ if (range) {
+ struct gpio_chip *gc = range->gc;
+
+ if (fun->dir_out & BIT(pin))
+ gc->direction_output(gc, hw_gpio, 0);
+ else
+ gc->direction_input(gc, hw_gpio);
+ }
+ }
+
+ return 0;
+}
+
+static int bcm6368_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6368_priv *priv = pc->driver_data;
+
+ if (offset >= BCM63XX_BANK_GPIOS && !bcm6368_pins[offset].drv_data)
+ return 0;
+
+ /* disable all functions using this pin */
+ if (offset < BCM63XX_BANK_GPIOS)
+ regmap_update_bits(pc->regs, BCM6368_MODE_REG, BIT(offset), 0);
+
+ if (bcm6368_pins[offset].drv_data)
+ regmap_field_write(priv->overlays, BCM6368_BASEMODE_GPIO);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6368_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6368_pinctrl_get_group_name,
+ .get_group_pins = bcm6368_pinctrl_get_group_pins,
+ .get_groups_count = bcm6368_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6368_pmx_ops = {
+ .get_function_groups = bcm6368_pinctrl_get_groups,
+ .get_function_name = bcm6368_pinctrl_get_func_name,
+ .get_functions_count = bcm6368_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6368_gpio_request_enable,
+ .set_mux = bcm6368_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6368_soc = {
+ .ngpios = BCM6368_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6368_pins),
+ .pctl_ops = &bcm6368_pctl_ops,
+ .pins = bcm6368_pins,
+ .pmx_ops = &bcm6368_pmx_ops,
+};
+
+static int bcm6368_pinctrl_probe(struct platform_device *pdev)
+{
+ struct reg_field overlays = REG_FIELD(BCM6368_BASEMODE_REG, 0, 15);
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_pinctrl *pc;
+ struct bcm6368_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = bcm63xx_pinctrl_probe(pdev, &bcm6368_soc, (void *) priv);
+ if (err)
+ return err;
+
+ pc = platform_get_drvdata(pdev);
+
+ priv->overlays = devm_regmap_field_alloc(dev, pc->regs, overlays);
+ if (IS_ERR(priv->overlays))
+ return PTR_ERR(priv->overlays);
+
+ return 0;
+}
+
+static const struct of_device_id bcm6368_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6368-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6368_pinctrl_driver = {
+ .probe = bcm6368_pinctrl_probe,
+ .driver = {
+ .name = "bcm6368-pinctrl",
+ .of_match_table = bcm6368_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6368_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.c b/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
new file mode 100644
index 000000000000..e1285fe2fbc0
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM63xx GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/gpio/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM63XX_BANK_SIZE 4
+
+#define BCM63XX_DIROUT_REG 0x04
+#define BCM63XX_DATA_REG 0x0c
+
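+/*
+ * GPIO banks are laid out downwards from the passed base: each further
+ * bank of 32 GPIOs lives BCM63XX_BANK_SIZE bytes below the previous one.
+ */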
+static int bcm63xx_reg_mask_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ unsigned int line = offset % BCM63XX_BANK_GPIOS;
+ unsigned int stride = offset / BCM63XX_BANK_GPIOS;
+
+ *reg = base - stride * BCM63XX_BANK_SIZE;
+ *mask = BIT(line);
+
+ return 0;
+}
+
+static const struct of_device_id bcm63xx_gpio_of_match[] = {
+ { .compatible = "brcm,bcm6318-gpio", },
+ { .compatible = "brcm,bcm6328-gpio", },
+ { .compatible = "brcm,bcm6358-gpio", },
+ { .compatible = "brcm,bcm6362-gpio", },
+ { .compatible = "brcm,bcm6368-gpio", },
+ { .compatible = "brcm,bcm63268-gpio", },
+ { /* sentinel */ }
+};
+
+static int bcm63xx_gpio_probe(struct device *dev, struct device_node *node,
+ const struct bcm63xx_pinctrl_soc *soc,
+ struct bcm63xx_pinctrl *pc)
+{
+ struct gpio_regmap_config grc = {0};
+
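+ /* describe the shared data/dirout registers so gpio-regmap can drive the banks */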
+ grc.parent = dev;
+ grc.fwnode = &node->fwnode;
+ grc.ngpio = soc->ngpios;
+ grc.ngpio_per_reg = BCM63XX_BANK_GPIOS;
+ grc.regmap = pc->regs;
+ grc.reg_dat_base = BCM63XX_DATA_REG;
+ grc.reg_dir_out_base = BCM63XX_DIROUT_REG;
+ grc.reg_set_base = BCM63XX_DATA_REG;
+ grc.reg_mask_xlate = bcm63xx_reg_mask_xlate;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &grc));
+}
+
+int bcm63xx_pinctrl_probe(struct platform_device *pdev,
+ const struct bcm63xx_pinctrl_soc *soc,
+ void *driver_data)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_pinctrl *pc;
+ struct device_node *node;
+ int err;
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pc);
+
+ pc->dev = dev;
+ pc->driver_data = driver_data;
+
+ pc->regs = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(pc->regs))
+ return PTR_ERR(pc->regs);
+
+ pc->pctl_desc.name = dev_name(dev);
+ pc->pctl_desc.pins = soc->pins;
+ pc->pctl_desc.npins = soc->npins;
+ pc->pctl_desc.pctlops = soc->pctl_ops;
+ pc->pctl_desc.pmxops = soc->pmx_ops;
+ pc->pctl_desc.owner = THIS_MODULE;
+
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+ if (IS_ERR(pc->pctl_dev))
+ return PTR_ERR(pc->pctl_dev);
+
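+ /* register a GPIO chip for each GPIO bank child of the parent syscon */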
+ for_each_child_of_node(dev->parent->of_node, node) {
+ if (of_match_node(bcm63xx_gpio_of_match, node)) {
+ err = bcm63xx_gpio_probe(dev, node, soc, pc);
+ if (err) {
+ dev_err(dev, "could not add GPIO chip\n");
+ of_node_put(node);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.h b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
new file mode 100644
index 000000000000..3bdb50021f1b
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#ifndef __PINCTRL_BCM63XX_H__
+#define __PINCTRL_BCM63XX_H__
+
+#include <linux/pinctrl/pinctrl.h>
+
+#define BCM63XX_BANK_GPIOS 32
+
+struct bcm63xx_pinctrl_soc {
+ struct pinctrl_ops *pctl_ops;
+ struct pinmux_ops *pmx_ops;
+
+ const struct pinctrl_pin_desc *pins;
+ unsigned npins;
+
+ unsigned int ngpios;
+};
+
+struct bcm63xx_pinctrl {
+ struct device *dev;
+ struct regmap *regs;
+
+ struct pinctrl_desc pctl_desc;
+ struct pinctrl_dev *pctl_dev;
+
+ void *driver_data;
+};
+
+static inline unsigned int bcm63xx_bank_pin(unsigned int pin)
+{
+ return pin % BCM63XX_BANK_GPIOS;
+}
+
+int bcm63xx_pinctrl_probe(struct platform_device *pdev,
+ const struct bcm63xx_pinctrl_soc *soc,
+ void *driver_data);
+
+#endif /* __PINCTRL_BCM63XX_H__ */
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6e6825d17a1d..a4ac87c8b4f8 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -160,7 +160,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
}
/**
- * pin_get_name_from_id() - look up a pin name from a pin id
+ * pin_get_name() - look up a pin name from a pin id
* @pctldev: the pin control device to lookup the pin on
* @pin: pin number/id to look up
*/
@@ -1258,7 +1258,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
p->state = NULL;
- /* Apply all the settings for the new state */
+ /* Apply all the settings for the new state - pinmux first */
list_for_each_entry(setting, &state->settings, node) {
switch (setting->type) {
case PIN_MAP_TYPE_MUX_GROUP:
@@ -1266,6 +1266,29 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
break;
case PIN_MAP_TYPE_CONFIGS_PIN:
case PIN_MAP_TYPE_CONFIGS_GROUP:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0)
+ goto unapply_new_state;
+
+ /* Do not link hogs (circular dependency) */
+ if (p != setting->pctldev->p)
+ pinctrl_link_add(setting->pctldev, p->dev);
+ }
+
+ /* Apply all the settings for the new state - pinconf after */
+ list_for_each_entry(setting, &state->settings, node) {
+ switch (setting->type) {
+ case PIN_MAP_TYPE_MUX_GROUP:
+ ret = 0;
+ break;
+ case PIN_MAP_TYPE_CONFIGS_PIN:
+ case PIN_MAP_TYPE_CONFIGS_GROUP:
ret = pinconf_apply_setting(setting);
break;
default:
@@ -1892,11 +1915,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
dev_name(pctldev->dev));
return;
}
- debugfs_create_file("pins", S_IFREG | S_IRUGO,
+ debugfs_create_file("pins", 0444,
device_root, pctldev, &pinctrl_pins_fops);
- debugfs_create_file("pingroups", S_IFREG | S_IRUGO,
+ debugfs_create_file("pingroups", 0444,
device_root, pctldev, &pinctrl_groups_fops);
- debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
+ debugfs_create_file("gpio-ranges", 0444,
device_root, pctldev, &pinctrl_gpioranges_fops);
if (pctldev->desc->pmxops)
pinmux_init_device_debugfs(device_root, pctldev);
@@ -1918,11 +1941,11 @@ static void pinctrl_init_debugfs(void)
return;
}
- debugfs_create_file("pinctrl-devices", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinctrl-devices", 0444,
debugfs_root, NULL, &pinctrl_devices_fops);
- debugfs_create_file("pinctrl-maps", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinctrl-maps", 0444,
debugfs_root, NULL, &pinctrl_maps_fops);
- debugfs_create_file("pinctrl-handles", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinctrl-handles", 0444,
debugfs_root, NULL, &pinctrl_fops);
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index faf770f13bc7..1e2b0fe9ffd6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -262,6 +262,7 @@ static struct platform_driver imx1_pinctrl_driver = {
.driver = {
.name = "imx1-pinctrl",
.of_match_table = imx1_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index a899a398b6bb..51748da1668f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -323,7 +323,8 @@ static int imx25_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx25_pinctrl_driver = {
.driver = {
.name = "imx25-pinctrl",
- .of_match_table = of_match_ptr(imx25_pinctrl_of_match),
+ .of_match_table = imx25_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx25_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index b4dfc1676cbc..67e7105be4f3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -396,7 +396,8 @@ static int imx27_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx27_pinctrl_driver = {
.driver = {
.name = "imx27-pinctrl",
- .of_match_table = of_match_ptr(imx27_pinctrl_of_match),
+ .of_match_table = imx27_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx27_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 871bb419e2f0..c8671ad5214c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1014,6 +1014,7 @@ static struct platform_driver imx35_pinctrl_driver = {
.driver = {
.name = "imx35-pinctrl",
.of_match_table = imx35_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx35_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index cf182c040e0b..a245b4011c00 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -399,7 +399,8 @@ static int imx50_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx50_pinctrl_driver = {
.driver = {
.name = "imx50-pinctrl",
- .of_match_table = of_match_ptr(imx50_pinctrl_of_match),
+ .of_match_table = imx50_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx50_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index e5c261e2bf1e..307cf5fe4d15 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -776,6 +776,7 @@ static struct platform_driver imx51_pinctrl_driver = {
.driver = {
.name = "imx51-pinctrl",
.of_match_table = imx51_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx51_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 64c97aaf20c7..02bf3bda69ac 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -463,6 +463,7 @@ static struct platform_driver imx53_pinctrl_driver = {
.driver = {
.name = "imx53-pinctrl",
.of_match_table = imx53_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx53_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 0858b4d79ed2..2b6d5141a477 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -473,6 +473,7 @@ static struct platform_driver imx6dl_pinctrl_driver = {
.driver = {
.name = "imx6dl-pinctrl",
.of_match_table = imx6dl_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6dl_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 078ed6a331fd..a7507def26a9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -475,6 +475,7 @@ static struct platform_driver imx6q_pinctrl_driver = {
.driver = {
.name = "imx6q-pinctrl",
.of_match_table = imx6q_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6q_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index 9d2e6f987aa7..236f3bf120c2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -379,6 +379,7 @@ static struct platform_driver imx6sl_pinctrl_driver = {
.driver = {
.name = "imx6sl-pinctrl",
.of_match_table = imx6sl_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6sl_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sll.c b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
index 0618f4d887fd..dfefcecbe072 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sll.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
@@ -345,7 +345,7 @@ static int imx6sll_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6sll_pinctrl_driver = {
.driver = {
.name = "imx6sll-pinctrl",
- .of_match_table = of_match_ptr(imx6sll_pinctrl_of_match),
+ .of_match_table = imx6sll_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx6sll_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index c7e2b1f94f01..b7b97c274dcc 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -382,7 +382,8 @@ static int imx6sx_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6sx_pinctrl_driver = {
.driver = {
.name = "imx6sx-pinctrl",
- .of_match_table = of_match_ptr(imx6sx_pinctrl_of_match),
+ .of_match_table = imx6sx_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6sx_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 7e37627c63f5..3b8747482e36 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -342,7 +342,8 @@ static int imx6ul_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6ul_pinctrl_driver = {
.driver = {
.name = "imx6ul-pinctrl",
- .of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
+ .of_match_table = imx6ul_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6ul_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 369d3e59fdd6..4126387344cb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -386,7 +386,8 @@ static int imx7d_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx7d_pinctrl_driver = {
.driver = {
.name = "imx7d-pinctrl",
- .of_match_table = of_match_ptr(imx7d_pinctrl_of_match),
+ .of_match_table = imx7d_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx7d_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
index 922ff73c7087..1915378d92b2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
@@ -303,7 +303,7 @@ static int imx7ulp_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx7ulp_pinctrl_driver = {
.driver = {
.name = "imx7ulp-pinctrl",
- .of_match_table = of_match_ptr(imx7ulp_pinctrl_of_match),
+ .of_match_table = imx7ulp_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx7ulp_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
index d3020c0cd55d..041455c13d0d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
@@ -184,7 +184,7 @@ static int imx8dxl_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8dxl_pinctrl_driver = {
.driver = {
.name = "fsl,imx8dxl-iomuxc",
- .of_match_table = of_match_ptr(imx8dxl_pinctrl_of_match),
+ .of_match_table = imx8dxl_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8dxl_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mm.c b/drivers/pinctrl/freescale/pinctrl-imx8mm.c
index 31c5d8861406..39dc73281ce6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mm.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mm.c
@@ -337,7 +337,7 @@ static int imx8mm_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mm_pinctrl_driver = {
.driver = {
.name = "imx8mm-pinctrl",
- .of_match_table = of_match_ptr(imx8mm_pinctrl_of_match),
+ .of_match_table = imx8mm_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8mm_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mn.c b/drivers/pinctrl/freescale/pinctrl-imx8mn.c
index 14c9deb51fec..448a79eb4568 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mn.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mn.c
@@ -337,7 +337,7 @@ static int imx8mn_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mn_pinctrl_driver = {
.driver = {
.name = "imx8mn-pinctrl",
- .of_match_table = of_match_ptr(imx8mn_pinctrl_of_match),
+ .of_match_table = imx8mn_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8mn_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
index bf4bbb5e2446..88abc257318f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
@@ -335,7 +335,8 @@ static int imx8mp_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mp_pinctrl_driver = {
.driver = {
.name = "imx8mp-pinctrl",
- .of_match_table = of_match_ptr(imx8mp_pinctrl_of_match),
+ .of_match_table = imx8mp_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx8mp_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
index ae3ea5b5c204..3ed3c98bcedb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -340,7 +340,7 @@ static int imx8mq_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mq_pinctrl_driver = {
.driver = {
.name = "imx8mq-pinctrl",
- .of_match_table = of_match_ptr(imx8mq_pinctrl_of_match),
+ .of_match_table = imx8mq_pinctrl_of_match,
.pm = &imx_pinctrl_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qm.c b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
index 8f46b9404cd7..2e2d30dc13f7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qm.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
@@ -317,7 +317,7 @@ static int imx8qm_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8qm_pinctrl_driver = {
.driver = {
.name = "imx8qm-pinctrl",
- .of_match_table = of_match_ptr(imx8qm_pinctrl_of_match),
+ .of_match_table = imx8qm_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8qm_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
index 6776ad6a3a27..4f97813ba8b7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
@@ -223,7 +223,7 @@ static int imx8qxp_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8qxp_pinctrl_driver = {
.driver = {
.name = "imx8qxp-pinctrl",
- .of_match_table = of_match_ptr(imx8qxp_pinctrl_of_match),
+ .of_match_table = imx8qxp_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8qxp_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 37602b053ed2..700e5a136814 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -336,6 +336,7 @@ static struct platform_driver vf610_pinctrl_driver = {
.driver = {
.name = "vf610-pinctrl",
.of_match_table = vf610_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = vf610_pinctrl_probe,
};
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 9f3361c13ded..85750974d182 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1173,16 +1173,15 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
for (gpp = 0; gpp < community->ngpps; gpp++) {
const struct intel_padgroup *padgrp = &community->gpps[gpp];
unsigned long pending, enabled, gpp_offset;
- unsigned long flags;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock(&pctrl->lock);
pending = readl(community->regs + community->is_offset +
padgrp->reg_num * 4);
enabled = readl(community->regs + community->ie_offset +
padgrp->reg_num * 4);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock(&pctrl->lock);
/* Only interrupts that are enabled */
pending &= enabled;
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index eef17f228669..90f0c8255eaf 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -147,6 +147,12 @@ config PINCTRL_MT8192
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8195
+ bool "Mediatek MT8195 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ select PINCTRL_MTK_PARIS
+
config PINCTRL_MT8516
bool "Mediatek MT8516 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 01218bf4dc30..06fde993ace2 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
+obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 0fa7de43bc4c..3a4a23c40a71 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -619,6 +619,8 @@ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
hw->nbase = hw->soc->nbase_names;
+ spin_lock_init(&hw->lock);
+
/* Copy from internal struct mtk_pin_desc to register to the core */
pins = devm_kmalloc_array(&pdev->dev, hw->soc->npins, sizeof(*pins),
GFP_KERNEL);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
new file mode 100644
index 000000000000..a7500e18bb1d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#include "pinctrl-mtk-mt8195.h"
+#include "pinctrl-paris.h"
+
+/* The MT8195 has multiple register bases for programming pin configuration,
+ * listed below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x11d10000, iocfg[2]:0x11d30000,
+ * iocfg[3]:0x11d40000, iocfg[4]:0x11e20000, iocfg[5]:0x11eb0000,
+ * iocfg[6]:0x11f40000.
+ * The _i_base_ argument of the macros below selects which of these bases a
+ * pin's registers are mapped into, e.g. i_base = 2 selects iocfg[2]
+ * (0x11d30000).
+ */
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt8195_pin_mode_range[] = {
+ PIN_FIELD(0, 144, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_dir_range[] = {
+ PIN_FIELD(0, 144, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_di_range[] = {
+ PIN_FIELD(0, 144, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_do_range[] = {
+ PIN_FIELD(0, 144, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x040, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x040, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x040, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x040, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x040, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x040, 0x10, 5, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x040, 0x10, 6, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x040, 0x10, 7, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x040, 0x10, 13, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x040, 0x10, 8, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x040, 0x10, 14, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x040, 0x10, 9, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x040, 0x10, 15, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x040, 0x10, 10, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x040, 0x10, 16, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x040, 0x10, 11, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x040, 0x10, 17, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x040, 0x10, 12, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x040, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x040, 0x10, 12, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x040, 0x10, 11, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x040, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x040, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x040, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x040, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x040, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x040, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x040, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x040, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x040, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x040, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x060, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x060, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x060, 0x10, 11, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x060, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x060, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x070, 0x10, 3, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x070, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x070, 0x10, 4, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x070, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x070, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x070, 0x10, 7, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x070, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x070, 0x10, 9, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x070, 0x10, 20, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x070, 0x10, 21, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x060, 0x10, 18, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x060, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x060, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x060, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x060, 0x10, 25, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x060, 0x10, 20, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x060, 0x10, 26, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x060, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x060, 0x10, 22, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x060, 0x10, 23, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x060, 0x10, 24, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x060, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x060, 0x10, 27, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x060, 0x10, 30, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x060, 0x10, 28, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x060, 0x10, 8, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x060, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x060, 0x10, 10, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x060, 0x10, 9, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x070, 0x10, 1, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x060, 0x10, 31, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x070, 0x10, 0, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x070, 0x10, 2, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x060, 0x10, 6, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x060, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x060, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x060, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x060, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x060, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x070, 0x10, 11, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x030, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x030, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x030, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x030, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x030, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x030, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x030, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x030, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x030, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x030, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x030, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x030, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x030, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x030, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x030, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x030, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x030, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x030, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x030, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x030, 0x10, 0, 1),
+ PIN_FIELD_BASE(97, 97, 3, 0x030, 0x10, 20, 1),
+ PIN_FIELD_BASE(98, 98, 3, 0x030, 0x10, 28, 1),
+ PIN_FIELD_BASE(99, 99, 3, 0x030, 0x10, 27, 1),
+ PIN_FIELD_BASE(100, 100, 3, 0x030, 0x10, 30, 1),
+ PIN_FIELD_BASE(101, 101, 3, 0x030, 0x10, 29, 1),
+ PIN_FIELD_BASE(102, 102, 3, 0x040, 0x10, 0, 1),
+ PIN_FIELD_BASE(103, 103, 3, 0x030, 0x10, 31, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x030, 0x10, 25, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x030, 0x10, 26, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x030, 0x10, 23, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x030, 0x10, 24, 1),
+ PIN_FIELD_BASE(108, 108, 3, 0x030, 0x10, 22, 1),
+ PIN_FIELD_BASE(109, 109, 3, 0x030, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x010, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x010, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x010, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x010, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x010, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x030, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x030, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x030, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x030, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x030, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x030, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x030, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x030, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x030, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x030, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x030, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x030, 0x10, 10, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x040, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 3, 0x040, 0x10, 1, 1),
+ PIN_FIELD_BASE(130, 130, 3, 0x040, 0x10, 4, 1),
+ PIN_FIELD_BASE(131, 131, 3, 0x040, 0x10, 2, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x030, 0x10, 13, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x030, 0x10, 12, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x030, 0x10, 15, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x030, 0x10, 14, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x070, 0x10, 13, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x070, 0x10, 12, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x070, 0x10, 15, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x070, 0x10, 14, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x070, 0x10, 17, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x070, 0x10, 16, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x070, 0x10, 19, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x070, 0x10, 18, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0d0, 0x10, 5, 1),
+ PINS_FIELD_BASE(6, 7, 4, 0x0d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x0d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x0d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x0d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x0d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x0d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x0d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x0d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x0d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x090, 0x10, 11, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x090, 0x10, 10, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x090, 0x10, 9, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x090, 0x10, 11, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x090, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x090, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x090, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x090, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x090, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x090, 0x10, 5, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x090, 0x10, 6, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x090, 0x10, 7, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x090, 0x10, 8, 1),
+ PINS_FIELD_BASE(31, 33, 1, 0x0f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0f0, 0x10, 2, 1),
+ PINS_FIELD_BASE(38, 39, 1, 0x0f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x0f0, 0x10, 16, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x0f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x0f0, 0x10, 25, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0f0, 0x10, 26, 1),
+ PINS_FIELD_BASE(46, 47, 1, 0x0f0, 0x10, 5, 1),
+ PINS_FIELD_BASE(48, 51, 1, 0x0f0, 0x10, 6, 1),
+ PINS_FIELD_BASE(52, 55, 1, 0x0f0, 0x10, 7, 1),
+ PINS_FIELD_BASE(56, 59, 1, 0x0f0, 0x10, 8, 1),
+ PINS_FIELD_BASE(60, 63, 1, 0x0f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x0f0, 0x10, 10, 1),
+ PINS_FIELD_BASE(65, 68, 1, 0x0f0, 0x10, 3, 1),
+ PINS_FIELD_BASE(69, 71, 1, 0x0f0, 0x10, 10, 1),
+ PINS_FIELD_BASE(72, 75, 1, 0x0f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0e0, 0x10, 11, 1),
+ PINS_FIELD_BASE(85, 88, 3, 0x0e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0e0, 0x10, 13, 1),
+ PINS_FIELD_BASE(95, 98, 3, 0x0e0, 0x10, 15, 1),
+ PINS_FIELD_BASE(99, 102, 3, 0x0e0, 0x10, 16, 1),
+ PINS_FIELD_BASE(103, 104, 3, 0x0e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0e0, 0x10, 18, 1),
+ PINS_FIELD_BASE(106, 107, 3, 0x0e0, 0x10, 17, 1),
+ PINS_FIELD_BASE(108, 109, 3, 0x0e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x070, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x070, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x070, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x070, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x070, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x070, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x0e0, 0x10, 18, 1),
+ PINS_FIELD_BASE(129, 131, 3, 0x0e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x0c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x0c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x0c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x0c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0f0, 0x10, 18, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0f0, 0x10, 17, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0f0, 0x10, 20, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0f0, 0x10, 19, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0f0, 0x10, 22, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0f0, 0x10, 21, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0f0, 0x10, 24, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0f0, 0x10, 23, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_pu_range[] = {
+ PIN_FIELD_BASE(6, 6, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x00a0, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x00a0, 0x10, 30, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x00a0, 0x10, 28, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(97, 97, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(99, 99, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 3, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(101, 101, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(102, 102, 3, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(103, 103, 3, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(108, 108, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 3, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(129, 129, 3, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(130, 130, 3, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(131, 131, 3, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x00b0, 0x10, 19, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_pd_range[] = {
+ PIN_FIELD_BASE(6, 6, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x0080, 0x10, 28, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(97, 97, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(99, 99, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(101, 101, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(102, 102, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(103, 103, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(108, 108, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(129, 129, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(130, 130, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(131, 131, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0090, 0x10, 19, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_pupd_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0060, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x0060, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0050, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_r0_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0070, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_r1_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0080, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(1, 1, 4, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(2, 2, 4, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(3, 3, 4, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(4, 4, 4, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(5, 5, 4, 0x000, 0x10, 15, 3),
+ PINS_FIELD_BASE(6, 7, 4, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
+ PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
+ PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
+ PIN_FIELD_BASE(17, 17, 4, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(18, 18, 2, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(19, 19, 2, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(20, 20, 2, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(21, 21, 2, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(22, 22, 2, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(23, 23, 2, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(24, 24, 2, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(25, 25, 2, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(26, 26, 2, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(27, 27, 2, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(28, 28, 2, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x000, 0x10, 24, 3),
+ PINS_FIELD_BASE(31, 33, 1, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x000, 0x10, 24, 3),
+ PIN_FIELD_BASE(36, 36, 1, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(37, 37, 1, 0x010, 0x10, 21, 3),
+ PINS_FIELD_BASE(38, 39, 1, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(40, 40, 1, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(41, 41, 1, 0x010, 0x10, 24, 3),
+ PIN_FIELD_BASE(42, 42, 1, 0x020, 0x10, 3, 3),
+ PIN_FIELD_BASE(43, 43, 1, 0x020, 0x10, 0, 3),
+ PIN_FIELD_BASE(44, 44, 1, 0x030, 0x10, 0, 3),
+ PIN_FIELD_BASE(45, 45, 1, 0x030, 0x10, 3, 3),
+ PINS_FIELD_BASE(46, 47, 1, 0x010, 0x10, 3, 3),
+ PINS_FIELD_BASE(48, 51, 1, 0x010, 0x10, 6, 3),
+ PINS_FIELD_BASE(52, 55, 1, 0x010, 0x10, 9, 3),
+ PINS_FIELD_BASE(56, 59, 1, 0x010, 0x10, 12, 3),
+ PINS_FIELD_BASE(60, 63, 1, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(64, 64, 1, 0x010, 0x10, 18, 3),
+ PINS_FIELD_BASE(65, 68, 1, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(69, 69, 1, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(70, 70, 1, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(71, 71, 1, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(72, 72, 1, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(73, 73, 1, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(74, 74, 1, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(75, 75, 1, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(76, 76, 1, 0x010, 0x10, 18, 3),
+ PIN_FIELD_BASE(77, 77, 3, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+ PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
+ PINS_FIELD_BASE(85, 88, 3, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(89, 89, 3, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(90, 90, 3, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(91, 91, 3, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(92, 92, 3, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(93, 93, 3, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(94, 94, 3, 0x010, 0x10, 9, 3),
+ PINS_FIELD_BASE(95, 98, 3, 0x010, 0x10, 18, 3),
+ PINS_FIELD_BASE(99, 102, 3, 0x010, 0x10, 21, 3),
+ PINS_FIELD_BASE(103, 104, 3, 0x010, 0x10, 24, 3),
+ PIN_FIELD_BASE(105, 105, 3, 0x010, 0x10, 27, 3),
+ PINS_FIELD_BASE(106, 107, 3, 0x010, 0x10, 24, 3),
+ PINS_FIELD_BASE(108, 109, 3, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(110, 110, 5, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(111, 111, 5, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(112, 112, 5, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(113, 113, 5, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(114, 114, 5, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(115, 115, 5, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(116, 116, 6, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(117, 117, 6, 0x000, 0x10, 24, 3),
+ PIN_FIELD_BASE(118, 118, 6, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(119, 119, 6, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 6, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(121, 121, 6, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 6, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(125, 125, 6, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(126, 126, 6, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(127, 127, 6, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(128, 128, 3, 0x010, 0x10, 27, 3),
+ PINS_FIELD_BASE(129, 130, 3, 0x020, 0x10, 0, 3),
+ PINS_FIELD_BASE(131, 131, 3, 0x010, 0x10, 12, 3),
+ PIN_FIELD_BASE(132, 132, 6, 0x010, 0x10, 9, 3),
+ PIN_FIELD_BASE(133, 133, 6, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(134, 134, 6, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(135, 135, 6, 0x010, 0x10, 12, 3),
+ PIN_FIELD_BASE(136, 136, 1, 0x020, 0x10, 9, 3),
+ PIN_FIELD_BASE(137, 137, 1, 0x020, 0x10, 6, 3),
+ PIN_FIELD_BASE(138, 138, 1, 0x020, 0x10, 15, 3),
+ PIN_FIELD_BASE(139, 139, 1, 0x020, 0x10, 12, 3),
+ PIN_FIELD_BASE(140, 140, 1, 0x020, 0x10, 21, 3),
+ PIN_FIELD_BASE(141, 141, 1, 0x020, 0x10, 18, 3),
+ PIN_FIELD_BASE(142, 142, 1, 0x020, 0x10, 27, 3),
+ PIN_FIELD_BASE(143, 143, 1, 0x020, 0x10, 24, 3),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(8, 8, 4, 0x020, 0x10, 15, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x020, 0x10, 0, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x020, 0x10, 18, 3),
+ PIN_FIELD_BASE(11, 11, 4, 0x020, 0x10, 3, 3),
+ PIN_FIELD_BASE(12, 12, 4, 0x020, 0x10, 21, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x020, 0x10, 6, 3),
+ PIN_FIELD_BASE(14, 14, 4, 0x020, 0x10, 24, 3),
+ PIN_FIELD_BASE(15, 15, 4, 0x020, 0x10, 9, 3),
+ PIN_FIELD_BASE(16, 16, 4, 0x020, 0x10, 27, 3),
+ PIN_FIELD_BASE(17, 17, 4, 0x020, 0x10, 12, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x020, 0x10, 0, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x020, 0x10, 3, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x040, 0x10, 0, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x040, 0x10, 3, 3),
+ PIN_FIELD_BASE(44, 44, 1, 0x040, 0x10, 6, 3),
+ PIN_FIELD_BASE(45, 45, 1, 0x040, 0x10, 9, 3),
+};
+
+static const struct mtk_pin_reg_calc mt8195_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8195_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8195_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8195_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8195_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8195_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8195_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8195_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8195_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8195_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8195_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8195_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8195_pin_r1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8195_pin_drv_adv_range),
+};
+
+static const char * const mt8195_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg_bm", "iocfg_bl", "iocfg_br", "iocfg_lm",
+ "iocfg_rb", "iocfg_tl",
+};
+
+static const struct mtk_eint_hw mt8195_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 7,
+ .ap_num = 225,
+ .db_cnt = 32,
+};
+
+static const struct mtk_pin_soc mt8195_data = {
+ .reg_cal = mt8195_reg_cals,
+ .pins = mtk_pins_mt8195,
+ .npins = ARRAY_SIZE(mtk_pins_mt8195),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8195),
+ .eint_hw = &mt8195_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8195_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8195_pinctrl_register_base_names),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+};
+
+static const struct of_device_id mt8195_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8195-pinctrl", },
+ { }
+};
+
+static int mt8195_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_paris_pinctrl_probe(pdev, &mt8195_data);
+}
+
+static struct platform_driver mt8195_pinctrl_driver = {
+ .driver = {
+ .name = "mt8195-pinctrl",
+ .of_match_table = mt8195_pinctrl_of_match,
+ },
+ .probe = mt8195_pinctrl_probe,
+};
+
+static int __init mt8195_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8195_pinctrl_driver);
+}
+arch_initcall(mt8195_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 72f17f26acd8..5b3b048725cc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -57,11 +57,17 @@ static u32 mtk_r32(struct mtk_pinctrl *pctl, u8 i, u32 reg)
void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set)
{
u32 val;
+ unsigned long flags;
+
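+ /* Hold the pinctrl lock so the read-modify-write sequence below cannot
+ * interleave with a concurrent mtk_rmw() on the same register.
+ */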
+ spin_lock_irqsave(&pctl->lock, flags);
val = mtk_r32(pctl, i, reg);
val &= ~mask;
val |= set;
mtk_w32(pctl, i, reg, val);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw,
@@ -1027,6 +1032,22 @@ int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
}
EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get);
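+/* The _raw variants below write and read the PINCTRL_PIN_REG_DRV_ADV field
+ * directly, leaving any interpretation of the value to the SoC-specific
+ * caller.
+ */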
+int mtk_pinconf_adv_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_ADV, arg);
+}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_set_raw);
+
+int mtk_pinconf_adv_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val)
+{
+ return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_ADV, val);
+}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get_raw);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("Pin configuration library module for mediatek SoCs");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index e2aae285b5fc..a6f1bdb2083b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -66,6 +66,7 @@ enum {
PINCTRL_PIN_REG_DRV_EN,
PINCTRL_PIN_REG_DRV_E0,
PINCTRL_PIN_REG_DRV_E1,
+ PINCTRL_PIN_REG_DRV_ADV,
PINCTRL_PIN_REG_MAX,
};
@@ -251,6 +252,8 @@ struct mtk_pinctrl {
struct mtk_eint *eint;
struct mtk_pinctrl_group *groups;
const char **grp_names;
+ /* protect pin register resources from concurrent access by multiple threads */
+ spinlock_t lock;
};
void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set);
@@ -314,6 +317,10 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg);
int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 *val);
+int mtk_pinconf_adv_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_adv_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val);
bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n);
#endif /* __PINCTRL_MTK_COMMON_V2_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h
new file mode 100644
index 000000000000..de4a8a80bf1d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h
@@ -0,0 +1,1669 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8195_H
+#define __PINCTRL_MTK_MT8195_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt8195[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "MSDC2_CMD"),
+ MTK_FUNCTION(3, "TDMIN_MCK"),
+ MTK_FUNCTION(4, "CLKM0"),
+ MTK_FUNCTION(5, "PERSTN_1"),
+ MTK_FUNCTION(6, "IDDIG_1P"),
+ MTK_FUNCTION(7, "DMIC4_CLK")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "MSDC2_CLK"),
+ MTK_FUNCTION(3, "TDMIN_DI"),
+ MTK_FUNCTION(4, "CLKM1"),
+ MTK_FUNCTION(5, "CLKREQN_1"),
+ MTK_FUNCTION(6, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(7, "DMIC4_DAT")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT3"),
+ MTK_FUNCTION(3, "TDMIN_LRCK"),
+ MTK_FUNCTION(4, "CLKM2"),
+ MTK_FUNCTION(5, "WAKEN_1"),
+ MTK_FUNCTION(7, "DMIC2_CLK")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT0"),
+ MTK_FUNCTION(3, "TDMIN_BCK"),
+ MTK_FUNCTION(4, "CLKM3"),
+ MTK_FUNCTION(7, "DMIC2_DAT")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT2"),
+ MTK_FUNCTION(3, "SPDIF_IN1"),
+ MTK_FUNCTION(4, "UTXD3"),
+ MTK_FUNCTION(5, "SDA2"),
+ MTK_FUNCTION(7, "IDDIG_2P")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT1"),
+ MTK_FUNCTION(3, "SPDIF_IN0"),
+ MTK_FUNCTION(4, "URXD3"),
+ MTK_FUNCTION(5, "SCL2"),
+ MTK_FUNCTION(7, "USB_DRVVBUS_2P")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "I2SO1_D4"),
+ MTK_FUNCTION(4, "UTXD4"),
+ MTK_FUNCTION(5, "CMVREF3"),
+ MTK_FUNCTION(7, "DMIC3_CLK")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(3, "I2SO1_D5"),
+ MTK_FUNCTION(4, "URXD4"),
+ MTK_FUNCTION(5, "CMVREF4"),
+ MTK_FUNCTION(7, "DMIC3_DAT")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(4, "SPDIF_OUT"),
+ MTK_FUNCTION(6, "LVTS_FOUT"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(4, "IR_IN"),
+ MTK_FUNCTION(6, "LVTS_SDO"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "ADSP_URXD0"),
+ MTK_FUNCTION(4, "SPDIF_IN1"),
+ MTK_FUNCTION(6, "LVTS_SCF"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "ADSP_UTXD0"),
+ MTK_FUNCTION(4, "SPDIF_IN0"),
+ MTK_FUNCTION(6, "LVTS_SCK"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "DMIC3_DAT_R"),
+ MTK_FUNCTION(3, "I2SO1_D6"),
+ MTK_FUNCTION(6, "LVTS_SDI"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "DMIC4_DAT_R"),
+ MTK_FUNCTION(3, "I2SO1_D7"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "DMIC3_DAT"),
+ MTK_FUNCTION(3, "TDMIN_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "DMIC3_CLK"),
+ MTK_FUNCTION(3, "TDMIN_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(2, "DMIC4_DAT"),
+ MTK_FUNCTION(3, "TDMIN_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(2, "DMIC4_CLK"),
+ MTK_FUNCTION(3, "TDMIN_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DP_TX_HPD")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "WAKEN"),
+ MTK_FUNCTION(2, "SCP_SDA1"),
+ MTK_FUNCTION(3, "MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "SDA6")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "PERSTN"),
+ MTK_FUNCTION(2, "SCP_SCL1"),
+ MTK_FUNCTION(3, "MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "SCL6")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "CLKREQN"),
+ MTK_FUNCTION(3, "MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "PERSTN_1"),
+ MTK_FUNCTION(5, "SCP_SCL1"),
+ MTK_FUNCTION(7, "MD32_0_GPIO0")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "CLKREQN_1"),
+ MTK_FUNCTION(3, "SDA4"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "SCP_SDA0"),
+ MTK_FUNCTION(7, "MD32_0_GPIO1")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "WAKEN_1"),
+ MTK_FUNCTION(3, "SCL4"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "SCP_SCL0"),
+ MTK_FUNCTION(6, "LVTS_26M"),
+ MTK_FUNCTION(7, "MD32_0_GPIO2")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "CMMRST"),
+ MTK_FUNCTION(2, "CMMCLK3"),
+ MTK_FUNCTION(3, "SPDIF_OUT"),
+ MTK_FUNCTION(4, "SDA6"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "MD32_0_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "CMMPDN"),
+ MTK_FUNCTION(2, "CMMCLK4"),
+ MTK_FUNCTION(3, "IR_IN"),
+ MTK_FUNCTION(4, "SCL6"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "MD32_0_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "HDMIRX20_HTPLG"),
+ MTK_FUNCTION(2, "CMFLASH0"),
+ MTK_FUNCTION(3, "MD32_0_TXD"),
+ MTK_FUNCTION(4, "TP_UTXD2_AO"),
+ MTK_FUNCTION(5, "SCL7"),
+ MTK_FUNCTION(6, "UCTS2"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "HDMIRX20_PWR5V"),
+ MTK_FUNCTION(2, "CMFLASH1"),
+ MTK_FUNCTION(3, "MD32_0_RXD"),
+ MTK_FUNCTION(4, "TP_URXD2_AO"),
+ MTK_FUNCTION(5, "SDA7"),
+ MTK_FUNCTION(6, "URTS2"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "HDMIRX20_SCL"),
+ MTK_FUNCTION(2, "CMFLASH2"),
+ MTK_FUNCTION(3, "SCL5"),
+ MTK_FUNCTION(4, "TP_URTS2_AO"),
+ MTK_FUNCTION(6, "UTXD2"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "HDMIRX20_SDA"),
+ MTK_FUNCTION(2, "CMFLASH3"),
+ MTK_FUNCTION(3, "SDA5"),
+ MTK_FUNCTION(4, "TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "URXD2"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "HDMITX20_PWR5V"),
+ MTK_FUNCTION(2, "DMIC1_DAT_R"),
+ MTK_FUNCTION(3, "PERSTN"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "HDMITX20_HTPLG"),
+ MTK_FUNCTION(3, "CLKREQN"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "HDMITX20_CEC"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "WAKEN")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "HDMITX20_SCL"),
+ MTK_FUNCTION(2, "CMVREF1"),
+ MTK_FUNCTION(3, "SCL7"),
+ MTK_FUNCTION(4, "SCL6"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "HDMITX20_SDA"),
+ MTK_FUNCTION(2, "CMVREF2"),
+ MTK_FUNCTION(3, "SDA7"),
+ MTK_FUNCTION(4, "SDA6"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "RTC32K_CK"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "WATCHDOG"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "SRCLKENA0"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "SRCLKENA1"),
+ MTK_FUNCTION(2, "DMIC2_DAT_R"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN"),
+ MTK_FUNCTION(3, "SPIM3_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK"),
+ MTK_FUNCTION(3, "SPIM3_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(3, "SPIM3_MO"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(3, "SPIM3_MI"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "SPMI_M_SCL"),
+ MTK_FUNCTION(2, "I2SI00_DATA1"),
+ MTK_FUNCTION(3, "SCL5"),
+ MTK_FUNCTION(4, "UTXD5"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "SPMI_M_SDA"),
+ MTK_FUNCTION(2, "I2SI00_DATA2"),
+ MTK_FUNCTION(3, "SDA5"),
+ MTK_FUNCTION(4, "URXD5"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "I2SIN_MCK"),
+ MTK_FUNCTION(2, "I2SI00_DATA3"),
+ MTK_FUNCTION(3, "SPLIN_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "I2SIN_BCK"),
+ MTK_FUNCTION(2, "I2SIN0_BCK"),
+ MTK_FUNCTION(3, "SPLIN_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "I2SIN_WS"),
+ MTK_FUNCTION(2, "I2SIN0_LRCK"),
+ MTK_FUNCTION(3, "SPLIN_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "I2SIN_D0"),
+ MTK_FUNCTION(2, "I2SI00_DATA0"),
+ MTK_FUNCTION(3, "SPLIN_D0"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "I2SO1_MCK"),
+ MTK_FUNCTION(2, "I2SI5_D0"),
+ MTK_FUNCTION(4, "I2SO4_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "I2SO1_BCK"),
+ MTK_FUNCTION(2, "I2SI5_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "I2SO1_WS"),
+ MTK_FUNCTION(2, "I2SI5_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "I2SO1_D0"),
+ MTK_FUNCTION(2, "I2SI5_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "I2SO1_D1"),
+ MTK_FUNCTION(2, "I2SI01_DATA1"),
+ MTK_FUNCTION(3, "SPLIN_D1"),
+ MTK_FUNCTION(4, "I2SO4_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "I2SO1_D2"),
+ MTK_FUNCTION(2, "I2SI01_DATA2"),
+ MTK_FUNCTION(3, "SPLIN_D2"),
+ MTK_FUNCTION(4, "I2SO4_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "I2SO1_D3"),
+ MTK_FUNCTION(2, "I2SI01_DATA3"),
+ MTK_FUNCTION(3, "SPLIN_D3"),
+ MTK_FUNCTION(4, "I2SO4_D0"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "I2SO2_MCK"),
+ MTK_FUNCTION(2, "I2SO1_D12"),
+ MTK_FUNCTION(3, "LCM1_RST"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "I2SO2_BCK"),
+ MTK_FUNCTION(2, "I2SO1_D13"),
+ MTK_FUNCTION(3, "I2SIN1_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "I2SO2_WS"),
+ MTK_FUNCTION(2, "I2SO1_D14"),
+ MTK_FUNCTION(3, "I2SIN1_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "I2SO2_D0"),
+ MTK_FUNCTION(2, "I2SO1_D15"),
+ MTK_FUNCTION(3, "I2SI01_DATA0"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(2, "I2SO2_BCK"),
+ MTK_FUNCTION(3, "SCP_SPI2_CK"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "DMIC1_DAT"),
+ MTK_FUNCTION(2, "I2SO2_WS"),
+ MTK_FUNCTION(3, "SCP_SPI2_MI"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "DMIC2_CLK"),
+ MTK_FUNCTION(2, "VBUSVALID"),
+ MTK_FUNCTION(3, "SCP_SPI2_MO"),
+ MTK_FUNCTION(4, "SCP_SCL2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TDO"),
+ MTK_FUNCTION(6, "JTDO_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "DMIC2_DAT"),
+ MTK_FUNCTION(2, "VBUSVALID_1P"),
+ MTK_FUNCTION(3, "SCP_SPI2_CS"),
+ MTK_FUNCTION(4, "SCP_SDA2"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "PCM_DO"),
+ MTK_FUNCTION(2, "AUXIF_ST0"),
+ MTK_FUNCTION(3, "UCTS2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TMS"),
+ MTK_FUNCTION(6, "JTMS_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "PCM_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK0"),
+ MTK_FUNCTION(3, "URTS2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TCK"),
+ MTK_FUNCTION(6, "JTCK_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "PCM_DI"),
+ MTK_FUNCTION(2, "AUXIF_ST1"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(6, "JTRSTn_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "PCM_SYNC"),
+ MTK_FUNCTION(2, "AUXIF_CLK1"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TDI"),
+ MTK_FUNCTION(6, "JTDI_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "I2SIN2_BCK"),
+ MTK_FUNCTION(3, "PWM_0"),
+ MTK_FUNCTION(4, "WAKEN"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(2, "I2SIN2_LRCK"),
+ MTK_FUNCTION(3, "PWM_1"),
+ MTK_FUNCTION(4, "PERSTN"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "IDDIG_2P"),
+ MTK_FUNCTION(3, "PWM_2"),
+ MTK_FUNCTION(4, "CLKREQN"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "PERSTN_1"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "I2SI02_DATA0"),
+ MTK_FUNCTION(4, "CLKREQN_1"),
+ MTK_FUNCTION(5, "VOW_DAT_MISO"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "I2SI02_DATA1"),
+ MTK_FUNCTION(4, "WAKEN_1"),
+ MTK_FUNCTION(5, "VOW_CLK_MISO"),
+ MTK_FUNCTION(7, "DBG_MON_B32")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO2"),
+ MTK_FUNCTION(2, "I2SI02_DATA2")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "I2SI02_DATA3"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "DGI_D0"),
+ MTK_FUNCTION(2, "DPI_D0"),
+ MTK_FUNCTION(3, "I2SI4_MCK"),
+ MTK_FUNCTION(4, "SPIM4_CLK"),
+ MTK_FUNCTION(5, "GBE_TXD3"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "DGI_D1"),
+ MTK_FUNCTION(2, "DPI_D1"),
+ MTK_FUNCTION(3, "I2SI4_BCK"),
+ MTK_FUNCTION(4, "SPIM4_MO"),
+ MTK_FUNCTION(5, "GBE_TXD2"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "DGI_D2"),
+ MTK_FUNCTION(2, "DPI_D2"),
+ MTK_FUNCTION(3, "I2SI4_WS"),
+ MTK_FUNCTION(4, "SPIM4_CSB"),
+ MTK_FUNCTION(5, "GBE_TXD1"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "DGI_D3"),
+ MTK_FUNCTION(2, "DPI_D3"),
+ MTK_FUNCTION(3, "I2SI4_D0"),
+ MTK_FUNCTION(4, "SPIM4_MI"),
+ MTK_FUNCTION(5, "GBE_TXD0"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "DGI_D4"),
+ MTK_FUNCTION(2, "DPI_D4"),
+ MTK_FUNCTION(3, "I2SI5_MCK"),
+ MTK_FUNCTION(4, "SPIM5_CLK"),
+ MTK_FUNCTION(5, "GBE_RXD3"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "DGI_D5"),
+ MTK_FUNCTION(2, "DPI_D5"),
+ MTK_FUNCTION(3, "I2SI5_BCK"),
+ MTK_FUNCTION(4, "SPIM5_MO"),
+ MTK_FUNCTION(5, "GBE_RXD2"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "DGI_D6"),
+ MTK_FUNCTION(2, "DPI_D6"),
+ MTK_FUNCTION(3, "I2SI5_WS"),
+ MTK_FUNCTION(4, "SPIM5_CSB"),
+ MTK_FUNCTION(5, "GBE_RXD1"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "DGI_D7"),
+ MTK_FUNCTION(2, "DPI_D7"),
+ MTK_FUNCTION(3, "I2SI5_D0"),
+ MTK_FUNCTION(4, "SPIM5_MI"),
+ MTK_FUNCTION(5, "GBE_RXD0"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "DGI_D8"),
+ MTK_FUNCTION(2, "DPI_D8"),
+ MTK_FUNCTION(3, "I2SO4_MCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_CK"),
+ MTK_FUNCTION(5, "GBE_TXC"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "DGI_D9"),
+ MTK_FUNCTION(2, "DPI_D9"),
+ MTK_FUNCTION(3, "I2SO4_BCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_MI"),
+ MTK_FUNCTION(5, "GBE_RXC"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "DGI_D10"),
+ MTK_FUNCTION(2, "DPI_D10"),
+ MTK_FUNCTION(3, "I2SO4_WS"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_CS"),
+ MTK_FUNCTION(5, "GBE_RXDV"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "DGI_D11"),
+ MTK_FUNCTION(2, "DPI_D11"),
+ MTK_FUNCTION(3, "I2SO4_D0"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_MO"),
+ MTK_FUNCTION(5, "GBE_TXEN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "DGI_D12"),
+ MTK_FUNCTION(2, "DPI_D12"),
+ MTK_FUNCTION(3, "MSDC2_CMD_A"),
+ MTK_FUNCTION(4, "I2SO5_BCK"),
+ MTK_FUNCTION(5, "GBE_MDC"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "DGI_D13"),
+ MTK_FUNCTION(2, "DPI_D13"),
+ MTK_FUNCTION(3, "MSDC2_CLK_A"),
+ MTK_FUNCTION(4, "I2SO5_WS"),
+ MTK_FUNCTION(5, "GBE_MDIO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "DGI_D14"),
+ MTK_FUNCTION(2, "DPI_D14"),
+ MTK_FUNCTION(3, "MSDC2_DAT3_A"),
+ MTK_FUNCTION(4, "I2SO5_D0"),
+ MTK_FUNCTION(5, "GBE_TXER"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "DGI_D15"),
+ MTK_FUNCTION(2, "DPI_D15"),
+ MTK_FUNCTION(3, "MSDC2_DAT0_A"),
+ MTK_FUNCTION(4, "I2SO2_D1"),
+ MTK_FUNCTION(5, "GBE_RXER"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "DGI_HSYNC"),
+ MTK_FUNCTION(2, "DPI_HSYNC"),
+ MTK_FUNCTION(3, "MSDC2_DAT2_A"),
+ MTK_FUNCTION(4, "I2SO2_D2"),
+ MTK_FUNCTION(5, "GBE_COL"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "DGI_VSYNC"),
+ MTK_FUNCTION(2, "DPI_VSYNC"),
+ MTK_FUNCTION(3, "MSDC2_DAT1_A"),
+ MTK_FUNCTION(4, "I2SO2_D3"),
+ MTK_FUNCTION(5, "GBE_INTR"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "DGI_DE"),
+ MTK_FUNCTION(2, "DPI_DE"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(5, "I2SIN_D1"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "DGI_CK"),
+ MTK_FUNCTION(2, "DPI_CK"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "I2SO5_MCK"),
+ MTK_FUNCTION(5, "I2SIN_D2"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "DISP_PWM0"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "UTXD0")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "URXD0")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(2, "DSI_TE"),
+ MTK_FUNCTION(3, "I2SO1_D8"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "PWM_0"),
+ MTK_FUNCTION(6, "TP_URTS1_AO"),
+ MTK_FUNCTION(7, "I2SIN_D0")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "I2SO1_D9"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "PWM_1"),
+ MTK_FUNCTION(6, "TP_UCTS1_AO"),
+ MTK_FUNCTION(7, "I2SIN_D1")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "VBUSVALID_2P"),
+ MTK_FUNCTION(3, "I2SO1_D10"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "TP_UTXD1_AO"),
+ MTK_FUNCTION(6, "MD32_1_TXD"),
+ MTK_FUNCTION(7, "I2SIN_D2")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "VBUSVALID_3P"),
+ MTK_FUNCTION(3, "I2SO1_D11"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "TP_URXD1_AO"),
+ MTK_FUNCTION(6, "MD32_1_RXD"),
+ MTK_FUNCTION(7, "I2SIN_D3")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(2, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(3, "PWM_2")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "KPCOL0")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "SCP_SCL3"),
+ MTK_FUNCTION(5, "I2SIN_MCK")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(2, "KPCOL1"),
+ MTK_FUNCTION(4, "SCP_SDA3"),
+ MTK_FUNCTION(5, "I2SIN_BCK")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(2, "I2SIN_D3"),
+ MTK_FUNCTION(5, "I2SIN_WS")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "JTMS_SEL3"),
+ MTK_FUNCTION(3, "UDI_TMS"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TMS"),
+ MTK_FUNCTION(6, "IPU_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "JTCK_SEL3"),
+ MTK_FUNCTION(3, "UDI_TCK"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TCK"),
+ MTK_FUNCTION(6, "IPU_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "JTDI_SEL3"),
+ MTK_FUNCTION(3, "UDI_TDI"),
+ MTK_FUNCTION(4, "I2SO2_D0"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TDI"),
+ MTK_FUNCTION(6, "IPU_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "JTDO_SEL3"),
+ MTK_FUNCTION(3, "UDI_TDO"),
+ MTK_FUNCTION(4, "I2SO2_D1"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TDO"),
+ MTK_FUNCTION(6, "IPU_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "JTRSTn_SEL3"),
+ MTK_FUNCTION(3, "UDI_NTRST"),
+ MTK_FUNCTION(4, "I2SO2_D2"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TRST"),
+ MTK_FUNCTION(6, "IPU_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(4, "I2SO2_D3"),
+ MTK_FUNCTION(6, "MD32_1_GPIO2")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "MSDC0_DAT7")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "MSDC0_DAT6")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "MSDC0_DAT5")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "MSDC0_DAT4")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "MSDC0_CLK")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "MSDC0_DAT3")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "MSDC0_DAT2")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "MSDC0_DAT1")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "MSDC0_DAT0")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MSDC0_DSL")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "UCTS2"),
+ MTK_FUNCTION(3, "UTXD5"),
+ MTK_FUNCTION(4, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(5, "mbistreaden_trigger"),
+ MTK_FUNCTION(6, "MD32_1_GPIO0"),
+ MTK_FUNCTION(7, "SCP_SCL2")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "USB_DRVVBUS"),
+ MTK_FUNCTION(2, "URTS2"),
+ MTK_FUNCTION(3, "URXD5"),
+ MTK_FUNCTION(4, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(5, "mbistwriteen_trigger"),
+ MTK_FUNCTION(6, "MD32_1_GPIO1"),
+ MTK_FUNCTION(7, "SCP_SDA2")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "IDDIG_1P"),
+ MTK_FUNCTION(2, "SPINOR_IO2"),
+ MTK_FUNCTION(3, "SNFI_WP"),
+ MTK_FUNCTION(4, "VPU_UDI_NTRST")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(2, "SPINOR_IO3"),
+ MTK_FUNCTION(3, "SNFI_HOLD"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TRST"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "APU_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "SPIM0_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(3, "SPIS0_CSB"),
+ MTK_FUNCTION(4, "VPU_UDI_TMS"),
+ MTK_FUNCTION(6, "I2SO5_D0")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "SPIM0_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SPIS0_CLK"),
+ MTK_FUNCTION(4, "VPU_UDI_TCK"),
+ MTK_FUNCTION(6, "I2SO5_BCK")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "SPIM0_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SPIS0_SI"),
+ MTK_FUNCTION(4, "VPU_UDI_TDO"),
+ MTK_FUNCTION(6, "I2SO5_WS")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "SPIM0_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(3, "SPIS0_SO"),
+ MTK_FUNCTION(4, "VPU_UDI_TDI"),
+ MTK_FUNCTION(6, "I2SO5_MCK")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "SPIM1_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "SPIS1_CSB"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TMS"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "APU_JTAG_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "SPIM1_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "SPIS1_CLK"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TCK"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "APU_JTAG_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "SPIM1_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "SPIS1_SI"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TDO"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "APU_JTAG_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "SPIM1_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "SPIS1_SO"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TDI"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "APU_JTAG_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "SPIM2_CSB"),
+ MTK_FUNCTION(2, "SPINOR_CS"),
+ MTK_FUNCTION(3, "SNFI_CS"),
+ MTK_FUNCTION(4, "DMIC3_DAT"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "SPIM2_CLK"),
+ MTK_FUNCTION(2, "SPINOR_CK"),
+ MTK_FUNCTION(3, "SNFI_CLK"),
+ MTK_FUNCTION(4, "DMIC3_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "SPIM2_MO"),
+ MTK_FUNCTION(2, "SPINOR_IO0"),
+ MTK_FUNCTION(3, "SNFI_MOSI"),
+ MTK_FUNCTION(4, "DMIC4_DAT"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "SPIM2_MI"),
+ MTK_FUNCTION(2, "SPINOR_IO1"),
+ MTK_FUNCTION(3, "SNFI_MISO"),
+ MTK_FUNCTION(4, "DMIC4_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 221),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 224),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ )
+};
+
+#endif /* __PINCTRL_MTK_MT8195_H */
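
The MT8195 table above packs, per physical pin, an EINT number, a drive-strength group and up to eight mux functions (selector 0 is always the GPIO function; gaps are selectors the pin does not implement). A minimal sketch of how such a table can be consulted, assuming a mtk_pin_desc layout matching the MTK_PIN()/MTK_FUNCTION() initializers — the helper and field names here are illustrative, not the driver's API:

	/* Illustrative only: resolve a (pin, mux selector) pair to its
	 * signal name, returning NULL for unimplemented selectors. */
	static const char *mt8195_signal_name(const struct mtk_pin_desc *pins,
					      unsigned int npins,
					      unsigned int pin, unsigned int mux)
	{
		if (pin >= npins || mux >= 8)
			return NULL;
		return pins[pin].funcs[mux].name;
	}

Under those assumptions, looking up pin 122 at selector 1 in the table above would yield "MSDC0_CLK".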
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index da1f19288aa6..85db2e4377f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -970,6 +970,8 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
hw->nbase = hw->soc->nbase_names;
+ spin_lock_init(&hw->lock);
+
err = mtk_pctrl_build_state(pdev);
if (err) {
dev_err(&pdev->dev, "build state failed: %d\n", err);
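
The one-line pinctrl-paris fix initialises hw->lock before mtk_pctrl_build_state() and the gpiochip registration that follows in probe; once the chip is visible, IRQ and pinconf paths may take the lock, so it has to be usable first. A minimal sketch of the init-before-publish pattern (the struct and probe function are hypothetical, not from this driver):

	#include <linux/gpio/driver.h>
	#include <linux/platform_device.h>
	#include <linux/spinlock.h>

	struct example_priv {		/* hypothetical driver state */
		struct gpio_chip chip;
		spinlock_t lock;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		spin_lock_init(&priv->lock);	/* before any callback can run */

		/* gpio_chip setup omitted; publishing it is the last step */
		return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
	}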
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
index 17491b27e487..8ba8f3e9121f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
@@ -519,13 +519,13 @@ static struct mvebu_mpp_mode armada_cp110_mpp_modes[] = {
MPP_FUNCTION(4, "synce1", "clk"),
MPP_FUNCTION(8, "led", "data"),
MPP_FUNCTION(10, "sdio", "hw_rst"),
- MPP_FUNCTION(11, "sdio", "wr_protect")),
+ MPP_FUNCTION(11, "sdio_wp", "wr_protect")),
MPP_MODE(55,
MPP_FUNCTION(0, "gpio", NULL),
MPP_FUNCTION(1, "ge1", "rxctl_rxdv"),
MPP_FUNCTION(3, "ptp", "pulse"),
MPP_FUNCTION(10, "sdio", "led"),
- MPP_FUNCTION(11, "sdio", "card_detect")),
+ MPP_FUNCTION(11, "sdio_cd", "card_detect")),
MPP_MODE(56,
MPP_FUNCTION(0, "gpio", NULL),
MPP_FUNCTION(4, "tdm", "drx"),
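
The CP110 hunk renames the two selector-11 settings because the mvebu core identifies a function on a pin by name: with "sdio" attached to both selector 10 and selector 11 of the same MPP, only the first match is ever selectable. Roughly the matching behaviour being worked around (a sketch with illustrative types, not the exact mvebu code):

	#include <linux/string.h>

	struct example_setting { const char *name; int val; };

	/* Sketch: first name match wins, so duplicate names on one pin
	 * leave the second setting unreachable. */
	static int find_setting(const struct example_setting *s, int n,
				const char *fname)
	{
		int i;

		for (i = 0; i < n; i++)
			if (!strcmp(s[i].name, fname))
				return s[i].val;
		return -1;
	}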
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 1e225d513988..22e8d4c4040e 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -43,7 +43,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
- PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
+ PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
@@ -174,8 +174,8 @@ static const struct pinconf_generic_params dt_params[] = {
{ "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
{ "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
- { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
- { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
+ { "low-power-disable", PIN_CONFIG_MODE_LOW_POWER, 0 },
+ { "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 },
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 02c075cc010b..d9d54065472e 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -370,9 +370,9 @@ DEFINE_SHOW_ATTRIBUTE(pinconf_groups);
void pinconf_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
- debugfs_create_file("pinconf-pins", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinconf-pins", 0444,
devroot, pctldev, &pinconf_pins_fops);
- debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinconf-groups", 0444,
devroot, pctldev, &pinconf_groups_fops);
}
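
The pinconf.c hunk is a mechanical cleanup: 0444 is the octal spelling of S_IRUGO that checkpatch prefers, and S_IFREG was redundant because debugfs_create_file() only ever creates regular files. The mode bits are identical:

	#include <linux/build_bug.h>
	#include <linux/stat.h>

	/* 0444 == S_IRUGO == S_IRUSR | S_IRGRP | S_IROTH */
	static_assert(0444 == (S_IRUSR | S_IRGRP | S_IROTH));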
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index e71ebccc479c..03c32b2c5d30 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
conf = atmel_pin_config_read(pctldev, pin_id);
+ /* Keep slew rate enabled by default. */
+ if (atmel_pioctrl->slew_rate_support)
+ conf |= ATMEL_PIO_SR_MASK;
+
for (i = 0; i < num_configs; i++) {
unsigned int param = pinconf_to_config_param(configs[i]);
unsigned int arg = pinconf_to_config_argument(configs[i]);
@@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
__func__, pin_id, configs[i]);
- /* Keep slew rate enabled by default. */
- if (atmel_pioctrl->slew_rate_support)
- conf |= ATMEL_PIO_SR_MASK;
-
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
conf &= (~ATMEL_PIO_PUEN_MASK);
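
Hoisting the slew-rate default above the loop fixes an ordering bug: with the |= inside the loop, a PIN_CONFIG_SLEW_RATE entry that cleared ATMEL_PIO_SR_MASK on one iteration was silently re-set on the next, so the requested disable never reached the hardware. A standalone sketch of the corrected shape (EXAMPLE_SR_MASK and the function are illustrative stand-ins):

	#include <linux/bits.h>
	#include <linux/pinctrl/pinconf-generic.h>

	#define EXAMPLE_SR_MASK	BIT(0)	/* stand-in for ATMEL_PIO_SR_MASK */

	static unsigned int example_apply(unsigned int conf,
					  const unsigned long *configs,
					  unsigned int num_configs)
	{
		unsigned int i;

		conf |= EXAMPLE_SR_MASK;	/* default applied once, up front */

		for (i = 0; i < num_configs; i++)
			if (pinconf_to_config_param(configs[i]) == PIN_CONFIG_SLEW_RATE &&
			    !pinconf_to_config_argument(configs[i]))
				conf &= ~EXAMPLE_SR_MASK;	/* override sticks */

		return conf;
	}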
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 067271b7d35a..a194d8089b6f 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -628,7 +628,8 @@ static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
break;
default:
- return -EINVAL;
+ of_node_put(np);
+ return -EINVAL;
}
i++;
}
@@ -707,34 +708,42 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
group.num_pins = of_property_count_u32_elems(np, "pins");
if (group.num_pins < 0) {
dev_err(dev, "No pins in the group: %s\n", prop->name);
+ of_node_put(np);
return -EINVAL;
}
group.name = prop->value;
group.pins = devm_kcalloc(dev, group.num_pins,
sizeof(*(group.pins)), GFP_KERNEL);
- if (!group.pins)
+ if (!group.pins) {
+ of_node_put(np);
return -ENOMEM;
+ }
pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
GFP_KERNEL);
- if (!pinmux)
+ if (!pinmux) {
+ of_node_put(np);
return -ENOMEM;
+ }
for (j = 0; j < group.num_pins; j++) {
if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
dev_err(dev, "Group %s: Read intel pins id failed\n",
group.name);
+ of_node_put(np);
return -EINVAL;
}
if (pin_id >= drvdata->pctl_desc.npins) {
dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
group.name, j, pin_id);
+ of_node_put(np);
return -EINVAL;
}
group.pins[j] = pin_id;
if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
dev_err(dev, "Group %s: Read intel pinmux id failed\n",
group.name);
+ of_node_put(np);
return -EINVAL;
}
pinmux[j] = pinmux_id;
@@ -745,6 +754,7 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
pinmux);
if (err < 0) {
dev_err(dev, "Failed to register group %s\n", group.name);
+ of_node_put(np);
return err;
}
memset(&group, 0, sizeof(group));
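
All of the added of_node_put(np) calls in pinctrl-equilibrium plug the same leak: for_each_child_of_node() holds a reference on the current child and releases it only when advancing to the next one, so any early return from the loop body must drop that reference itself. The canonical shape:

	#include <linux/of.h>

	static int example_scan(struct device_node *parent)
	{
		struct device_node *np;

		for_each_child_of_node(parent, np) {
			if (!of_device_is_available(np)) {
				of_node_put(np);	/* drop the iterator's ref */
				return -ENODEV;
			}
		}
		return 0;
	}

(Later kernels add a scoped variant that drops the reference automatically, but the manual put is what applies here.)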
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index f2746125b077..651a36b9dcc0 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,8 +3,8 @@
* Ingenic SoCs pinctrl driver
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
* Copyright (c) 2017, 2019 Paul Boddie <paul@boddie.org.uk>
+ * Copyright (c) 2019, 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/compiler.h>
@@ -26,47 +26,83 @@
#include "pinconf.h"
#include "pinmux.h"
-#define GPIO_PIN 0x00
-#define GPIO_MSK 0x20
+#define GPIO_PIN 0x00
+#define GPIO_MSK 0x20
+
+#define JZ4730_GPIO_DATA 0x00
+#define JZ4730_GPIO_GPDIR 0x04
+#define JZ4730_GPIO_GPPUR 0x0c
+#define JZ4730_GPIO_GPALR 0x10
+#define JZ4730_GPIO_GPAUR 0x14
+#define JZ4730_GPIO_GPIDLR 0x18
+#define JZ4730_GPIO_GPIDUR 0x1c
+#define JZ4730_GPIO_GPIER 0x20
+#define JZ4730_GPIO_GPIMR 0x24
+#define JZ4730_GPIO_GPFR 0x28
+
+#define JZ4740_GPIO_DATA 0x10
+#define JZ4740_GPIO_PULL_DIS 0x30
+#define JZ4740_GPIO_FUNC 0x40
+#define JZ4740_GPIO_SELECT 0x50
+#define JZ4740_GPIO_DIR 0x60
+#define JZ4740_GPIO_TRIG 0x70
+#define JZ4740_GPIO_FLAG 0x80
+
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
+
+#define X1830_GPIO_PEL 0x110
+#define X1830_GPIO_PEH 0x120
+#define X1830_GPIO_SR 0x150
+#define X1830_GPIO_SMT 0x160
+
+#define X2000_GPIO_EDG 0x70
+#define X2000_GPIO_PEPU 0x80
+#define X2000_GPIO_PEPD 0x90
+#define X2000_GPIO_SR 0xd0
+#define X2000_GPIO_SMT 0xe0
+
+#define REG_SET(x) ((x) + 0x4)
+#define REG_CLEAR(x) ((x) + 0x8)
+
+#define REG_PZ_BASE(x) ((x) * 7)
+#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
+
+#define GPIO_PULL_DIS 0
+#define GPIO_PULL_UP 1
+#define GPIO_PULL_DOWN 2
+
+#define PINS_PER_GPIO_CHIP 32
+#define JZ4730_PINS_PER_PAIRED_REG 16
-#define JZ4740_GPIO_DATA 0x10
-#define JZ4740_GPIO_PULL_DIS 0x30
-#define JZ4740_GPIO_FUNC 0x40
-#define JZ4740_GPIO_SELECT 0x50
-#define JZ4740_GPIO_DIR 0x60
-#define JZ4740_GPIO_TRIG 0x70
-#define JZ4740_GPIO_FLAG 0x80
-
-#define JZ4770_GPIO_INT 0x10
-#define JZ4770_GPIO_PAT1 0x30
-#define JZ4770_GPIO_PAT0 0x40
-#define JZ4770_GPIO_FLAG 0x50
-#define JZ4770_GPIO_PEN 0x70
-
-#define X1830_GPIO_PEL 0x110
-#define X1830_GPIO_PEH 0x120
-
-#define REG_SET(x) ((x) + 0x4)
-#define REG_CLEAR(x) ((x) + 0x8)
-
-#define REG_PZ_BASE(x) ((x) * 7)
-#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
-
-#define GPIO_PULL_DIS 0
-#define GPIO_PULL_UP 1
-#define GPIO_PULL_DOWN 2
+#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs) \
+ { \
+ name, \
+ id##_pins, \
+ ARRAY_SIZE(id##_pins), \
+ funcs, \
+ }
-#define PINS_PER_GPIO_CHIP 32
+#define INGENIC_PIN_GROUP(name, id, func) \
+ INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
enum jz_version {
+ ID_JZ4730,
ID_JZ4740,
ID_JZ4725B,
+ ID_JZ4750,
+ ID_JZ4755,
ID_JZ4760,
ID_JZ4770,
+ ID_JZ4775,
ID_JZ4780,
ID_X1000,
ID_X1500,
ID_X1830,
+ ID_X2000,
};
struct ingenic_chip_info {
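
Moving INGENIC_PIN_GROUP_FUNCS/INGENIC_PIN_GROUP up keeps them defined before their first user now that the JZ4730 tables precede the JZ4740 ones. The macro pastes id##_pins so a single argument supplies both the pin array and its length, and the plain variant smuggles one shared function number through the same pointer-sized slot. For illustration:

	/* INGENIC_PIN_GROUP("pwm0", jz4730_pwm_pwm0, 1) expands to roughly: */
	{
		"pwm0",
		jz4730_pwm_pwm0_pins,
		ARRAY_SIZE(jz4730_pwm_pwm0_pins),
		(void *)1,	/* one mux function for every pin in the group */
	},

whereas the _FUNCS form passes a u8 table (see jz4730_lcd_8bit_funcs) when pins within one group need different mux values.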
@@ -99,6 +135,99 @@ struct ingenic_gpio_chip {
unsigned int irq, reg_base;
};
+static const u32 jz4730_pull_ups[4] = {
+ 0x3fa3320f, 0xf200ffff, 0xffffffff, 0xffffffff,
+};
+
+static const u32 jz4730_pull_downs[4] = {
+ 0x00000df0, 0x0dff0000, 0x00000000, 0x00000000,
+};
+
+static int jz4730_mmc_1bit_pins[] = { 0x27, 0x26, 0x22, };
+static int jz4730_mmc_4bit_pins[] = { 0x23, 0x24, 0x25, };
+static int jz4730_uart0_data_pins[] = { 0x7e, 0x7f, };
+static int jz4730_uart1_data_pins[] = { 0x18, 0x19, };
+static int jz4730_uart2_data_pins[] = { 0x6f, 0x7d, };
+static int jz4730_uart3_data_pins[] = { 0x10, 0x15, };
+static int jz4730_uart3_hwflow_pins[] = { 0x11, 0x17, };
+static int jz4730_lcd_8bit_pins[] = {
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x3a, 0x39, 0x38,
+};
+static int jz4730_lcd_16bit_pins[] = {
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+};
+static int jz4730_lcd_special_pins[] = { 0x3d, 0x3c, 0x3e, 0x3f, };
+static int jz4730_lcd_generic_pins[] = { 0x3b, };
+static int jz4730_nand_cs1_pins[] = { 0x53, };
+static int jz4730_nand_cs2_pins[] = { 0x54, };
+static int jz4730_nand_cs3_pins[] = { 0x55, };
+static int jz4730_nand_cs4_pins[] = { 0x56, };
+static int jz4730_nand_cs5_pins[] = { 0x57, };
+static int jz4730_pwm_pwm0_pins[] = { 0x5e, };
+static int jz4730_pwm_pwm1_pins[] = { 0x5f, };
+
+static u8 jz4730_lcd_8bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, };
+
+static const struct group_desc jz4730_groups[] = {
+ INGENIC_PIN_GROUP("mmc-1bit", jz4730_mmc_1bit, 1),
+ INGENIC_PIN_GROUP("mmc-4bit", jz4730_mmc_4bit, 1),
+ INGENIC_PIN_GROUP("uart0-data", jz4730_uart0_data, 1),
+ INGENIC_PIN_GROUP("uart1-data", jz4730_uart1_data, 1),
+ INGENIC_PIN_GROUP("uart2-data", jz4730_uart2_data, 1),
+ INGENIC_PIN_GROUP("uart3-data", jz4730_uart3_data, 1),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4730_uart3_hwflow, 1),
+ INGENIC_PIN_GROUP_FUNCS("lcd-8bit", jz4730_lcd_8bit, jz4730_lcd_8bit_funcs),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4730_lcd_16bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4730_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4730_lcd_generic, 1),
+ INGENIC_PIN_GROUP("nand-cs1", jz4730_nand_cs1, 1),
+ INGENIC_PIN_GROUP("nand-cs2", jz4730_nand_cs2, 1),
+ INGENIC_PIN_GROUP("nand-cs3", jz4730_nand_cs3, 1),
+ INGENIC_PIN_GROUP("nand-cs4", jz4730_nand_cs4, 1),
+ INGENIC_PIN_GROUP("nand-cs5", jz4730_nand_cs5, 1),
+ INGENIC_PIN_GROUP("pwm0", jz4730_pwm_pwm0, 1),
+ INGENIC_PIN_GROUP("pwm1", jz4730_pwm_pwm1, 1),
+};
+
+static const char *jz4730_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
+static const char *jz4730_uart0_groups[] = { "uart0-data", };
+static const char *jz4730_uart1_groups[] = { "uart1-data", };
+static const char *jz4730_uart2_groups[] = { "uart2-data", };
+static const char *jz4730_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4730_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-special", "lcd-generic",
+};
+static const char *jz4730_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-cs5",
+};
+static const char *jz4730_pwm0_groups[] = { "pwm0", };
+static const char *jz4730_pwm1_groups[] = { "pwm1", };
+
+static const struct function_desc jz4730_functions[] = {
+ { "mmc", jz4730_mmc_groups, ARRAY_SIZE(jz4730_mmc_groups), },
+ { "uart0", jz4730_uart0_groups, ARRAY_SIZE(jz4730_uart0_groups), },
+ { "uart1", jz4730_uart1_groups, ARRAY_SIZE(jz4730_uart1_groups), },
+ { "uart2", jz4730_uart2_groups, ARRAY_SIZE(jz4730_uart2_groups), },
+ { "uart3", jz4730_uart3_groups, ARRAY_SIZE(jz4730_uart3_groups), },
+ { "lcd", jz4730_lcd_groups, ARRAY_SIZE(jz4730_lcd_groups), },
+ { "nand", jz4730_nand_groups, ARRAY_SIZE(jz4730_nand_groups), },
+ { "pwm0", jz4730_pwm0_groups, ARRAY_SIZE(jz4730_pwm0_groups), },
+ { "pwm1", jz4730_pwm1_groups, ARRAY_SIZE(jz4730_pwm1_groups), },
+};
+
+static const struct ingenic_chip_info jz4730_chip_info = {
+ .num_chips = 4,
+ .reg_offset = 0x30,
+ .version = ID_JZ4730,
+ .groups = jz4730_groups,
+ .num_groups = ARRAY_SIZE(jz4730_groups),
+ .functions = jz4730_functions,
+ .num_functions = ARRAY_SIZE(jz4730_functions),
+ .pull_ups = jz4730_pull_ups,
+ .pull_downs = jz4730_pull_downs,
+};
+
static const u32 jz4740_pull_ups[4] = {
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
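
Each ingenic_chip_info added in this patch describes one SoC in the same terms: num_chips GPIO ports of PINS_PER_GPIO_CHIP (32) pins each, with every port's register block reg_offset bytes after the previous one (0x30 on the JZ4730, 0x100 on the JZ4750/JZ4755/JZ4775). A global pin number therefore decomposes as:

	#define PINS_PER_GPIO_CHIP	32	/* as defined in this patch */

	/* e.g. jz4730 pwm0 pin 0x5e (94): port 2 ("C"), bit 30 */
	static inline unsigned int pin_port(unsigned int pin)
	{
		return pin / PINS_PER_GPIO_CHIP;
	}

	static inline unsigned int pin_bit(unsigned int pin)
	{
		return pin % PINS_PER_GPIO_CHIP;
	}

The JZ4730 is the odd one out: its function and direction bits live in paired lower/upper registers (GPALR/GPAUR, GPIDLR/GPIDUR) covering JZ4730_PINS_PER_PAIRED_REG (16) pins apiece, which is why that constant is introduced alongside the port-wide one.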
@@ -113,13 +242,15 @@ static int jz4740_uart0_data_pins[] = { 0x7a, 0x79, };
static int jz4740_uart0_hwflow_pins[] = { 0x7e, 0x7f, };
static int jz4740_uart1_data_pins[] = { 0x7e, 0x7f, };
static int jz4740_lcd_8bit_pins[] = {
- 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x52, 0x53, 0x54,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x52, 0x53, 0x54,
};
static int jz4740_lcd_16bit_pins[] = {
- 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x55,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
};
static int jz4740_lcd_18bit_pins[] = { 0x50, 0x51, };
-static int jz4740_lcd_18bit_tft_pins[] = { 0x56, 0x57, 0x31, 0x32, };
+static int jz4740_lcd_special_pins[] = { 0x31, 0x32, 0x56, 0x57, };
+static int jz4740_lcd_generic_pins[] = { 0x55, };
static int jz4740_nand_cs1_pins[] = { 0x39, };
static int jz4740_nand_cs2_pins[] = { 0x3a, };
static int jz4740_nand_cs3_pins[] = { 0x3b, };
@@ -134,18 +265,6 @@ static int jz4740_pwm_pwm5_pins[] = { 0x7c, };
static int jz4740_pwm_pwm6_pins[] = { 0x7e, };
static int jz4740_pwm_pwm7_pins[] = { 0x7f, };
-
-#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs) \
- { \
- name, \
- id##_pins, \
- ARRAY_SIZE(id##_pins), \
- funcs, \
- }
-
-#define INGENIC_PIN_GROUP(name, id, func) \
- INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
-
static const struct group_desc jz4740_groups[] = {
INGENIC_PIN_GROUP("mmc-1bit", jz4740_mmc_1bit, 0),
INGENIC_PIN_GROUP("mmc-4bit", jz4740_mmc_4bit, 0),
@@ -155,8 +274,8 @@ static const struct group_desc jz4740_groups[] = {
INGENIC_PIN_GROUP("lcd-8bit", jz4740_lcd_8bit, 0),
INGENIC_PIN_GROUP("lcd-16bit", jz4740_lcd_16bit, 0),
INGENIC_PIN_GROUP("lcd-18bit", jz4740_lcd_18bit, 0),
- INGENIC_PIN_GROUP("lcd-18bit-tft", jz4740_lcd_18bit_tft, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-special", jz4740_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4740_lcd_generic, 0),
INGENIC_PIN_GROUP("nand-cs1", jz4740_nand_cs1, 0),
INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2, 0),
INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3, 0),
@@ -176,7 +295,7 @@ static const char *jz4740_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
static const char *jz4740_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *jz4740_uart1_groups[] = { "uart1-data", };
static const char *jz4740_lcd_groups[] = {
- "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-18bit-tft", "lcd-no-pins",
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-special", "lcd-generic",
};
static const char *jz4740_nand_groups[] = {
"nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
@@ -223,6 +342,17 @@ static int jz4725b_mmc0_4bit_pins[] = { 0x5d, 0x5b, 0x56, };
static int jz4725b_mmc1_1bit_pins[] = { 0x7a, 0x7b, 0x7c, };
static int jz4725b_mmc1_4bit_pins[] = { 0x7d, 0x7e, 0x7f, };
static int jz4725b_uart_data_pins[] = { 0x4c, 0x4d, };
+static int jz4725b_lcd_8bit_pins[] = {
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x72, 0x73, 0x74,
+};
+static int jz4725b_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4725b_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4725b_lcd_generic_pins[] = { 0x75, };
static int jz4725b_nand_cs1_pins[] = { 0x55, };
static int jz4725b_nand_cs2_pins[] = { 0x56, };
static int jz4725b_nand_cs3_pins[] = { 0x57, };
@@ -235,19 +365,6 @@ static int jz4725b_pwm_pwm2_pins[] = { 0x4c, };
static int jz4725b_pwm_pwm3_pins[] = { 0x4d, };
static int jz4725b_pwm_pwm4_pins[] = { 0x4e, };
static int jz4725b_pwm_pwm5_pins[] = { 0x4f, };
-static int jz4725b_lcd_8bit_pins[] = {
- 0x72, 0x73, 0x74,
- 0x60, 0x61, 0x62, 0x63,
- 0x64, 0x65, 0x66, 0x67,
-};
-static int jz4725b_lcd_16bit_pins[] = {
- 0x68, 0x69, 0x6a, 0x6b,
- 0x6c, 0x6d, 0x6e, 0x6f,
-};
-static int jz4725b_lcd_18bit_pins[] = { 0x70, 0x71, };
-static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, };
-static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
-static int jz4725b_lcd_generic_pins[] = { 0x75, };
static u8 jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, };
@@ -258,6 +375,12 @@ static const struct group_desc jz4725b_groups[] = {
INGENIC_PIN_GROUP("mmc1-1bit", jz4725b_mmc1_1bit, 0),
INGENIC_PIN_GROUP("mmc1-4bit", jz4725b_mmc1_4bit, 0),
INGENIC_PIN_GROUP("uart-data", jz4725b_uart_data, 1),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic, 0),
INGENIC_PIN_GROUP("nand-cs1", jz4725b_nand_cs1, 0),
INGENIC_PIN_GROUP("nand-cs2", jz4725b_nand_cs2, 0),
INGENIC_PIN_GROUP("nand-cs3", jz4725b_nand_cs3, 0),
@@ -270,17 +393,15 @@ static const struct group_desc jz4725b_groups[] = {
INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3, 0),
INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4, 0),
INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5, 0),
- INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit, 0),
- INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit, 0),
- INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit, 0),
- INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit, 1),
- INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special, 0),
- INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic, 0),
};
static const char *jz4725b_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
static const char *jz4725b_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
static const char *jz4725b_uart_groups[] = { "uart-data", };
+static const char *jz4725b_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
static const char *jz4725b_nand_groups[] = {
"nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4",
"nand-cle-ale", "nand-fre-fwe",
@@ -291,10 +412,6 @@ static const char *jz4725b_pwm2_groups[] = { "pwm2", };
static const char *jz4725b_pwm3_groups[] = { "pwm3", };
static const char *jz4725b_pwm4_groups[] = { "pwm4", };
static const char *jz4725b_pwm5_groups[] = { "pwm5", };
-static const char *jz4725b_lcd_groups[] = {
- "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
- "lcd-special", "lcd-generic",
-};
static const struct function_desc jz4725b_functions[] = {
{ "mmc0", jz4725b_mmc0_groups, ARRAY_SIZE(jz4725b_mmc0_groups), },
@@ -322,6 +439,275 @@ static const struct ingenic_chip_info jz4725b_chip_info = {
.pull_downs = jz4740_pull_downs,
};
+static const u32 jz4750_pull_ups[6] = {
+ 0xffffffff, 0xffffffff, 0x3fffffff, 0x7fffffff, 0x1fff3fff, 0x00ffffff,
+};
+
+static const u32 jz4750_pull_downs[6] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static int jz4750_uart0_data_pins[] = { 0xa4, 0xa5, };
+static int jz4750_uart0_hwflow_pins[] = { 0xa6, 0xa7, };
+static int jz4750_uart1_data_pins[] = { 0x90, 0x91, };
+static int jz4750_uart1_hwflow_pins[] = { 0x92, 0x93, };
+static int jz4750_uart2_data_pins[] = { 0x9b, 0x9a, };
+static int jz4750_uart3_data_pins[] = { 0xb0, 0xb1, };
+static int jz4750_uart3_hwflow_pins[] = { 0xb2, 0xb3, };
+static int jz4750_mmc0_1bit_pins[] = { 0xa8, 0xa9, 0xa0, };
+static int jz4750_mmc0_4bit_pins[] = { 0xa1, 0xa2, 0xa3, };
+static int jz4750_mmc0_8bit_pins[] = { 0xa4, 0xa5, 0xa6, 0xa7, };
+static int jz4750_mmc1_1bit_pins[] = { 0xae, 0xaf, 0xaa, };
+static int jz4750_mmc1_4bit_pins[] = { 0xab, 0xac, 0xad, };
+static int jz4750_i2c_pins[] = { 0x8c, 0x8d, };
+static int jz4750_cim_pins[] = {
+ 0x89, 0x8b, 0x8a, 0x88,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+};
+static int jz4750_lcd_8bit_pins[] = {
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x72, 0x73, 0x74,
+};
+static int jz4750_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4750_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4750_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, 0xb2, 0xb3, };
+static int jz4750_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4750_lcd_generic_pins[] = { 0x75, };
+static int jz4750_nand_cs1_pins[] = { 0x55, };
+static int jz4750_nand_cs2_pins[] = { 0x56, };
+static int jz4750_nand_cs3_pins[] = { 0x57, };
+static int jz4750_nand_cs4_pins[] = { 0x58, };
+static int jz4750_nand_fre_fwe_pins[] = { 0x5c, 0x5d, };
+static int jz4750_pwm_pwm0_pins[] = { 0x94, };
+static int jz4750_pwm_pwm1_pins[] = { 0x95, };
+static int jz4750_pwm_pwm2_pins[] = { 0x96, };
+static int jz4750_pwm_pwm3_pins[] = { 0x97, };
+static int jz4750_pwm_pwm4_pins[] = { 0x98, };
+static int jz4750_pwm_pwm5_pins[] = { 0x99, };
+
+static const struct group_desc jz4750_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4750_uart0_data, 1),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4750_uart0_hwflow, 1),
+ INGENIC_PIN_GROUP("uart1-data", jz4750_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4750_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4750_uart2_data, 1),
+ INGENIC_PIN_GROUP("uart3-data", jz4750_uart3_data, 0),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4750_uart3_hwflow, 0),
+ INGENIC_PIN_GROUP("mmc0-1bit", jz4750_mmc0_1bit, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit", jz4750_mmc0_4bit, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit", jz4750_mmc0_8bit, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit", jz4750_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", jz4750_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("i2c-data", jz4750_i2c, 0),
+ INGENIC_PIN_GROUP("cim-data", jz4750_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4750_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4750_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4750_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4750_lcd_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4750_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4750_lcd_generic, 0),
+ INGENIC_PIN_GROUP("nand-cs1", jz4750_nand_cs1, 0),
+ INGENIC_PIN_GROUP("nand-cs2", jz4750_nand_cs2, 0),
+ INGENIC_PIN_GROUP("nand-cs3", jz4750_nand_cs3, 0),
+ INGENIC_PIN_GROUP("nand-cs4", jz4750_nand_cs4, 0),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4750_nand_fre_fwe, 0),
+ INGENIC_PIN_GROUP("pwm0", jz4750_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4750_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4750_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4750_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4750_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4750_pwm_pwm5, 0),
+};
+
+static const char *jz4750_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4750_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *jz4750_uart2_groups[] = { "uart2-data", };
+static const char *jz4750_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4750_mmc0_groups[] = {
+ "mmc0-1bit", "mmc0-4bit", "mmc0-8bit",
+};
+static const char *jz4750_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *jz4750_i2c_groups[] = { "i2c-data", };
+static const char *jz4750_cim_groups[] = { "cim-data", };
+static const char *jz4750_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
+static const char *jz4750_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
+};
+static const char *jz4750_pwm0_groups[] = { "pwm0", };
+static const char *jz4750_pwm1_groups[] = { "pwm1", };
+static const char *jz4750_pwm2_groups[] = { "pwm2", };
+static const char *jz4750_pwm3_groups[] = { "pwm3", };
+static const char *jz4750_pwm4_groups[] = { "pwm4", };
+static const char *jz4750_pwm5_groups[] = { "pwm5", };
+
+static const struct function_desc jz4750_functions[] = {
+ { "uart0", jz4750_uart0_groups, ARRAY_SIZE(jz4750_uart0_groups), },
+ { "uart1", jz4750_uart1_groups, ARRAY_SIZE(jz4750_uart1_groups), },
+ { "uart2", jz4750_uart2_groups, ARRAY_SIZE(jz4750_uart2_groups), },
+ { "uart3", jz4750_uart3_groups, ARRAY_SIZE(jz4750_uart3_groups), },
+ { "mmc0", jz4750_mmc0_groups, ARRAY_SIZE(jz4750_mmc0_groups), },
+ { "mmc1", jz4750_mmc1_groups, ARRAY_SIZE(jz4750_mmc1_groups), },
+ { "i2c", jz4750_i2c_groups, ARRAY_SIZE(jz4750_i2c_groups), },
+ { "cim", jz4750_cim_groups, ARRAY_SIZE(jz4750_cim_groups), },
+ { "lcd", jz4750_lcd_groups, ARRAY_SIZE(jz4750_lcd_groups), },
+ { "nand", jz4750_nand_groups, ARRAY_SIZE(jz4750_nand_groups), },
+ { "pwm0", jz4750_pwm0_groups, ARRAY_SIZE(jz4750_pwm0_groups), },
+ { "pwm1", jz4750_pwm1_groups, ARRAY_SIZE(jz4750_pwm1_groups), },
+ { "pwm2", jz4750_pwm2_groups, ARRAY_SIZE(jz4750_pwm2_groups), },
+ { "pwm3", jz4750_pwm3_groups, ARRAY_SIZE(jz4750_pwm3_groups), },
+ { "pwm4", jz4750_pwm4_groups, ARRAY_SIZE(jz4750_pwm4_groups), },
+ { "pwm5", jz4750_pwm5_groups, ARRAY_SIZE(jz4750_pwm5_groups), },
+};
+
+static const struct ingenic_chip_info jz4750_chip_info = {
+ .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4750,
+ .groups = jz4750_groups,
+ .num_groups = ARRAY_SIZE(jz4750_groups),
+ .functions = jz4750_functions,
+ .num_functions = ARRAY_SIZE(jz4750_functions),
+ .pull_ups = jz4750_pull_ups,
+ .pull_downs = jz4750_pull_downs,
+};
+
+static const u32 jz4755_pull_ups[6] = {
+ 0xffffffff, 0xffffffff, 0x0fffffff, 0xffffffff, 0x33dc3fff, 0x0000fc00,
+};
+
+static const u32 jz4755_pull_downs[6] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static int jz4755_uart0_data_pins[] = { 0x7c, 0x7d, };
+static int jz4755_uart0_hwflow_pins[] = { 0x7e, 0x7f, };
+static int jz4755_uart1_data_pins[] = { 0x97, 0x99, };
+static int jz4755_uart2_data_pins[] = { 0x9f, };
+static int jz4755_mmc0_1bit_pins[] = { 0x2f, 0x50, 0x5c, };
+static int jz4755_mmc0_4bit_pins[] = { 0x5d, 0x5b, 0x51, };
+static int jz4755_mmc1_1bit_pins[] = { 0x3a, 0x3d, 0x3c, };
+static int jz4755_mmc1_4bit_pins[] = { 0x3b, 0x3e, 0x3f, };
+static int jz4755_i2c_pins[] = { 0x8c, 0x8d, };
+static int jz4755_cim_pins[] = {
+ 0x89, 0x8b, 0x8a, 0x88,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+};
+static int jz4755_lcd_8bit_pins[] = {
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x72, 0x73, 0x74,
+};
+static int jz4755_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4755_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4755_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, };
+static int jz4755_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4755_lcd_generic_pins[] = { 0x75, };
+static int jz4755_nand_cs1_pins[] = { 0x55, };
+static int jz4755_nand_cs2_pins[] = { 0x56, };
+static int jz4755_nand_cs3_pins[] = { 0x57, };
+static int jz4755_nand_cs4_pins[] = { 0x58, };
+static int jz4755_nand_fre_fwe_pins[] = { 0x5c, 0x5d, };
+static int jz4755_pwm_pwm0_pins[] = { 0x94, };
+static int jz4755_pwm_pwm1_pins[] = { 0xab, };
+static int jz4755_pwm_pwm2_pins[] = { 0x96, };
+static int jz4755_pwm_pwm3_pins[] = { 0x97, };
+static int jz4755_pwm_pwm4_pins[] = { 0x98, };
+static int jz4755_pwm_pwm5_pins[] = { 0x99, };
+
+static u8 jz4755_mmc0_1bit_funcs[] = { 2, 2, 1, };
+static u8 jz4755_mmc0_4bit_funcs[] = { 1, 0, 1, };
+static u8 jz4755_lcd_24bit_funcs[] = { 1, 1, 1, 1, 0, 0, };
+
+static const struct group_desc jz4755_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4755_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4755_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4755_uart2_data, 1),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-1bit", jz4755_mmc0_1bit,
+ jz4755_mmc0_1bit_funcs),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-4bit", jz4755_mmc0_4bit,
+ jz4755_mmc0_4bit_funcs),
+ INGENIC_PIN_GROUP("mmc1-1bit", jz4755_mmc1_1bit, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit", jz4755_mmc1_4bit, 1),
+ INGENIC_PIN_GROUP("i2c-data", jz4755_i2c, 0),
+ INGENIC_PIN_GROUP("cim-data", jz4755_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4755_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4755_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4755_lcd_18bit, 0),
+ INGENIC_PIN_GROUP_FUNCS("lcd-24bit", jz4755_lcd_24bit,
+ jz4755_lcd_24bit_funcs),
+ INGENIC_PIN_GROUP("lcd-special", jz4755_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4755_lcd_generic, 0),
+ INGENIC_PIN_GROUP("nand-cs1", jz4755_nand_cs1, 0),
+ INGENIC_PIN_GROUP("nand-cs2", jz4755_nand_cs2, 0),
+ INGENIC_PIN_GROUP("nand-cs3", jz4755_nand_cs3, 0),
+ INGENIC_PIN_GROUP("nand-cs4", jz4755_nand_cs4, 0),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4755_nand_fre_fwe, 0),
+ INGENIC_PIN_GROUP("pwm0", jz4755_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4755_pwm_pwm1, 1),
+ INGENIC_PIN_GROUP("pwm2", jz4755_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4755_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4755_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4755_pwm_pwm5, 0),
+};
+
+static const char *jz4755_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4755_uart1_groups[] = { "uart1-data", };
+static const char *jz4755_uart2_groups[] = { "uart2-data", };
+static const char *jz4755_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *jz4755_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *jz4755_i2c_groups[] = { "i2c-data", };
+static const char *jz4755_cim_groups[] = { "cim-data", };
+static const char *jz4755_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
+static const char *jz4755_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
+};
+static const char *jz4755_pwm0_groups[] = { "pwm0", };
+static const char *jz4755_pwm1_groups[] = { "pwm1", };
+static const char *jz4755_pwm2_groups[] = { "pwm2", };
+static const char *jz4755_pwm3_groups[] = { "pwm3", };
+static const char *jz4755_pwm4_groups[] = { "pwm4", };
+static const char *jz4755_pwm5_groups[] = { "pwm5", };
+
+static const struct function_desc jz4755_functions[] = {
+ { "uart0", jz4755_uart0_groups, ARRAY_SIZE(jz4755_uart0_groups), },
+ { "uart1", jz4755_uart1_groups, ARRAY_SIZE(jz4755_uart1_groups), },
+ { "uart2", jz4755_uart2_groups, ARRAY_SIZE(jz4755_uart2_groups), },
+ { "mmc0", jz4755_mmc0_groups, ARRAY_SIZE(jz4755_mmc0_groups), },
+ { "mmc1", jz4755_mmc1_groups, ARRAY_SIZE(jz4755_mmc1_groups), },
+ { "i2c", jz4755_i2c_groups, ARRAY_SIZE(jz4755_i2c_groups), },
+ { "cim", jz4755_cim_groups, ARRAY_SIZE(jz4755_cim_groups), },
+ { "lcd", jz4755_lcd_groups, ARRAY_SIZE(jz4755_lcd_groups), },
+ { "nand", jz4755_nand_groups, ARRAY_SIZE(jz4755_nand_groups), },
+ { "pwm0", jz4755_pwm0_groups, ARRAY_SIZE(jz4755_pwm0_groups), },
+ { "pwm1", jz4755_pwm1_groups, ARRAY_SIZE(jz4755_pwm1_groups), },
+ { "pwm2", jz4755_pwm2_groups, ARRAY_SIZE(jz4755_pwm2_groups), },
+ { "pwm3", jz4755_pwm3_groups, ARRAY_SIZE(jz4755_pwm3_groups), },
+ { "pwm4", jz4755_pwm4_groups, ARRAY_SIZE(jz4755_pwm4_groups), },
+ { "pwm5", jz4755_pwm5_groups, ARRAY_SIZE(jz4755_pwm5_groups), },
+};
+
+static const struct ingenic_chip_info jz4755_chip_info = {
+ .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4755,
+ .groups = jz4755_groups,
+ .num_groups = ARRAY_SIZE(jz4755_groups),
+ .functions = jz4755_functions,
+ .num_functions = ARRAY_SIZE(jz4755_functions),
+ .pull_ups = jz4755_pull_ups,
+ .pull_downs = jz4755_pull_downs,
+};
+
static const u32 jz4760_pull_ups[6] = {
0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
};
@@ -389,7 +775,7 @@ static int jz4760_lcd_18bit_pins[] = {
static int jz4760_lcd_24bit_pins[] = {
0x40, 0x41, 0x4a, 0x4b, 0x54, 0x55,
};
-static int jz4760_lcd_special_pins[] = { 0x40, 0x41, 0x4a, 0x54 };
+static int jz4760_lcd_special_pins[] = { 0x54, 0x4a, 0x41, 0x40, };
static int jz4760_lcd_generic_pins[] = { 0x49, };
static int jz4760_pwm_pwm0_pins[] = { 0x80, };
static int jz4760_pwm_pwm1_pins[] = { 0x81, };
@@ -450,8 +836,8 @@ static const struct group_desc jz4760_groups[] = {
INGENIC_PIN_GROUP("lcd-16bit", jz4760_lcd_16bit, 0),
INGENIC_PIN_GROUP("lcd-18bit", jz4760_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4760_lcd_24bit, 0),
- INGENIC_PIN_GROUP("lcd-generic", jz4760_lcd_generic, 0),
INGENIC_PIN_GROUP("lcd-special", jz4760_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4760_lcd_generic, 0),
INGENIC_PIN_GROUP("pwm0", jz4760_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4760_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4760_pwm_pwm2, 0),
@@ -648,7 +1034,13 @@ static int jz4770_cim_12bit_pins[] = {
};
static int jz4770_lcd_8bit_pins[] = {
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x4c, 0x4d,
- 0x48, 0x49, 0x52, 0x53,
+ 0x48, 0x52, 0x53,
+};
+static int jz4770_lcd_16bit_pins[] = {
+ 0x4e, 0x4f, 0x50, 0x51, 0x56, 0x57, 0x58, 0x59,
+};
+static int jz4770_lcd_18bit_pins[] = {
+ 0x5a, 0x5b,
};
static int jz4770_lcd_24bit_pins[] = {
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
@@ -656,6 +1048,8 @@ static int jz4770_lcd_24bit_pins[] = {
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b,
};
+static int jz4770_lcd_special_pins[] = { 0x54, 0x4a, 0x41, 0x40, };
+static int jz4770_lcd_generic_pins[] = { 0x49, };
static int jz4770_pwm_pwm0_pins[] = { 0x80, };
static int jz4770_pwm_pwm1_pins[] = { 0x81, };
static int jz4770_pwm_pwm2_pins[] = { 0x82, };
@@ -667,7 +1061,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
-static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
+static int jz4770_mac_mii_pins[] = {
+ 0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
+};
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
@@ -754,8 +1150,11 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("cim-data-8bit", jz4770_cim_8bit, 0),
INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit, 0),
INGENIC_PIN_GROUP("lcd-8bit", jz4770_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4770_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4770_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-special", jz4770_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4770_lcd_generic, 0),
INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2, 0),
@@ -816,7 +1215,8 @@ static const char *jz4770_i2c1_groups[] = { "i2c1-data", };
static const char *jz4770_i2c2_groups[] = { "i2c2-data", };
static const char *jz4770_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
static const char *jz4770_lcd_groups[] = {
- "lcd-8bit", "lcd-24bit", "lcd-no-pins",
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
};
static const char *jz4770_pwm0_groups[] = { "pwm0", };
static const char *jz4770_pwm1_groups[] = { "pwm1", };
@@ -874,6 +1274,279 @@ static const struct ingenic_chip_info jz4770_chip_info = {
.pull_downs = jz4770_pull_downs,
};
+static const u32 jz4775_pull_ups[7] = {
+ 0x28ff00ff, 0xf030f3fc, 0x0fffffff, 0xfffe4000, 0xf0f0000c, 0x0000f00f, 0x0000f3c0,
+};
+
+static const u32 jz4775_pull_downs[7] = {
+ 0x00000000, 0x00030c03, 0x00000000, 0x00008000, 0x00000403, 0x00000ff0, 0x00030c00,
+};
+
+static int jz4775_uart0_data_pins[] = { 0xa0, 0xa3, };
+static int jz4775_uart0_hwflow_pins[] = { 0xa1, 0xa2, };
+static int jz4775_uart1_data_pins[] = { 0x7a, 0x7c, };
+static int jz4775_uart1_hwflow_pins[] = { 0x7b, 0x7d, };
+static int jz4775_uart2_data_c_pins[] = { 0x54, 0x4a, };
+static int jz4775_uart2_data_f_pins[] = { 0xa5, 0xa4, };
+static int jz4775_uart3_data_pins[] = { 0x1e, 0x1f, };
+static int jz4775_ssi_dt_a_pins[] = { 0x13, };
+static int jz4775_ssi_dt_d_pins[] = { 0x75, };
+static int jz4775_ssi_dr_a_pins[] = { 0x14, };
+static int jz4775_ssi_dr_d_pins[] = { 0x74, };
+static int jz4775_ssi_clk_a_pins[] = { 0x12, };
+static int jz4775_ssi_clk_d_pins[] = { 0x78, };
+static int jz4775_ssi_gpc_pins[] = { 0x76, };
+static int jz4775_ssi_ce0_a_pins[] = { 0x17, };
+static int jz4775_ssi_ce0_d_pins[] = { 0x79, };
+static int jz4775_ssi_ce1_pins[] = { 0x77, };
+static int jz4775_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
+static int jz4775_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
+static int jz4775_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, };
+static int jz4775_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4775_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4775_mmc1_1bit_d_pins[] = { 0x78, 0x79, 0x74, };
+static int jz4775_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, };
+static int jz4775_mmc1_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4775_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4775_mmc2_1bit_b_pins[] = { 0x3c, 0x3d, 0x34, };
+static int jz4775_mmc2_4bit_b_pins[] = { 0x35, 0x3e, 0x3f, };
+static int jz4775_mmc2_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4775_mmc2_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4775_nemc_8bit_data_pins[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+};
+static int jz4775_nemc_16bit_data_pins[] = {
+ 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1,
+};
+static int jz4775_nemc_cle_ale_pins[] = { 0x20, 0x21, };
+static int jz4775_nemc_addr_pins[] = { 0x22, 0x23, 0x24, 0x25, };
+static int jz4775_nemc_rd_we_pins[] = { 0x10, 0x11, };
+static int jz4775_nemc_frd_fwe_pins[] = { 0x12, 0x13, };
+static int jz4775_nemc_wait_pins[] = { 0x1b, };
+static int jz4775_nemc_cs1_pins[] = { 0x15, };
+static int jz4775_nemc_cs2_pins[] = { 0x16, };
+static int jz4775_nemc_cs3_pins[] = { 0x17, };
+static int jz4775_i2c0_pins[] = { 0x7e, 0x7f, };
+static int jz4775_i2c1_pins[] = { 0x9e, 0x9f, };
+static int jz4775_i2c2_pins[] = { 0x80, 0x83, };
+static int jz4775_i2s_data_tx_pins[] = { 0xa3, };
+static int jz4775_i2s_data_rx_pins[] = { 0xa2, };
+static int jz4775_i2s_clk_txrx_pins[] = { 0xa0, 0xa1, };
+static int jz4775_i2s_sysclk_pins[] = { 0x83, };
+static int jz4775_dmic_pins[] = { 0xaa, 0xab, };
+static int jz4775_cim_pins[] = {
+ 0x26, 0x27, 0x28, 0x29,
+ 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+};
+static int jz4775_lcd_8bit_pins[] = {
+ 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x4c, 0x4d,
+ 0x48, 0x52, 0x53,
+};
+static int jz4775_lcd_16bit_pins[] = {
+ 0x4e, 0x4f, 0x50, 0x51, 0x56, 0x57, 0x58, 0x59,
+};
+static int jz4775_lcd_18bit_pins[] = {
+ 0x5a, 0x5b,
+};
+static int jz4775_lcd_24bit_pins[] = {
+ 0x40, 0x41, 0x4a, 0x4b, 0x54, 0x55,
+};
+static int jz4775_lcd_special_pins[] = { 0x54, 0x4a, 0x41, 0x40, };
+static int jz4775_lcd_generic_pins[] = { 0x49, };
+static int jz4775_pwm_pwm0_pins[] = { 0x80, };
+static int jz4775_pwm_pwm1_pins[] = { 0x81, };
+static int jz4775_pwm_pwm2_pins[] = { 0x82, };
+static int jz4775_pwm_pwm3_pins[] = { 0x83, };
+static int jz4775_mac_rmii_pins[] = {
+ 0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
+};
+static int jz4775_mac_mii_pins[] = {
+ 0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
+};
+static int jz4775_mac_rgmii_pins[] = {
+ 0xa9, 0x7b, 0x7a, 0xab, 0xaa, 0xac, 0x7d, 0x7c, 0xa5, 0xa4,
+ 0xad, 0xae, 0xa7, 0xa6,
+};
+static int jz4775_mac_gmii_pins[] = {
+ 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a,
+ 0xa8, 0x28, 0x24, 0xaf,
+};
+static int jz4775_otg_pins[] = { 0x8a, };
+
+static u8 jz4775_uart3_data_funcs[] = { 0, 1, };
+static u8 jz4775_mac_mii_funcs[] = { 1, 1, 1, 1, 0, 1, 0, };
+static u8 jz4775_mac_rgmii_funcs[] = {
+ 0, 1, 1, 0, 0, 0, 1, 1, 0, 0,
+ 0, 0, 0, 0,
+};
+static u8 jz4775_mac_gmii_funcs[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 1, 1, 0,
+};
+
+static const struct group_desc jz4775_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4775_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4775_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4775_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4775_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data-c", jz4775_uart2_data_c, 2),
+ INGENIC_PIN_GROUP("uart2-data-f", jz4775_uart2_data_f, 1),
+ INGENIC_PIN_GROUP_FUNCS("uart3-data", jz4775_uart3_data,
+ jz4775_uart3_data_funcs),
+ INGENIC_PIN_GROUP("ssi-dt-a", jz4775_ssi_dt_a, 2),
+ INGENIC_PIN_GROUP("ssi-dt-d", jz4775_ssi_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi-dr-a", jz4775_ssi_dr_a, 2),
+ INGENIC_PIN_GROUP("ssi-dr-d", jz4775_ssi_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi-clk-a", jz4775_ssi_clk_a, 2),
+ INGENIC_PIN_GROUP("ssi-clk-d", jz4775_ssi_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi-gpc", jz4775_ssi_gpc, 1),
+ INGENIC_PIN_GROUP("ssi-ce0-a", jz4775_ssi_ce0_a, 2),
+ INGENIC_PIN_GROUP("ssi-ce0-d", jz4775_ssi_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi-ce1", jz4775_ssi_ce1, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-a", jz4775_mmc0_1bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4775_mmc0_4bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-8bit-a", jz4775_mmc0_8bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4775_mmc0_1bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4775_mmc0_4bit_e, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4775_mmc1_1bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4775_mmc1_4bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4775_mmc1_1bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4775_mmc1_4bit_e, 1),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4775_mmc2_1bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4775_mmc2_4bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4775_mmc2_1bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4775_mmc2_4bit_e, 2),
+ INGENIC_PIN_GROUP("nemc-8bit-data", jz4775_nemc_8bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-16bit-data", jz4775_nemc_16bit_data, 1),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4775_nemc_cle_ale, 0),
+ INGENIC_PIN_GROUP("nemc-addr", jz4775_nemc_addr, 0),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4775_nemc_rd_we, 0),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4775_nemc_frd_fwe, 0),
+ INGENIC_PIN_GROUP("nemc-wait", jz4775_nemc_wait, 0),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4775_nemc_cs1, 0),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4775_nemc_cs2, 0),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4775_nemc_cs3, 0),
+ INGENIC_PIN_GROUP("i2c0-data", jz4775_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4775_i2c1, 0),
+ INGENIC_PIN_GROUP("i2c2-data", jz4775_i2c2, 1),
+ INGENIC_PIN_GROUP("i2s-data-tx", jz4775_i2s_data_tx, 1),
+ INGENIC_PIN_GROUP("i2s-data-rx", jz4775_i2s_data_rx, 1),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", jz4775_i2s_clk_txrx, 1),
+ INGENIC_PIN_GROUP("i2s-sysclk", jz4775_i2s_sysclk, 2),
+ INGENIC_PIN_GROUP("dmic", jz4775_dmic, 1),
+ INGENIC_PIN_GROUP("cim-data", jz4775_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4775_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4775_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4775_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4775_lcd_24bit, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4775_lcd_generic, 0),
+ INGENIC_PIN_GROUP("lcd-special", jz4775_lcd_special, 1),
+ INGENIC_PIN_GROUP("pwm0", jz4775_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4775_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4775_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4775_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("mac-rmii", jz4775_mac_rmii, 0),
+ INGENIC_PIN_GROUP_FUNCS("mac-mii", jz4775_mac_mii,
+ jz4775_mac_mii_funcs),
+ INGENIC_PIN_GROUP_FUNCS("mac-rgmii", jz4775_mac_rgmii,
+ jz4775_mac_rgmii_funcs),
+ INGENIC_PIN_GROUP_FUNCS("mac-gmii", jz4775_mac_gmii,
+ jz4775_mac_gmii_funcs),
+ INGENIC_PIN_GROUP("otg-vbus", jz4775_otg, 0),
+};
+
+static const char *jz4775_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4775_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *jz4775_uart2_groups[] = { "uart2-data-c", "uart2-data-f", };
+static const char *jz4775_uart3_groups[] = { "uart3-data", };
+static const char *jz4775_ssi_groups[] = {
+ "ssi-dt-a", "ssi-dt-d",
+ "ssi-dr-a", "ssi-dr-d",
+ "ssi-clk-a", "ssi-clk-d",
+ "ssi-gpc",
+ "ssi-ce0-a", "ssi-ce0-d",
+ "ssi-ce1",
+};
+static const char *jz4775_mmc0_groups[] = {
+ "mmc0-1bit-a", "mmc0-4bit-a", "mmc0-8bit-a",
+ "mmc0-1bit-e", "mmc0-4bit-e",
+};
+static const char *jz4775_mmc1_groups[] = {
+ "mmc1-1bit-d", "mmc1-4bit-d",
+ "mmc1-1bit-e", "mmc1-4bit-e",
+};
+static const char *jz4775_mmc2_groups[] = {
+ "mmc2-1bit-b", "mmc2-4bit-b",
+ "mmc2-1bit-e", "mmc2-4bit-e",
+};
+static const char *jz4775_nemc_groups[] = {
+ "nemc-8bit-data", "nemc-16bit-data", "nemc-cle-ale",
+ "nemc-addr", "nemc-rd-we", "nemc-frd-fwe", "nemc-wait",
+};
+static const char *jz4775_cs1_groups[] = { "nemc-cs1", };
+static const char *jz4775_cs2_groups[] = { "nemc-cs2", };
+static const char *jz4775_cs3_groups[] = { "nemc-cs3", };
+static const char *jz4775_i2c0_groups[] = { "i2c0-data", };
+static const char *jz4775_i2c1_groups[] = { "i2c1-data", };
+static const char *jz4775_i2c2_groups[] = { "i2c2-data", };
+static const char *jz4775_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
+};
+static const char *jz4775_dmic_groups[] = { "dmic", };
+static const char *jz4775_cim_groups[] = { "cim-data", };
+static const char *jz4775_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
+static const char *jz4775_pwm0_groups[] = { "pwm0", };
+static const char *jz4775_pwm1_groups[] = { "pwm1", };
+static const char *jz4775_pwm2_groups[] = { "pwm2", };
+static const char *jz4775_pwm3_groups[] = { "pwm3", };
+static const char *jz4775_mac_groups[] = {
+ "mac-rmii", "mac-mii", "mac-rgmii", "mac-gmii",
+};
+static const char *jz4775_otg_groups[] = { "otg-vbus", };
+
+static const struct function_desc jz4775_functions[] = {
+ { "uart0", jz4775_uart0_groups, ARRAY_SIZE(jz4775_uart0_groups), },
+ { "uart1", jz4775_uart1_groups, ARRAY_SIZE(jz4775_uart1_groups), },
+ { "uart2", jz4775_uart2_groups, ARRAY_SIZE(jz4775_uart2_groups), },
+ { "uart3", jz4775_uart3_groups, ARRAY_SIZE(jz4775_uart3_groups), },
+ { "ssi", jz4775_ssi_groups, ARRAY_SIZE(jz4775_ssi_groups), },
+ { "mmc0", jz4775_mmc0_groups, ARRAY_SIZE(jz4775_mmc0_groups), },
+ { "mmc1", jz4775_mmc1_groups, ARRAY_SIZE(jz4775_mmc1_groups), },
+ { "mmc2", jz4775_mmc2_groups, ARRAY_SIZE(jz4775_mmc2_groups), },
+ { "nemc", jz4775_nemc_groups, ARRAY_SIZE(jz4775_nemc_groups), },
+ { "nemc-cs1", jz4775_cs1_groups, ARRAY_SIZE(jz4775_cs1_groups), },
+ { "nemc-cs2", jz4775_cs2_groups, ARRAY_SIZE(jz4775_cs2_groups), },
+ { "nemc-cs3", jz4775_cs3_groups, ARRAY_SIZE(jz4775_cs3_groups), },
+ { "i2c0", jz4775_i2c0_groups, ARRAY_SIZE(jz4775_i2c0_groups), },
+ { "i2c1", jz4775_i2c1_groups, ARRAY_SIZE(jz4775_i2c1_groups), },
+ { "i2c2", jz4775_i2c2_groups, ARRAY_SIZE(jz4775_i2c2_groups), },
+ { "i2s", jz4775_i2s_groups, ARRAY_SIZE(jz4775_i2s_groups), },
+ { "dmic", jz4775_dmic_groups, ARRAY_SIZE(jz4775_dmic_groups), },
+ { "cim", jz4775_cim_groups, ARRAY_SIZE(jz4775_cim_groups), },
+ { "lcd", jz4775_lcd_groups, ARRAY_SIZE(jz4775_lcd_groups), },
+ { "pwm0", jz4775_pwm0_groups, ARRAY_SIZE(jz4775_pwm0_groups), },
+ { "pwm1", jz4775_pwm1_groups, ARRAY_SIZE(jz4775_pwm1_groups), },
+ { "pwm2", jz4775_pwm2_groups, ARRAY_SIZE(jz4775_pwm2_groups), },
+ { "pwm3", jz4775_pwm3_groups, ARRAY_SIZE(jz4775_pwm3_groups), },
+ { "mac", jz4775_mac_groups, ARRAY_SIZE(jz4775_mac_groups), },
+ { "otg", jz4775_otg_groups, ARRAY_SIZE(jz4775_otg_groups), },
+};
+
+static const struct ingenic_chip_info jz4775_chip_info = {
+ .num_chips = 7,
+ .reg_offset = 0x100,
+ .version = ID_JZ4775,
+ .groups = jz4775_groups,
+ .num_groups = ARRAY_SIZE(jz4775_groups),
+ .functions = jz4775_functions,
+ .num_functions = ARRAY_SIZE(jz4775_functions),
+ .pull_ups = jz4775_pull_ups,
+ .pull_downs = jz4775_pull_downs,
+};
+
static const u32 jz4780_pull_ups[6] = {
0x3fffffff, 0xfff0f3fc, 0x0fffffff, 0xffff4fff, 0xfffffb7c, 0x7fa7f00f,
};
@@ -927,6 +1600,7 @@ static int jz4780_i2s_data_rx_pins[] = { 0x86, };
static int jz4780_i2s_clk_txrx_pins[] = { 0x6c, 0x6d, };
static int jz4780_i2s_clk_rx_pins[] = { 0x88, 0x89, };
static int jz4780_i2s_sysclk_pins[] = { 0x85, };
+static int jz4780_dmic_pins[] = { 0x32, 0x33, };
static int jz4780_hdmi_ddc_pins[] = { 0xb9, 0xb8, };
static u8 jz4780_i2s_clk_txrx_funcs[] = { 1, 0, };
@@ -1025,11 +1699,16 @@ static const struct group_desc jz4780_groups[] = {
jz4780_i2s_clk_txrx_funcs),
INGENIC_PIN_GROUP("i2s-clk-rx", jz4780_i2s_clk_rx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", jz4780_i2s_sysclk, 2),
+ INGENIC_PIN_GROUP("dmic", jz4780_dmic, 1),
INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc, 0),
INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit, 0),
INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4770_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4770_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4770_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-special", jz4770_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4770_lcd_generic, 0),
INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2, 0),
@@ -1077,6 +1756,7 @@ static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
static const char *jz4780_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
};
+static const char *jz4780_dmic_groups[] = { "dmic", };
static const char *jz4780_cim_groups[] = { "cim-data", };
static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
@@ -1104,6 +1784,7 @@ static const struct function_desc jz4780_functions[] = {
{ "i2c3", jz4780_i2c3_groups, ARRAY_SIZE(jz4780_i2c3_groups), },
{ "i2c4", jz4780_i2c4_groups, ARRAY_SIZE(jz4780_i2c4_groups), },
{ "i2s", jz4780_i2s_groups, ARRAY_SIZE(jz4780_i2s_groups), },
+ { "dmic", jz4780_dmic_groups, ARRAY_SIZE(jz4780_dmic_groups), },
{ "cim", jz4780_cim_groups, ARRAY_SIZE(jz4780_cim_groups), },
{ "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
{ "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
@@ -1189,6 +1870,8 @@ static int x1000_i2s_data_tx_pins[] = { 0x24, };
static int x1000_i2s_data_rx_pins[] = { 0x23, };
static int x1000_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
static int x1000_i2s_sysclk_pins[] = { 0x20, };
+static int x1000_dmic0_pins[] = { 0x35, 0x36, };
+static int x1000_dmic1_pins[] = { 0x25, };
static int x1000_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1254,10 +1937,11 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("i2s-data-rx", x1000_i2s_data_rx, 1),
INGENIC_PIN_GROUP("i2s-clk-txrx", x1000_i2s_clk_txrx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", x1000_i2s_sysclk, 1),
+ INGENIC_PIN_GROUP("dmic0", x1000_dmic0, 0),
+ INGENIC_PIN_GROUP("dmic1", x1000_dmic1, 1),
INGENIC_PIN_GROUP("cim-data", x1000_cim, 2),
INGENIC_PIN_GROUP("lcd-8bit", x1000_lcd_8bit, 1),
INGENIC_PIN_GROUP("lcd-16bit", x1000_lcd_16bit, 1),
- { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0", x1000_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", x1000_pwm_pwm1, 1),
INGENIC_PIN_GROUP("pwm2", x1000_pwm_pwm2, 1),
@@ -1298,10 +1982,9 @@ static const char *x1000_i2c2_groups[] = { "i2c2-data", };
static const char *x1000_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
};
+static const char *x1000_dmic_groups[] = { "dmic0", "dmic1", };
static const char *x1000_cim_groups[] = { "cim-data", };
-static const char *x1000_lcd_groups[] = {
- "lcd-8bit", "lcd-16bit", "lcd-no-pins",
-};
+static const char *x1000_lcd_groups[] = { "lcd-8bit", "lcd-16bit", };
static const char *x1000_pwm0_groups[] = { "pwm0", };
static const char *x1000_pwm1_groups[] = { "pwm1", };
static const char *x1000_pwm2_groups[] = { "pwm2", };
@@ -1324,6 +2007,7 @@ static const struct function_desc x1000_functions[] = {
{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
{ "i2s", x1000_i2s_groups, ARRAY_SIZE(x1000_i2s_groups), },
+ { "dmic", x1000_dmic_groups, ARRAY_SIZE(x1000_dmic_groups), },
{ "cim", x1000_cim_groups, ARRAY_SIZE(x1000_cim_groups), },
{ "lcd", x1000_lcd_groups, ARRAY_SIZE(x1000_lcd_groups), },
{ "pwm0", x1000_pwm0_groups, ARRAY_SIZE(x1000_pwm0_groups), },
@@ -1363,6 +2047,8 @@ static int x1500_i2s_data_tx_pins[] = { 0x24, };
static int x1500_i2s_data_rx_pins[] = { 0x23, };
static int x1500_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
static int x1500_i2s_sysclk_pins[] = { 0x20, };
+static int x1500_dmic0_pins[] = { 0x35, 0x36, };
+static int x1500_dmic1_pins[] = { 0x25, };
static int x1500_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1392,8 +2078,9 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("i2s-data-rx", x1500_i2s_data_rx, 1),
INGENIC_PIN_GROUP("i2s-clk-txrx", x1500_i2s_clk_txrx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", x1500_i2s_sysclk, 1),
+ INGENIC_PIN_GROUP("dmic0", x1500_dmic0, 0),
+ INGENIC_PIN_GROUP("dmic1", x1500_dmic1, 1),
INGENIC_PIN_GROUP("cim-data", x1500_cim, 2),
- { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0", x1500_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", x1500_pwm_pwm1, 1),
INGENIC_PIN_GROUP("pwm2", x1500_pwm_pwm2, 1),
@@ -1413,8 +2100,8 @@ static const char *x1500_i2c2_groups[] = { "i2c2-data", };
static const char *x1500_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
};
+static const char *x1500_dmic_groups[] = { "dmic0", "dmic1", };
static const char *x1500_cim_groups[] = { "cim-data", };
-static const char *x1500_lcd_groups[] = { "lcd-no-pins", };
static const char *x1500_pwm0_groups[] = { "pwm0", };
static const char *x1500_pwm1_groups[] = { "pwm1", };
static const char *x1500_pwm2_groups[] = { "pwm2", };
@@ -1431,8 +2118,8 @@ static const struct function_desc x1500_functions[] = {
{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
{ "i2s", x1500_i2s_groups, ARRAY_SIZE(x1500_i2s_groups), },
+ { "dmic", x1500_dmic_groups, ARRAY_SIZE(x1500_dmic_groups), },
{ "cim", x1500_cim_groups, ARRAY_SIZE(x1500_cim_groups), },
- { "lcd", x1500_lcd_groups, ARRAY_SIZE(x1500_lcd_groups), },
{ "pwm0", x1500_pwm0_groups, ARRAY_SIZE(x1500_pwm0_groups), },
{ "pwm1", x1500_pwm1_groups, ARRAY_SIZE(x1500_pwm1_groups), },
{ "pwm2", x1500_pwm2_groups, ARRAY_SIZE(x1500_pwm2_groups), },
@@ -1471,16 +2158,16 @@ static int x1830_ssi0_gpc_pins[] = { 0x4d, };
static int x1830_ssi0_ce0_pins[] = { 0x50, };
static int x1830_ssi0_ce1_pins[] = { 0x4e, };
static int x1830_ssi1_dt_c_pins[] = { 0x53, };
-static int x1830_ssi1_dr_c_pins[] = { 0x54, };
-static int x1830_ssi1_clk_c_pins[] = { 0x57, };
-static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
-static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
-static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
static int x1830_ssi1_dt_d_pins[] = { 0x62, };
+static int x1830_ssi1_dr_c_pins[] = { 0x54, };
static int x1830_ssi1_dr_d_pins[] = { 0x63, };
+static int x1830_ssi1_clk_c_pins[] = { 0x57, };
static int x1830_ssi1_clk_d_pins[] = { 0x66, };
+static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
static int x1830_ssi1_gpc_d_pins[] = { 0x64, };
+static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
static int x1830_ssi1_ce0_d_pins[] = { 0x67, };
+static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
static int x1830_ssi1_ce1_d_pins[] = { 0x65, };
static int x1830_mmc0_1bit_pins[] = { 0x24, 0x25, 0x20, };
static int x1830_mmc0_4bit_pins[] = { 0x21, 0x22, 0x23, };
@@ -1494,11 +2181,15 @@ static int x1830_i2s_data_rx_pins[] = { 0x54, };
static int x1830_i2s_clk_txrx_pins[] = { 0x58, 0x52, };
static int x1830_i2s_clk_rx_pins[] = { 0x56, 0x55, };
static int x1830_i2s_sysclk_pins[] = { 0x57, };
-static int x1830_lcd_rgb_18bit_pins[] = {
+static int x1830_dmic0_pins[] = { 0x48, 0x59, };
+static int x1830_dmic1_pins[] = { 0x5a, };
+static int x1830_lcd_tft_8bit_pins[] = {
0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x6c, 0x6d, 0x6e, 0x6f,
- 0x70, 0x71, 0x72, 0x73, 0x76, 0x77,
- 0x78, 0x79, 0x7a, 0x7b,
+ 0x68, 0x73, 0x72, 0x69,
+};
+static int x1830_lcd_tft_24bit_pins[] = {
+ 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+ 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
};
static int x1830_lcd_slcd_8bit_pins[] = {
0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x6c, 0x6d,
@@ -1562,10 +2253,12 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("i2s-clk-txrx", x1830_i2s_clk_txrx, 0),
INGENIC_PIN_GROUP("i2s-clk-rx", x1830_i2s_clk_rx, 0),
INGENIC_PIN_GROUP("i2s-sysclk", x1830_i2s_sysclk, 0),
- INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit, 0),
+ INGENIC_PIN_GROUP("dmic0", x1830_dmic0, 2),
+ INGENIC_PIN_GROUP("dmic1", x1830_dmic1, 2),
+ INGENIC_PIN_GROUP("lcd-tft-8bit", x1830_lcd_tft_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-tft-24bit", x1830_lcd_tft_24bit, 0),
INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit, 1),
INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit, 1),
- { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b, 0),
INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c, 1),
INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b, 0),
@@ -1607,8 +2300,9 @@ static const char *x1830_i2c2_groups[] = { "i2c2-data", };
static const char *x1830_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
};
+static const char *x1830_dmic_groups[] = { "dmic0", "dmic1", };
static const char *x1830_lcd_groups[] = {
- "lcd-rgb-18bit", "lcd-slcd-8bit", "lcd-slcd-16bit", "lcd-no-pins",
+ "lcd-tft-8bit", "lcd-tft-24bit", "lcd-slcd-8bit", "lcd-slcd-16bit",
};
static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
@@ -1632,6 +2326,7 @@ static const struct function_desc x1830_functions[] = {
{ "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
{ "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
{ "i2s", x1830_i2s_groups, ARRAY_SIZE(x1830_i2s_groups), },
+ { "dmic", x1830_dmic_groups, ARRAY_SIZE(x1830_dmic_groups), },
{ "lcd", x1830_lcd_groups, ARRAY_SIZE(x1830_lcd_groups), },
{ "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
{ "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
@@ -1656,6 +2351,456 @@ static const struct ingenic_chip_info x1830_chip_info = {
.pull_downs = x1830_pull_downs,
};
+static const u32 x2000_pull_ups[5] = {
+ 0x0003ffff, 0xffffffff, 0x1ff0ffff, 0xc7fe3f3f, 0x8fff003f,
+};
+
+static const u32 x2000_pull_downs[5] = {
+ 0x0003ffff, 0xffffffff, 0x1ff0ffff, 0x00000000, 0x8fff003f,
+};
+
+static int x2000_uart0_data_pins[] = { 0x77, 0x78, };
+static int x2000_uart0_hwflow_pins[] = { 0x79, 0x7a, };
+static int x2000_uart1_data_pins[] = { 0x57, 0x58, };
+static int x2000_uart1_hwflow_pins[] = { 0x55, 0x56, };
+static int x2000_uart2_data_pins[] = { 0x7e, 0x7f, };
+static int x2000_uart3_data_c_pins[] = { 0x59, 0x5a, };
+static int x2000_uart3_data_d_pins[] = { 0x62, 0x63, };
+static int x2000_uart3_hwflow_c_pins[] = { 0x5b, 0x5c, };
+static int x2000_uart3_hwflow_d_pins[] = { 0x60, 0x61, };
+static int x2000_uart4_data_a_pins[] = { 0x02, 0x03, };
+static int x2000_uart4_data_c_pins[] = { 0x4b, 0x4c, };
+static int x2000_uart4_hwflow_a_pins[] = { 0x00, 0x01, };
+static int x2000_uart4_hwflow_c_pins[] = { 0x49, 0x4a, };
+static int x2000_uart5_data_a_pins[] = { 0x04, 0x05, };
+static int x2000_uart5_data_c_pins[] = { 0x45, 0x46, };
+static int x2000_uart6_data_a_pins[] = { 0x06, 0x07, };
+static int x2000_uart6_data_c_pins[] = { 0x47, 0x48, };
+static int x2000_uart7_data_a_pins[] = { 0x08, 0x09, };
+static int x2000_uart7_data_c_pins[] = { 0x41, 0x42, };
+static int x2000_uart8_data_pins[] = { 0x3c, 0x3d, };
+static int x2000_uart9_data_pins[] = { 0x3e, 0x3f, };
+static int x2000_sfc0_d_pins[] = { 0x73, 0x74, 0x75, 0x76, 0x71, 0x72, };
+static int x2000_sfc0_e_pins[] = { 0x92, 0x93, 0x94, 0x95, 0x90, 0x91, };
+static int x2000_sfc1_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
+static int x2000_ssi0_dt_b_pins[] = { 0x3e, };
+static int x2000_ssi0_dt_d_pins[] = { 0x69, };
+static int x2000_ssi0_dr_b_pins[] = { 0x3d, };
+static int x2000_ssi0_dr_d_pins[] = { 0x6a, };
+static int x2000_ssi0_clk_b_pins[] = { 0x3f, };
+static int x2000_ssi0_clk_d_pins[] = { 0x68, };
+static int x2000_ssi0_ce0_b_pins[] = { 0x3c, };
+static int x2000_ssi0_ce0_d_pins[] = { 0x6d, };
+static int x2000_ssi1_dt_c_pins[] = { 0x4b, };
+static int x2000_ssi1_dt_d_pins[] = { 0x72, };
+static int x2000_ssi1_dt_e_pins[] = { 0x91, };
+static int x2000_ssi1_dr_c_pins[] = { 0x4a, };
+static int x2000_ssi1_dr_d_pins[] = { 0x73, };
+static int x2000_ssi1_dr_e_pins[] = { 0x92, };
+static int x2000_ssi1_clk_c_pins[] = { 0x4c, };
+static int x2000_ssi1_clk_d_pins[] = { 0x71, };
+static int x2000_ssi1_clk_e_pins[] = { 0x90, };
+static int x2000_ssi1_ce0_c_pins[] = { 0x49, };
+static int x2000_ssi1_ce0_d_pins[] = { 0x76, };
+static int x2000_ssi1_ce0_e_pins[] = { 0x95, };
+static int x2000_mmc0_1bit_pins[] = { 0x71, 0x72, 0x73, };
+static int x2000_mmc0_4bit_pins[] = { 0x74, 0x75, 0x76, };
+static int x2000_mmc0_8bit_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
+static int x2000_mmc1_1bit_pins[] = { 0x68, 0x69, 0x6a, };
+static int x2000_mmc1_4bit_pins[] = { 0x6b, 0x6c, 0x6d, };
+static int x2000_mmc2_1bit_pins[] = { 0x80, 0x81, 0x82, };
+static int x2000_mmc2_4bit_pins[] = { 0x83, 0x84, 0x85, };
+static int x2000_emc_8bit_data_pins[] = {
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+};
+static int x2000_emc_16bit_data_pins[] = {
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+};
+static int x2000_emc_addr_pins[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c,
+};
+static int x2000_emc_rd_we_pins[] = { 0x2d, 0x2e, };
+static int x2000_emc_wait_pins[] = { 0x2f, };
+static int x2000_emc_cs1_pins[] = { 0x57, };
+static int x2000_emc_cs2_pins[] = { 0x58, };
+static int x2000_i2c0_pins[] = { 0x4e, 0x4d, };
+static int x2000_i2c1_c_pins[] = { 0x58, 0x57, };
+static int x2000_i2c1_d_pins[] = { 0x6c, 0x6b, };
+static int x2000_i2c2_b_pins[] = { 0x37, 0x36, };
+static int x2000_i2c2_d_pins[] = { 0x75, 0x74, };
+static int x2000_i2c2_e_pins[] = { 0x94, 0x93, };
+static int x2000_i2c3_a_pins[] = { 0x11, 0x10, };
+static int x2000_i2c3_d_pins[] = { 0x7f, 0x7e, };
+static int x2000_i2c4_c_pins[] = { 0x5a, 0x59, };
+static int x2000_i2c4_d_pins[] = { 0x61, 0x60, };
+static int x2000_i2c5_c_pins[] = { 0x5c, 0x5b, };
+static int x2000_i2c5_d_pins[] = { 0x65, 0x64, };
+static int x2000_i2s1_data_tx_pins[] = { 0x47, };
+static int x2000_i2s1_data_rx_pins[] = { 0x44, };
+static int x2000_i2s1_clk_tx_pins[] = { 0x45, 0x46, };
+static int x2000_i2s1_clk_rx_pins[] = { 0x42, 0x43, };
+static int x2000_i2s1_sysclk_tx_pins[] = { 0x48, };
+static int x2000_i2s1_sysclk_rx_pins[] = { 0x41, };
+static int x2000_i2s2_data_rx0_pins[] = { 0x0a, };
+static int x2000_i2s2_data_rx1_pins[] = { 0x0b, };
+static int x2000_i2s2_data_rx2_pins[] = { 0x0c, };
+static int x2000_i2s2_data_rx3_pins[] = { 0x0d, };
+static int x2000_i2s2_clk_rx_pins[] = { 0x11, 0x09, };
+static int x2000_i2s2_sysclk_rx_pins[] = { 0x07, };
+static int x2000_i2s3_data_tx0_pins[] = { 0x03, };
+static int x2000_i2s3_data_tx1_pins[] = { 0x04, };
+static int x2000_i2s3_data_tx2_pins[] = { 0x05, };
+static int x2000_i2s3_data_tx3_pins[] = { 0x06, };
+static int x2000_i2s3_clk_tx_pins[] = { 0x10, 0x02, };
+static int x2000_i2s3_sysclk_tx_pins[] = { 0x00, };
+static int x2000_dmic0_pins[] = { 0x54, 0x55, };
+static int x2000_dmic1_pins[] = { 0x56, };
+static int x2000_dmic2_pins[] = { 0x57, };
+static int x2000_dmic3_pins[] = { 0x58, };
+static int x2000_cim_8bit_pins[] = {
+ 0x0e, 0x0c, 0x0d, 0x4f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+};
+static int x2000_cim_12bit_pins[] = { 0x08, 0x09, 0x0a, 0x0b, };
+static int x2000_lcd_tft_8bit_pins[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x38, 0x3a, 0x39, 0x3b,
+};
+static int x2000_lcd_tft_16bit_pins[] = {
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+};
+static int x2000_lcd_tft_18bit_pins[] = {
+ 0x30, 0x31,
+};
+static int x2000_lcd_tft_24bit_pins[] = {
+ 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+};
+static int x2000_lcd_slcd_8bit_pins[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x3a, 0x38, 0x3b, 0x30, 0x39,
+};
+static int x2000_pwm_pwm0_c_pins[] = { 0x40, };
+static int x2000_pwm_pwm0_d_pins[] = { 0x7e, };
+static int x2000_pwm_pwm1_c_pins[] = { 0x41, };
+static int x2000_pwm_pwm1_d_pins[] = { 0x7f, };
+static int x2000_pwm_pwm2_c_pins[] = { 0x42, };
+static int x2000_pwm_pwm2_e_pins[] = { 0x80, };
+static int x2000_pwm_pwm3_c_pins[] = { 0x43, };
+static int x2000_pwm_pwm3_e_pins[] = { 0x81, };
+static int x2000_pwm_pwm4_c_pins[] = { 0x44, };
+static int x2000_pwm_pwm4_e_pins[] = { 0x82, };
+static int x2000_pwm_pwm5_c_pins[] = { 0x45, };
+static int x2000_pwm_pwm5_e_pins[] = { 0x83, };
+static int x2000_pwm_pwm6_c_pins[] = { 0x46, };
+static int x2000_pwm_pwm6_e_pins[] = { 0x84, };
+static int x2000_pwm_pwm7_c_pins[] = { 0x47, };
+static int x2000_pwm_pwm7_e_pins[] = { 0x85, };
+static int x2000_pwm_pwm8_pins[] = { 0x48, };
+static int x2000_pwm_pwm9_pins[] = { 0x49, };
+static int x2000_pwm_pwm10_pins[] = { 0x4a, };
+static int x2000_pwm_pwm11_pins[] = { 0x4b, };
+static int x2000_pwm_pwm12_pins[] = { 0x4c, };
+static int x2000_pwm_pwm13_pins[] = { 0x4d, };
+static int x2000_pwm_pwm14_pins[] = { 0x4e, };
+static int x2000_pwm_pwm15_pins[] = { 0x4f, };
+static int x2000_mac0_rmii_pins[] = {
+ 0x4b, 0x47, 0x46, 0x4a, 0x43, 0x42, 0x4c, 0x4d, 0x4e, 0x41,
+};
+static int x2000_mac0_rgmii_pins[] = {
+ 0x4b, 0x49, 0x48, 0x47, 0x46, 0x4a, 0x45, 0x44, 0x43, 0x42,
+ 0x4c, 0x4d, 0x4f, 0x4e, 0x41,
+};
+static int x2000_mac1_rmii_pins[] = {
+ 0x32, 0x2d, 0x2c, 0x31, 0x29, 0x28, 0x33, 0x34, 0x35, 0x37,
+};
+static int x2000_mac1_rgmii_pins[] = {
+ 0x32, 0x2f, 0x2e, 0x2d, 0x2c, 0x31, 0x2b, 0x2a, 0x29, 0x28,
+ 0x33, 0x34, 0x36, 0x35, 0x37,
+};
+static int x2000_otg_pins[] = { 0x96, };
+
+static u8 x2000_cim_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, };
+
+static const struct group_desc x2000_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x2000_uart0_data, 2),
+ INGENIC_PIN_GROUP("uart0-hwflow", x2000_uart0_hwflow, 2),
+ INGENIC_PIN_GROUP("uart1-data", x2000_uart1_data, 1),
+ INGENIC_PIN_GROUP("uart1-hwflow", x2000_uart1_hwflow, 1),
+ INGENIC_PIN_GROUP("uart2-data", x2000_uart2_data, 0),
+ INGENIC_PIN_GROUP("uart3-data-c", x2000_uart3_data_c, 0),
+ INGENIC_PIN_GROUP("uart3-data-d", x2000_uart3_data_d, 1),
+ INGENIC_PIN_GROUP("uart3-hwflow-c", x2000_uart3_hwflow_c, 0),
+ INGENIC_PIN_GROUP("uart3-hwflow-d", x2000_uart3_hwflow_d, 1),
+ INGENIC_PIN_GROUP("uart4-data-a", x2000_uart4_data_a, 1),
+ INGENIC_PIN_GROUP("uart4-data-c", x2000_uart4_data_c, 3),
+ INGENIC_PIN_GROUP("uart4-hwflow-a", x2000_uart4_hwflow_a, 1),
+ INGENIC_PIN_GROUP("uart4-hwflow-c", x2000_uart4_hwflow_c, 3),
+ INGENIC_PIN_GROUP("uart5-data-a", x2000_uart5_data_a, 1),
+ INGENIC_PIN_GROUP("uart5-data-c", x2000_uart5_data_c, 3),
+ INGENIC_PIN_GROUP("uart6-data-a", x2000_uart6_data_a, 1),
+ INGENIC_PIN_GROUP("uart6-data-c", x2000_uart6_data_c, 3),
+ INGENIC_PIN_GROUP("uart7-data-a", x2000_uart7_data_a, 1),
+ INGENIC_PIN_GROUP("uart7-data-c", x2000_uart7_data_c, 3),
+ INGENIC_PIN_GROUP("uart8-data", x2000_uart8_data, 3),
+ INGENIC_PIN_GROUP("uart9-data", x2000_uart9_data, 3),
+ INGENIC_PIN_GROUP("sfc0-d", x2000_sfc0_d, 1),
+ INGENIC_PIN_GROUP("sfc0-e", x2000_sfc0_e, 0),
+ INGENIC_PIN_GROUP("sfc1", x2000_sfc1, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-b", x2000_ssi0_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-d", x2000_ssi0_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-b", x2000_ssi0_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-d", x2000_ssi0_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-b", x2000_ssi0_clk_b, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-d", x2000_ssi0_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", x2000_ssi0_ce0_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", x2000_ssi0_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x2000_ssi1_dt_c, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x2000_ssi1_dt_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-e", x2000_ssi1_dt_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x2000_ssi1_dr_c, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x2000_ssi1_dr_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-e", x2000_ssi1_dr_e, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x2000_ssi1_clk_c, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x2000_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e", x2000_ssi1_clk_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-c", x2000_ssi1_ce0_c, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", x2000_ssi1_ce0_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-e", x2000_ssi1_ce0_e, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit", x2000_mmc0_1bit, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit", x2000_mmc0_4bit, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit", x2000_mmc0_8bit, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit", x2000_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", x2000_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit", x2000_mmc2_1bit, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit", x2000_mmc2_4bit, 0),
+ INGENIC_PIN_GROUP("emc-8bit-data", x2000_emc_8bit_data, 0),
+ INGENIC_PIN_GROUP("emc-16bit-data", x2000_emc_16bit_data, 0),
+ INGENIC_PIN_GROUP("emc-addr", x2000_emc_addr, 0),
+ INGENIC_PIN_GROUP("emc-rd-we", x2000_emc_rd_we, 0),
+ INGENIC_PIN_GROUP("emc-wait", x2000_emc_wait, 0),
+ INGENIC_PIN_GROUP("emc-cs1", x2000_emc_cs1, 3),
+ INGENIC_PIN_GROUP("emc-cs2", x2000_emc_cs2, 3),
+ INGENIC_PIN_GROUP("i2c0-data", x2000_i2c0, 3),
+ INGENIC_PIN_GROUP("i2c1-data-c", x2000_i2c1_c, 2),
+ INGENIC_PIN_GROUP("i2c1-data-d", x2000_i2c1_d, 1),
+ INGENIC_PIN_GROUP("i2c2-data-b", x2000_i2c2_b, 2),
+ INGENIC_PIN_GROUP("i2c2-data-d", x2000_i2c2_d, 2),
+ INGENIC_PIN_GROUP("i2c2-data-e", x2000_i2c2_e, 1),
+ INGENIC_PIN_GROUP("i2c3-data-a", x2000_i2c3_a, 0),
+ INGENIC_PIN_GROUP("i2c3-data-d", x2000_i2c3_d, 1),
+ INGENIC_PIN_GROUP("i2c4-data-c", x2000_i2c4_c, 1),
+ INGENIC_PIN_GROUP("i2c4-data-d", x2000_i2c4_d, 2),
+ INGENIC_PIN_GROUP("i2c5-data-c", x2000_i2c5_c, 1),
+ INGENIC_PIN_GROUP("i2c5-data-d", x2000_i2c5_d, 1),
+ INGENIC_PIN_GROUP("i2s1-data-tx", x2000_i2s1_data_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-data-rx", x2000_i2s1_data_rx, 2),
+ INGENIC_PIN_GROUP("i2s1-clk-tx", x2000_i2s1_clk_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-clk-rx", x2000_i2s1_clk_rx, 2),
+ INGENIC_PIN_GROUP("i2s1-sysclk-tx", x2000_i2s1_sysclk_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-sysclk-rx", x2000_i2s1_sysclk_rx, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx0", x2000_i2s2_data_rx0, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx1", x2000_i2s2_data_rx1, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx2", x2000_i2s2_data_rx2, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx3", x2000_i2s2_data_rx3, 2),
+ INGENIC_PIN_GROUP("i2s2-clk-rx", x2000_i2s2_clk_rx, 2),
+ INGENIC_PIN_GROUP("i2s2-sysclk-rx", x2000_i2s2_sysclk_rx, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx0", x2000_i2s3_data_tx0, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx1", x2000_i2s3_data_tx1, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx2", x2000_i2s3_data_tx2, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx3", x2000_i2s3_data_tx3, 2),
+ INGENIC_PIN_GROUP("i2s3-clk-tx", x2000_i2s3_clk_tx, 2),
+ INGENIC_PIN_GROUP("i2s3-sysclk-tx", x2000_i2s3_sysclk_tx, 2),
+ INGENIC_PIN_GROUP("dmic0", x2000_dmic0, 0),
+ INGENIC_PIN_GROUP("dmic1", x2000_dmic1, 0),
+ INGENIC_PIN_GROUP("dmic2", x2000_dmic2, 0),
+ INGENIC_PIN_GROUP("dmic3", x2000_dmic3, 0),
+ INGENIC_PIN_GROUP_FUNCS("cim-data-8bit", x2000_cim_8bit,
+ x2000_cim_8bit_funcs),
+ INGENIC_PIN_GROUP("cim-data-12bit", x2000_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-tft-8bit", x2000_lcd_tft_8bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-16bit", x2000_lcd_tft_16bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-18bit", x2000_lcd_tft_18bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-24bit", x2000_lcd_tft_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-slcd-8bit", x2000_lcd_slcd_8bit, 2),
+ INGENIC_PIN_GROUP("lcd-slcd-16bit", x2000_lcd_tft_16bit, 2),
+ INGENIC_PIN_GROUP("pwm0-c", x2000_pwm_pwm0_c, 0),
+ INGENIC_PIN_GROUP("pwm0-d", x2000_pwm_pwm0_d, 2),
+ INGENIC_PIN_GROUP("pwm1-c", x2000_pwm_pwm1_c, 0),
+ INGENIC_PIN_GROUP("pwm1-d", x2000_pwm_pwm1_d, 2),
+ INGENIC_PIN_GROUP("pwm2-c", x2000_pwm_pwm2_c, 0),
+ INGENIC_PIN_GROUP("pwm2-e", x2000_pwm_pwm2_e, 1),
+ INGENIC_PIN_GROUP("pwm3-c", x2000_pwm_pwm3_c, 0),
+ INGENIC_PIN_GROUP("pwm3-e", x2000_pwm_pwm3_e, 1),
+ INGENIC_PIN_GROUP("pwm4-c", x2000_pwm_pwm4_c, 0),
+ INGENIC_PIN_GROUP("pwm4-e", x2000_pwm_pwm4_e, 1),
+ INGENIC_PIN_GROUP("pwm5-c", x2000_pwm_pwm5_c, 0),
+ INGENIC_PIN_GROUP("pwm5-e", x2000_pwm_pwm5_e, 1),
+ INGENIC_PIN_GROUP("pwm6-c", x2000_pwm_pwm6_c, 0),
+ INGENIC_PIN_GROUP("pwm6-e", x2000_pwm_pwm6_e, 1),
+ INGENIC_PIN_GROUP("pwm7-c", x2000_pwm_pwm7_c, 0),
+ INGENIC_PIN_GROUP("pwm7-e", x2000_pwm_pwm7_e, 1),
+ INGENIC_PIN_GROUP("pwm8", x2000_pwm_pwm8, 0),
+ INGENIC_PIN_GROUP("pwm9", x2000_pwm_pwm9, 0),
+ INGENIC_PIN_GROUP("pwm10", x2000_pwm_pwm10, 0),
+ INGENIC_PIN_GROUP("pwm11", x2000_pwm_pwm11, 0),
+ INGENIC_PIN_GROUP("pwm12", x2000_pwm_pwm12, 0),
+ INGENIC_PIN_GROUP("pwm13", x2000_pwm_pwm13, 0),
+ INGENIC_PIN_GROUP("pwm14", x2000_pwm_pwm14, 0),
+ INGENIC_PIN_GROUP("pwm15", x2000_pwm_pwm15, 0),
+ INGENIC_PIN_GROUP("mac0-rmii", x2000_mac0_rmii, 1),
+ INGENIC_PIN_GROUP("mac0-rgmii", x2000_mac0_rgmii, 1),
+ INGENIC_PIN_GROUP("mac1-rmii", x2000_mac1_rmii, 3),
+ INGENIC_PIN_GROUP("mac1-rgmii", x2000_mac1_rgmii, 3),
+ INGENIC_PIN_GROUP("otg-vbus", x2000_otg, 0),
+};
+
+static const char *x2000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *x2000_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *x2000_uart2_groups[] = { "uart2-data", };
+static const char *x2000_uart3_groups[] = {
+ "uart3-data-c", "uart3-data-d", "uart3-hwflow-c", "uart3-hwflow-d",
+};
+static const char *x2000_uart4_groups[] = {
+ "uart4-data-a", "uart4-data-c", "uart4-hwflow-a", "uart4-hwflow-c",
+};
+static const char *x2000_uart5_groups[] = { "uart5-data-a", "uart5-data-c", };
+static const char *x2000_uart6_groups[] = { "uart6-data-a", "uart6-data-c", };
+static const char *x2000_uart7_groups[] = { "uart7-data-a", "uart7-data-c", };
+static const char *x2000_uart8_groups[] = { "uart8-data", };
+static const char *x2000_uart9_groups[] = { "uart9-data", };
+static const char *x2000_sfc_groups[] = { "sfc0-d", "sfc0-e", "sfc1", };
+static const char *x2000_ssi0_groups[] = {
+ "ssi0-dt-b", "ssi0-dt-d",
+ "ssi0-dr-b", "ssi0-dr-d",
+ "ssi0-clk-b", "ssi0-clk-d",
+ "ssi0-ce0-b", "ssi0-ce0-d",
+};
+static const char *x2000_ssi1_groups[] = {
+ "ssi1-dt-c", "ssi1-dt-d", "ssi1-dt-e",
+ "ssi1-dr-c", "ssi1-dr-d", "ssi1-dr-e",
+ "ssi1-clk-c", "ssi1-clk-d", "ssi1-clk-e",
+ "ssi1-ce0-c", "ssi1-ce0-d", "ssi1-ce0-e",
+};
+static const char *x2000_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", "mmc0-8bit", };
+static const char *x2000_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *x2000_mmc2_groups[] = { "mmc2-1bit", "mmc2-4bit", };
+static const char *x2000_emc_groups[] = {
+ "emc-8bit-data", "emc-16bit-data",
+ "emc-addr", "emc-rd-we", "emc-wait",
+};
+static const char *x2000_cs1_groups[] = { "emc-cs1", };
+static const char *x2000_cs2_groups[] = { "emc-cs2", };
+static const char *x2000_i2c0_groups[] = { "i2c0-data", };
+static const char *x2000_i2c1_groups[] = { "i2c1-data-c", "i2c1-data-d", };
+static const char *x2000_i2c2_groups[] = { "i2c2-data-b", "i2c2-data-d", };
+static const char *x2000_i2c3_groups[] = { "i2c3-data-a", "i2c3-data-d", };
+static const char *x2000_i2c4_groups[] = { "i2c4-data-c", "i2c4-data-d", };
+static const char *x2000_i2c5_groups[] = { "i2c5-data-c", "i2c5-data-d", };
+static const char *x2000_i2s1_groups[] = {
+ "i2s1-data-tx", "i2s1-data-rx",
+ "i2s1-clk-tx", "i2s1-clk-rx",
+ "i2s1-sysclk-tx", "i2s1-sysclk-rx",
+};
+static const char *x2000_i2s2_groups[] = {
+ "i2s2-data-rx0", "i2s2-data-rx1", "i2s2-data-rx2", "i2s2-data-rx3",
+ "i2s2-clk-rx", "i2s2-sysclk-rx",
+};
+static const char *x2000_i2s3_groups[] = {
+ "i2s3-data-tx0", "i2s3-data-tx1", "i2s3-data-tx2", "i2s3-data-tx3",
+ "i2s3-clk-tx", "i2s3-sysclk-tx",
+};
+static const char *x2000_dmic_groups[] = { "dmic0", "dmic1", "dmic2", "dmic3", };
+static const char *x2000_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
+static const char *x2000_lcd_groups[] = {
+ "lcd-tft-8bit", "lcd-tft-16bit", "lcd-tft-18bit", "lcd-tft-24bit",
+ "lcd-slcd-8bit", "lcd-slcd-16bit",
+};
+static const char *x2000_pwm0_groups[] = { "pwm0-c", "pwm0-d", };
+static const char *x2000_pwm1_groups[] = { "pwm1-c", "pwm1-d", };
+static const char *x2000_pwm2_groups[] = { "pwm2-c", "pwm2-e", };
+static const char *x2000_pwm3_groups[] = { "pwm3-c", "pwm3-e", };
+static const char *x2000_pwm4_groups[] = { "pwm4-c", "pwm4-e", };
+static const char *x2000_pwm5_groups[] = { "pwm5-c", "pwm5-e", };
+static const char *x2000_pwm6_groups[] = { "pwm6-c", "pwm6-e", };
+static const char *x2000_pwm7_groups[] = { "pwm7-c", "pwm7-e", };
+static const char *x2000_pwm8_groups[] = { "pwm8", };
+static const char *x2000_pwm9_groups[] = { "pwm9", };
+static const char *x2000_pwm10_groups[] = { "pwm10", };
+static const char *x2000_pwm11_groups[] = { "pwm11", };
+static const char *x2000_pwm12_groups[] = { "pwm12", };
+static const char *x2000_pwm13_groups[] = { "pwm13", };
+static const char *x2000_pwm14_groups[] = { "pwm14", };
+static const char *x2000_pwm15_groups[] = { "pwm15", };
+static const char *x2000_mac0_groups[] = { "mac0-rmii", "mac0-rgmii", };
+static const char *x2000_mac1_groups[] = { "mac1-rmii", "mac1-rgmii", };
+static const char *x2000_otg_groups[] = { "otg-vbus", };
+
+static const struct function_desc x2000_functions[] = {
+ { "uart0", x2000_uart0_groups, ARRAY_SIZE(x2000_uart0_groups), },
+ { "uart1", x2000_uart1_groups, ARRAY_SIZE(x2000_uart1_groups), },
+ { "uart2", x2000_uart2_groups, ARRAY_SIZE(x2000_uart2_groups), },
+ { "uart3", x2000_uart3_groups, ARRAY_SIZE(x2000_uart3_groups), },
+ { "uart4", x2000_uart4_groups, ARRAY_SIZE(x2000_uart4_groups), },
+ { "uart5", x2000_uart5_groups, ARRAY_SIZE(x2000_uart5_groups), },
+ { "uart6", x2000_uart6_groups, ARRAY_SIZE(x2000_uart6_groups), },
+ { "uart7", x2000_uart7_groups, ARRAY_SIZE(x2000_uart7_groups), },
+ { "uart8", x2000_uart8_groups, ARRAY_SIZE(x2000_uart8_groups), },
+ { "uart9", x2000_uart9_groups, ARRAY_SIZE(x2000_uart9_groups), },
+ { "sfc", x2000_sfc_groups, ARRAY_SIZE(x2000_sfc_groups), },
+ { "ssi0", x2000_ssi0_groups, ARRAY_SIZE(x2000_ssi0_groups), },
+ { "ssi1", x2000_ssi1_groups, ARRAY_SIZE(x2000_ssi1_groups), },
+ { "mmc0", x2000_mmc0_groups, ARRAY_SIZE(x2000_mmc0_groups), },
+ { "mmc1", x2000_mmc1_groups, ARRAY_SIZE(x2000_mmc1_groups), },
+ { "mmc2", x2000_mmc2_groups, ARRAY_SIZE(x2000_mmc2_groups), },
+ { "emc", x2000_emc_groups, ARRAY_SIZE(x2000_emc_groups), },
+ { "emc-cs1", x2000_cs1_groups, ARRAY_SIZE(x2000_cs1_groups), },
+ { "emc-cs2", x2000_cs2_groups, ARRAY_SIZE(x2000_cs2_groups), },
+ { "i2c0", x2000_i2c0_groups, ARRAY_SIZE(x2000_i2c0_groups), },
+ { "i2c1", x2000_i2c1_groups, ARRAY_SIZE(x2000_i2c1_groups), },
+ { "i2c2", x2000_i2c2_groups, ARRAY_SIZE(x2000_i2c2_groups), },
+ { "i2c3", x2000_i2c3_groups, ARRAY_SIZE(x2000_i2c3_groups), },
+ { "i2c4", x2000_i2c4_groups, ARRAY_SIZE(x2000_i2c4_groups), },
+ { "i2c5", x2000_i2c5_groups, ARRAY_SIZE(x2000_i2c5_groups), },
+ { "i2s1", x2000_i2s1_groups, ARRAY_SIZE(x2000_i2s1_groups), },
+ { "i2s2", x2000_i2s2_groups, ARRAY_SIZE(x2000_i2s2_groups), },
+ { "i2s3", x2000_i2s3_groups, ARRAY_SIZE(x2000_i2s3_groups), },
+ { "dmic", x2000_dmic_groups, ARRAY_SIZE(x2000_dmic_groups), },
+ { "cim", x2000_cim_groups, ARRAY_SIZE(x2000_cim_groups), },
+ { "lcd", x2000_lcd_groups, ARRAY_SIZE(x2000_lcd_groups), },
+ { "pwm0", x2000_pwm0_groups, ARRAY_SIZE(x2000_pwm0_groups), },
+ { "pwm1", x2000_pwm1_groups, ARRAY_SIZE(x2000_pwm1_groups), },
+ { "pwm2", x2000_pwm2_groups, ARRAY_SIZE(x2000_pwm2_groups), },
+ { "pwm3", x2000_pwm3_groups, ARRAY_SIZE(x2000_pwm3_groups), },
+ { "pwm4", x2000_pwm4_groups, ARRAY_SIZE(x2000_pwm4_groups), },
+ { "pwm5", x2000_pwm5_groups, ARRAY_SIZE(x2000_pwm5_groups), },
+ { "pwm6", x2000_pwm6_groups, ARRAY_SIZE(x2000_pwm6_groups), },
+ { "pwm7", x2000_pwm7_groups, ARRAY_SIZE(x2000_pwm7_groups), },
+ { "pwm8", x2000_pwm8_groups, ARRAY_SIZE(x2000_pwm8_groups), },
+ { "pwm9", x2000_pwm9_groups, ARRAY_SIZE(x2000_pwm9_groups), },
+ { "pwm10", x2000_pwm10_groups, ARRAY_SIZE(x2000_pwm10_groups), },
+ { "pwm11", x2000_pwm11_groups, ARRAY_SIZE(x2000_pwm11_groups), },
+ { "pwm12", x2000_pwm12_groups, ARRAY_SIZE(x2000_pwm12_groups), },
+ { "pwm13", x2000_pwm13_groups, ARRAY_SIZE(x2000_pwm13_groups), },
+ { "pwm14", x2000_pwm14_groups, ARRAY_SIZE(x2000_pwm14_groups), },
+ { "pwm15", x2000_pwm15_groups, ARRAY_SIZE(x2000_pwm15_groups), },
+ { "mac0", x2000_mac0_groups, ARRAY_SIZE(x2000_mac0_groups), },
+ { "mac1", x2000_mac1_groups, ARRAY_SIZE(x2000_mac1_groups), },
+ { "otg", x2000_otg_groups, ARRAY_SIZE(x2000_otg_groups), },
+};
+
+static const struct ingenic_chip_info x2000_chip_info = {
+ .num_chips = 5,
+ .reg_offset = 0x100,
+ .version = ID_X2000,
+ .groups = x2000_groups,
+ .num_groups = ARRAY_SIZE(x2000_groups),
+ .functions = x2000_functions,
+ .num_functions = ARRAY_SIZE(x2000_functions),
+ .pull_ups = x2000_pull_ups,
+ .pull_downs = x2000_pull_downs,
+};
+
static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -1668,6 +2813,12 @@ static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
static void ingenic_gpio_set_bit(struct ingenic_gpio_chip *jzgc,
u8 reg, u8 offset, bool set)
{
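+ /* The JZ4730 has no set/clear registers, so do a read-modify-write. */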
+ if (jzgc->jzpc->info->version == ID_JZ4730) {
+ regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg,
+ BIT(offset), set ? BIT(offset) : 0);
+ return;
+ }
+
if (set)
reg = REG_SET(reg);
else
@@ -1695,6 +2846,20 @@ static void ingenic_gpio_shadow_set_bit_load(struct ingenic_gpio_chip *jzgc)
jzgc->gc.base / PINS_PER_GPIO_CHIP);
}
+static void jz4730_gpio_set_bits(struct ingenic_gpio_chip *jzgc,
+ u8 reg_upper, u8 reg_lower, u8 offset, u8 value)
+{
+ /*
+ * JZ4730 function and IRQ registers encode two bits per pin, split
+ * across an upper and a lower register of 16 pins each.
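+ *
+ * E.g. offset 20 selects reg_upper with idx = 4, so the two-bit value
+ * lands in bits 9:8 of the upper register.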
+ */
+ u8 reg = offset < JZ4730_PINS_PER_PAIRED_REG ? reg_lower : reg_upper;
+ unsigned int idx = offset % JZ4730_PINS_PER_PAIRED_REG;
+ unsigned int mask = GENMASK(1, 0) << (idx * 2);
+
+ regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg, mask, value << (idx * 2));
+}
+
static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
u8 offset)
{
@@ -1708,43 +2873,60 @@ static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
{
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_DATA, offset, !!value);
}
static void irq_set_type(struct ingenic_gpio_chip *jzgc,
u8 offset, unsigned int type)
{
u8 reg1, reg2;
- bool val1, val2;
+ bool val1, val2, val3;
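+
+ /*
+ * val1 drives PAT0 (JZ4770 and later) or DIR (JZ4740), val2 drives
+ * PAT1 or TRIG, and val3 drives the X2000-only EDG bit that enables
+ * both-edge detection in hardware.
+ */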
switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val1 = val2 = false;
+ val3 = true;
+ break;
case IRQ_TYPE_EDGE_RISING:
val1 = val2 = true;
+ val3 = false;
break;
case IRQ_TYPE_EDGE_FALLING:
- val1 = false;
+ val1 = val3 = false;
val2 = true;
break;
case IRQ_TYPE_LEVEL_HIGH:
val1 = true;
- val2 = false;
+ val2 = val3 = false;
break;
case IRQ_TYPE_LEVEL_LOW:
default:
- val1 = val2 = false;
+ val1 = val2 = val3 = false;
break;
}
if (jzgc->jzpc->info->version >= ID_JZ4770) {
reg1 = JZ4770_GPIO_PAT1;
reg2 = JZ4770_GPIO_PAT0;
- } else {
+ } else if (jzgc->jzpc->info->version >= ID_JZ4740) {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
+ } else {
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPDIR, offset, false);
+ jz4730_gpio_set_bits(jzgc, JZ4730_GPIO_GPIDUR,
+ JZ4730_GPIO_GPIDLR, offset, (val2 << 1) | val1);
+ return;
}
- if (jzgc->jzpc->info->version >= ID_X1000) {
+ if (jzgc->jzpc->info->version >= ID_X2000) {
+ ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
+ ingenic_gpio_shadow_set_bit_load(jzgc);
+ ingenic_gpio_set_bit(jzgc, X2000_GPIO_EDG, offset, val3);
+ } else if (jzgc->jzpc->info->version >= ID_X1000) {
ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
ingenic_gpio_shadow_set_bit_load(jzgc);
@@ -1758,16 +2940,24 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
- ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4740)
+ ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, true);
}
static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
- ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4740)
+ ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, false);
}
static void ingenic_gpio_irq_enable(struct irq_data *irqd)
@@ -1778,8 +2968,10 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, true);
ingenic_gpio_irq_unmask(irqd);
}
@@ -1794,8 +2986,10 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
@@ -1805,7 +2999,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
int irq = irqd->hwirq;
bool high;
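+ /*
+ * From the X2000 on, the EDG register provides both-edge triggering
+ * in hardware, so the edge flip below is only needed on older SoCs.
+ */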
- if (irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) {
+ if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
+ (jzgc->jzpc->info->version < ID_X2000)) {
/*
* Switch to an interrupt for the opposite edge to the one that
* triggered the interrupt being ACKed.
@@ -1819,8 +3014,10 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPFR, irq, false);
}
static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -1842,7 +3039,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
irq_set_handler_locked(irqd, handle_bad_irq);
}
- if (type == IRQ_TYPE_EDGE_BOTH) {
+ if ((type == IRQ_TYPE_EDGE_BOTH) && (jzgc->jzpc->info->version < ID_X2000)) {
/*
* The hardware does not support interrupts on both edges. The
* best we can do is to set up a single-edge interrupt and then
@@ -1876,8 +3073,10 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
if (jzgc->jzpc->info->version >= ID_JZ4770)
flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
+ else
+ flag = ingenic_gpio_read_reg(jzgc, JZ4730_GPIO_GPFR);
for_each_set_bit(i, &flag, 32)
generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
@@ -1913,13 +3112,26 @@ static int ingenic_gpio_direction_output(struct gpio_chip *gc,
}
static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
- unsigned int pin, u8 reg, bool set)
+ unsigned int pin, unsigned int reg, bool set)
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
- (set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
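+ /*
+ * JZ4740 and later provide dedicated set/clear mirror registers, so a
+ * single regmap_write() suffices; the JZ4730 lacks them and needs a
+ * read-modify-write via regmap_set_bits()/regmap_clear_bits().
+ */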
+ if (set) {
+ if (jzpc->info->version >= ID_JZ4740)
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(reg), BIT(idx));
+ else
+ regmap_set_bits(jzpc->map, offt * jzpc->info->reg_offset +
+ reg, BIT(idx));
+ } else {
+ if (jzpc->info->version >= ID_JZ4740)
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(reg), BIT(idx));
+ else
+ regmap_clear_bits(jzpc->map, offt * jzpc->info->reg_offset +
+ reg, BIT(idx));
+ }
}
static inline void ingenic_shadow_config_pin(struct ingenic_pinctrl *jzpc,
@@ -1938,8 +3150,24 @@ static inline void ingenic_shadow_config_pin_load(struct ingenic_pinctrl *jzpc,
pin / PINS_PER_GPIO_CHIP);
}
+static inline void jz4730_config_pin_function(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, u8 reg_upper, u8 reg_lower, u8 value)
+{
+ /*
+ * JZ4730 function and IRQ registers encode two bits per pin, split
+ * across an upper and a lower register of 16 pins each.
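+ *
+ * Pins 0-15 of a port go through reg_lower (e.g. GPALR), pins 16-31
+ * through reg_upper (e.g. GPAUR).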
+ */
+ unsigned int idx = pin % JZ4730_PINS_PER_PAIRED_REG;
+ unsigned int mask = GENMASK(1, 0) << (idx * 2);
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ u8 reg = (pin % PINS_PER_GPIO_CHIP) < JZ4730_PINS_PER_PAIRED_REG ? reg_lower : reg_upper;
+
+ regmap_update_bits(jzpc->map, offt * jzpc->info->reg_offset + reg,
+ mask, value << (idx * 2));
+}
+
static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
- unsigned int pin, u8 reg)
+ unsigned int pin, unsigned int reg)
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
@@ -1961,6 +3189,10 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
+ } else if (jzpc->info->version == ID_JZ4730) {
+ if (!ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPDIR))
+ return GPIO_LINE_DIRECTION_IN;
+ return GPIO_LINE_DIRECTION_OUT;
}
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
@@ -2019,10 +3251,13 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
- } else {
+ } else if (jzpc->info->version >= ID_JZ4740) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPIER, false);
+ jz4730_config_pin_function(jzpc, pin, JZ4730_GPIO_GPAUR, JZ4730_GPIO_GPALR, func);
}
return 0;
@@ -2083,10 +3318,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
- } else {
+ } else if (jzpc->info->version >= ID_JZ4740) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPIER, false);
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPDIR, !input);
+ jz4730_config_pin_function(jzpc, pin, JZ4730_GPIO_GPAUR, JZ4730_GPIO_GPALR, 0);
}
return 0;
@@ -2107,44 +3346,120 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
enum pin_config_param param = pinconf_to_config_param(*config);
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- bool pull;
+ unsigned int arg = 1;
+ unsigned int bias, reg;
+ bool pull, pullup, pulldown;
+
+ if (jzpc->info->version >= ID_X2000) {
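+ /*
+ * The X2000 has separate pull-up (PEPU) and pull-down (PEPD) enables;
+ * report a bias only when exactly one of the two is set and the pin
+ * supports that bias.
+ */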
+ pullup = ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) &&
+ !ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPD) &&
+ (jzpc->info->pull_ups[offt] & BIT(idx));
+ pulldown = ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPD) &&
+ !ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) &&
+ (jzpc->info->pull_downs[offt] & BIT(idx));
+
+ } else if (jzpc->info->version >= ID_X1830) {
+ unsigned int half = PINS_PER_GPIO_CHIP / 2;
+ unsigned int idxh = (pin % half) * 2;
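+
+ /*
+ * The X1830 stores the bias as two bits per pin: PEL covers the lower
+ * 16 pins of a port, PEH the upper 16.
+ */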
- if (jzpc->info->version >= ID_JZ4770)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
- else
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
+ if (idx < half)
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
+ X1830_GPIO_PEL, &bias);
+ else
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
+ X1830_GPIO_PEH, &bias);
+
+ bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
+
+ pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
+ pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
+
+ } else {
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
+ else if (jzpc->info->version >= ID_JZ4740)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
+ else
+ pull = ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPPUR);
+
+ pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
+ pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
+ }
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (pull)
+ if (pullup || pulldown)
return -EINVAL;
+
break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
+ if (!pullup)
return -EINVAL;
+
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
+ if (!pulldown)
return -EINVAL;
+
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (jzpc->info->version >= ID_X2000)
+ reg = X2000_GPIO_SMT;
+ else if (jzpc->info->version >= ID_X1830)
+ reg = X1830_GPIO_SMT;
+ else
+ return -EINVAL;
+
+ arg = !!ingenic_get_pin_config(jzpc, pin, reg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (jzpc->info->version >= ID_X2000)
+ reg = X2000_GPIO_SR;
+ else if (jzpc->info->version >= ID_X1830)
+ reg = X1830_GPIO_SR;
+ else
+ return -EINVAL;
+
+ arg = !!ingenic_get_pin_config(jzpc, pin, reg);
break;
default:
return -ENOTSUPP;
}
- *config = pinconf_to_config_packed(param, 1);
+ *config = pinconf_to_config_packed(param, arg);
return 0;
}
static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
unsigned int pin, unsigned int bias)
{
- if (jzpc->info->version >= ID_X1830) {
+ if (jzpc->info->version >= ID_X2000) {
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, true);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, true);
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ default:
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
+ }
+
+ } else if (jzpc->info->version >= ID_X1830) {
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int half = PINS_PER_GPIO_CHIP / 2;
- unsigned int idxh = pin % half * 2;
+ unsigned int idxh = (pin % half) * 2;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
if (idx < half) {
@@ -2161,18 +3476,40 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
} else if (jzpc->info->version >= ID_JZ4770) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
- } else {
+ } else if (jzpc->info->version >= ID_JZ4740) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPPUR, bias);
}
}
+static void ingenic_set_schmitt_trigger(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool enable)
+{
+ if (jzpc->info->version >= ID_X2000)
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_SMT, enable);
+ else
+ ingenic_config_pin(jzpc, pin, X1830_GPIO_SMT, enable);
+}
+
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
if (jzpc->info->version >= ID_JZ4770)
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
- else
+ else if (jzpc->info->version >= ID_JZ4740)
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_DATA, high);
+}
+
+static void ingenic_set_slew_rate(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, unsigned int slew)
+{
+ if (jzpc->info->version >= ID_X2000)
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_SR, slew);
+ else
+ ingenic_config_pin(jzpc, pin, X1830_GPIO_SR, slew);
}
static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
@@ -2189,7 +3526,9 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_SLEW_RATE:
continue;
default:
return -ENOTSUPP;
@@ -2222,6 +3561,13 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_bias(jzpc, pin, GPIO_PULL_DOWN);
break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (jzpc->info->version < ID_X1830)
+ return -EINVAL;
+
+ ingenic_set_schmitt_trigger(jzpc, pin, arg);
+ break;
+
case PIN_CONFIG_OUTPUT:
ret = pinctrl_gpio_direction_output(pin);
if (ret)
@@ -2230,6 +3576,13 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_output_level(jzpc, pin, arg);
break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (jzpc->info->version < ID_X1830)
+ return -EINVAL;
+
+ ingenic_set_slew_rate(jzpc, pin, arg);
+ break;
+
default:
/* unreachable */
break;
@@ -2301,13 +3654,18 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
};
static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
+ { .compatible = "ingenic,jz4730-gpio", },
{ .compatible = "ingenic,jz4740-gpio", },
{ .compatible = "ingenic,jz4725b-gpio", },
+ { .compatible = "ingenic,jz4750-gpio", },
+ { .compatible = "ingenic,jz4755-gpio", },
{ .compatible = "ingenic,jz4760-gpio", },
{ .compatible = "ingenic,jz4770-gpio", },
+ { .compatible = "ingenic,jz4775-gpio", },
{ .compatible = "ingenic,jz4780-gpio", },
{ .compatible = "ingenic,x1000-gpio", },
{ .compatible = "ingenic,x1830-gpio", },
+ { .compatible = "ingenic,x2000-gpio", },
{},
};
@@ -2380,6 +3738,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
+
girq->parents[0] = jzgc->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
@@ -2485,8 +3844,10 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
for_each_child_of_node(dev->of_node, node) {
if (of_match_node(ingenic_gpio_of_match, node)) {
err = ingenic_gpio_probe(jzpc, node);
- if (err)
+ if (err) {
+ of_node_put(node);
return err;
+ }
}
}
@@ -2495,6 +3856,10 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
static const struct of_device_id ingenic_pinctrl_of_match[] = {
{
+ .compatible = "ingenic,jz4730-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4730, &jz4730_chip_info)
+ },
+ {
.compatible = "ingenic,jz4740-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4740, &jz4740_chip_info)
},
@@ -2503,6 +3868,14 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.data = IF_ENABLED(CONFIG_MACH_JZ4725B, &jz4725b_chip_info)
},
{
+ .compatible = "ingenic,jz4750-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4750, &jz4750_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4755-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4755, &jz4755_chip_info)
+ },
+ {
.compatible = "ingenic,jz4760-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4760, &jz4760_chip_info)
},
@@ -2515,6 +3888,10 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.data = IF_ENABLED(CONFIG_MACH_JZ4770, &jz4770_chip_info)
},
{
+ .compatible = "ingenic,jz4775-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4775, &jz4775_chip_info)
+ },
+ {
.compatible = "ingenic,jz4780-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4780, &jz4780_chip_info)
},
@@ -2534,6 +3911,14 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.compatible = "ingenic,x1830-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_X1830, &x1830_chip_info)
},
+ {
+ .compatible = "ingenic,x2000-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X2000, &x2000_chip_info)
+ },
+ {
+ .compatible = "ingenic,x2000e-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X2000, &x2000_chip_info)
+ },
{ /* sentinel */ },
};
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 8a733cf77ba0..f831526d06ff 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -15,7 +15,6 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/io.h>
#include <dt-bindings/pinctrl/k210-fpioa.h>
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 7b2f885e68bd..ed9bf2c89998 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -646,7 +646,7 @@ static const struct pin_config_item lpc18xx_conf_items[ARRAY_SIZE(lpc18xx_params
static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
{
switch (param) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (reg & LPC18XX_SCU_USB1_EPWR)
*arg = 0;
else
@@ -904,7 +904,7 @@ static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
u32 param_val, u32 *reg)
{
switch (param) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (param_val)
*reg &= ~LPC18XX_SCU_USB1_EPWR;
else
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 53a0badc6b03..067fc4208de4 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -16,10 +16,12 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/machine.h>
@@ -61,8 +63,17 @@ enum rockchip_pinctrl_type {
RK3308,
RK3368,
RK3399,
+ RK3568,
};
+
+/*
+ * Generate a bitmask for setting a value (v) together with its write-enable
+ * mask in the hiword (31:16) area of the register.
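+ *
+ * E.g. WRITE_MASK_VAL(2, 0, 5) expands to 0x00070005: bits 18:16
+ * unlock bits 2:0, which are then written with the value 5.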
+ */
+#define WRITE_MASK_VAL(h, l, v) \
+ (GENMASK(((h) + 16), ((l) + 16)) | (((v) << (l)) & GENMASK((h), (l))))
+
/*
* Encode variants of iomux registers into a type variable
*/
@@ -290,6 +301,25 @@ struct rockchip_pin_bank {
.pull_type[3] = pull3, \
}
+#define PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, FLAG) \
+ { \
+ .bank_num = ID, \
+ .pin = PIN, \
+ .func = FUNC, \
+ .route_offset = REG, \
+ .route_val = VAL, \
+ .route_location = FLAG, \
+ }
+
+#define RK_MUXROUTE_SAME(ID, PIN, FUNC, REG, VAL) \
+ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_SAME)
+
+#define RK_MUXROUTE_GRF(ID, PIN, FUNC, REG, VAL) \
+ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_GRF)
+
+#define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL) \
+ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU)
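+
+/*
+ * The three wrappers differ only in where the route register lives:
+ * the bank's own register space (SAME), the GRF, or the PMU.
+ */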
+
/**
* struct rockchip_mux_recalced_data: represents pin iomux data.
* @num: bank number.
@@ -801,597 +831,203 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
}
static struct rockchip_mux_route_data px30_mux_route_data[] = {
- {
- /* cif-d2m0 */
- .bank_num = 2,
- .pin = 0,
- .func = 1,
- .route_offset = 0x184,
- .route_val = BIT(16 + 7),
- }, {
- /* cif-d2m1 */
- .bank_num = 3,
- .pin = 3,
- .func = 3,
- .route_offset = 0x184,
- .route_val = BIT(16 + 7) | BIT(7),
- }, {
- /* pdm-m0 */
- .bank_num = 3,
- .pin = 22,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 8),
- }, {
- /* pdm-m1 */
- .bank_num = 2,
- .pin = 22,
- .func = 1,
- .route_offset = 0x184,
- .route_val = BIT(16 + 8) | BIT(8),
- }, {
- /* uart2-rxm0 */
- .bank_num = 1,
- .pin = 27,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 10),
- }, {
- /* uart2-rxm1 */
- .bank_num = 2,
- .pin = 14,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 10) | BIT(10),
- }, {
- /* uart3-rxm0 */
- .bank_num = 0,
- .pin = 17,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 9),
- }, {
- /* uart3-rxm1 */
- .bank_num = 1,
- .pin = 15,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 9) | BIT(9),
- },
+ RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */
+ RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */
+ RK_MUXROUTE_SAME(3, RK_PC6, 2, 0x184, BIT(16 + 8)), /* pdm-m0 */
+ RK_MUXROUTE_SAME(2, RK_PC6, 1, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-m1 */
+ RK_MUXROUTE_SAME(1, RK_PD3, 2, 0x184, BIT(16 + 10)), /* uart2-rxm0 */
+ RK_MUXROUTE_SAME(2, RK_PB6, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-rxm1 */
+ RK_MUXROUTE_SAME(0, RK_PC1, 2, 0x184, BIT(16 + 9)), /* uart3-rxm0 */
+ RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */
};
static struct rockchip_mux_route_data rk3128_mux_route_data[] = {
- {
- /* spi-0 */
- .bank_num = 1,
- .pin = 10,
- .func = 1,
- .route_offset = 0x144,
- .route_val = BIT(16 + 3) | BIT(16 + 4),
- }, {
- /* spi-1 */
- .bank_num = 1,
- .pin = 27,
- .func = 3,
- .route_offset = 0x144,
- .route_val = BIT(16 + 3) | BIT(16 + 4) | BIT(3),
- }, {
- /* spi-2 */
- .bank_num = 0,
- .pin = 13,
- .func = 2,
- .route_offset = 0x144,
- .route_val = BIT(16 + 3) | BIT(16 + 4) | BIT(4),
- }, {
- /* i2s-0 */
- .bank_num = 1,
- .pin = 5,
- .func = 1,
- .route_offset = 0x144,
- .route_val = BIT(16 + 5),
- }, {
- /* i2s-1 */
- .bank_num = 0,
- .pin = 14,
- .func = 1,
- .route_offset = 0x144,
- .route_val = BIT(16 + 5) | BIT(5),
- }, {
- /* emmc-0 */
- .bank_num = 1,
- .pin = 22,
- .func = 2,
- .route_offset = 0x144,
- .route_val = BIT(16 + 6),
- }, {
- /* emmc-1 */
- .bank_num = 2,
- .pin = 4,
- .func = 2,
- .route_offset = 0x144,
- .route_val = BIT(16 + 6) | BIT(6),
- },
+ RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x144, BIT(16 + 3) | BIT(16 + 4)), /* spi-0 */
+ RK_MUXROUTE_SAME(1, RK_PD3, 3, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(3)), /* spi-1 */
+ RK_MUXROUTE_SAME(0, RK_PB5, 2, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(4)), /* spi-2 */
+ RK_MUXROUTE_SAME(1, RK_PA5, 1, 0x144, BIT(16 + 5)), /* i2s-0 */
+ RK_MUXROUTE_SAME(0, RK_PB6, 1, 0x144, BIT(16 + 5) | BIT(5)), /* i2s-1 */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x144, BIT(16 + 6)), /* emmc-0 */
+ RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x144, BIT(16 + 6) | BIT(6)), /* emmc-1 */
};
static struct rockchip_mux_route_data rk3188_mux_route_data[] = {
- {
- /* non-iomuxed emmc/flash pins on flash-dqs */
- .bank_num = 0,
- .pin = 24,
- .func = 1,
- .route_location = ROCKCHIP_ROUTE_GRF,
- .route_offset = 0xa0,
- .route_val = BIT(16 + 11),
- }, {
- /* non-iomuxed emmc/flash pins on emmc-clk */
- .bank_num = 0,
- .pin = 24,
- .func = 2,
- .route_location = ROCKCHIP_ROUTE_GRF,
- .route_offset = 0xa0,
- .route_val = BIT(16 + 11) | BIT(11),
- },
+ RK_MUXROUTE_SAME(0, RK_PD0, 1, 0xa0, BIT(16 + 11)), /* non-iomuxed emmc/flash pins on flash-dqs */
+ RK_MUXROUTE_SAME(0, RK_PD0, 2, 0xa0, BIT(16 + 11) | BIT(11)), /* non-iomuxed emmc/flash pins on emmc-clk */
};
static struct rockchip_mux_route_data rk3228_mux_route_data[] = {
- {
- /* pwm0-0 */
- .bank_num = 0,
- .pin = 26,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16),
- }, {
- /* pwm0-1 */
- .bank_num = 3,
- .pin = 21,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16) | BIT(0),
- }, {
- /* pwm1-0 */
- .bank_num = 0,
- .pin = 27,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 1),
- }, {
- /* pwm1-1 */
- .bank_num = 0,
- .pin = 30,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 1) | BIT(1),
- }, {
- /* pwm2-0 */
- .bank_num = 0,
- .pin = 28,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 2),
- }, {
- /* pwm2-1 */
- .bank_num = 1,
- .pin = 12,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(2),
- }, {
- /* pwm3-0 */
- .bank_num = 3,
- .pin = 26,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3),
- }, {
- /* pwm3-1 */
- .bank_num = 1,
- .pin = 11,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* sdio-0_d0 */
- .bank_num = 1,
- .pin = 1,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 4),
- }, {
- /* sdio-1_d0 */
- .bank_num = 3,
- .pin = 2,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 4) | BIT(4),
- }, {
- /* spi-0_rx */
- .bank_num = 0,
- .pin = 13,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 5),
- }, {
- /* spi-1_rx */
- .bank_num = 2,
- .pin = 0,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 5) | BIT(5),
- }, {
- /* emmc-0_cmd */
- .bank_num = 1,
- .pin = 22,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 7),
- }, {
- /* emmc-1_cmd */
- .bank_num = 2,
- .pin = 4,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 7) | BIT(7),
- }, {
- /* uart2-0_rx */
- .bank_num = 1,
- .pin = 19,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 8),
- }, {
- /* uart2-1_rx */
- .bank_num = 1,
- .pin = 10,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 8) | BIT(8),
- }, {
- /* uart1-0_rx */
- .bank_num = 1,
- .pin = 10,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 11),
- }, {
- /* uart1-1_rx */
- .bank_num = 3,
- .pin = 13,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 11) | BIT(11),
- },
+ RK_MUXROUTE_SAME(0, RK_PD2, 1, 0x50, BIT(16)), /* pwm0-0 */
+ RK_MUXROUTE_SAME(3, RK_PC5, 1, 0x50, BIT(16) | BIT(0)), /* pwm0-1 */
+ RK_MUXROUTE_SAME(0, RK_PD3, 1, 0x50, BIT(16 + 1)), /* pwm1-0 */
+ RK_MUXROUTE_SAME(0, RK_PD6, 2, 0x50, BIT(16 + 1) | BIT(1)), /* pwm1-1 */
+ RK_MUXROUTE_SAME(0, RK_PD4, 1, 0x50, BIT(16 + 2)), /* pwm2-0 */
+ RK_MUXROUTE_SAME(1, RK_PB4, 2, 0x50, BIT(16 + 2) | BIT(2)), /* pwm2-1 */
+ RK_MUXROUTE_SAME(3, RK_PD2, 1, 0x50, BIT(16 + 3)), /* pwm3-0 */
+ RK_MUXROUTE_SAME(1, RK_PB3, 2, 0x50, BIT(16 + 3) | BIT(3)), /* pwm3-1 */
+ RK_MUXROUTE_SAME(1, RK_PA1, 1, 0x50, BIT(16 + 4)), /* sdio-0_d0 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 1, 0x50, BIT(16 + 4) | BIT(4)), /* sdio-1_d0 */
+ RK_MUXROUTE_SAME(0, RK_PB5, 2, 0x50, BIT(16 + 5)), /* spi-0_rx */
+ RK_MUXROUTE_SAME(2, RK_PA0, 2, 0x50, BIT(16 + 5) | BIT(5)), /* spi-1_rx */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x50, BIT(16 + 7)), /* emmc-0_cmd */
+ RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x50, BIT(16 + 7) | BIT(7)), /* emmc-1_cmd */
+ RK_MUXROUTE_SAME(1, RK_PC3, 2, 0x50, BIT(16 + 8)), /* uart2-0_rx */
+ RK_MUXROUTE_SAME(1, RK_PB2, 2, 0x50, BIT(16 + 8) | BIT(8)), /* uart2-1_rx */
+ RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x50, BIT(16 + 11)), /* uart1-0_rx */
+ RK_MUXROUTE_SAME(3, RK_PB5, 1, 0x50, BIT(16 + 11) | BIT(11)), /* uart1-1_rx */
};
static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
- {
- /* edphdmi_cecinoutt1 */
- .bank_num = 7,
- .pin = 16,
- .func = 2,
- .route_offset = 0x264,
- .route_val = BIT(16 + 12) | BIT(12),
- }, {
- /* edphdmi_cecinout */
- .bank_num = 7,
- .pin = 23,
- .func = 4,
- .route_offset = 0x264,
- .route_val = BIT(16 + 12),
- },
+ RK_MUXROUTE_SAME(7, RK_PC0, 2, 0x264, BIT(16 + 12) | BIT(12)), /* edphdmi_cecinoutt1 */
+ RK_MUXROUTE_SAME(7, RK_PC7, 4, 0x264, BIT(16 + 12)), /* edphdmi_cecinout */
};
static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
- {
- /* rtc_clk */
- .bank_num = 0,
- .pin = 19,
- .func = 1,
- .route_offset = 0x314,
- .route_val = BIT(16 + 0) | BIT(0),
- }, {
- /* uart2_rxm0 */
- .bank_num = 1,
- .pin = 22,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 2) | BIT(16 + 3),
- }, {
- /* uart2_rxm1 */
- .bank_num = 4,
- .pin = 26,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2),
- }, {
- /* i2c3_sdam0 */
- .bank_num = 0,
- .pin = 15,
- .func = 2,
- .route_offset = 0x608,
- .route_val = BIT(16 + 8) | BIT(16 + 9),
- }, {
- /* i2c3_sdam1 */
- .bank_num = 3,
- .pin = 12,
- .func = 2,
- .route_offset = 0x608,
- .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8),
- }, {
- /* i2c3_sdam2 */
- .bank_num = 2,
- .pin = 0,
- .func = 3,
- .route_offset = 0x608,
- .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9),
- }, {
- /* i2s-8ch-1-sclktxm0 */
- .bank_num = 1,
- .pin = 3,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3),
- }, {
- /* i2s-8ch-1-sclkrxm0 */
- .bank_num = 1,
- .pin = 4,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3),
- }, {
- /* i2s-8ch-1-sclktxm1 */
- .bank_num = 1,
- .pin = 13,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* i2s-8ch-1-sclkrxm1 */
- .bank_num = 1,
- .pin = 14,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* pdm-clkm0 */
- .bank_num = 1,
- .pin = 4,
- .func = 3,
- .route_offset = 0x308,
- .route_val = BIT(16 + 12) | BIT(16 + 13),
- }, {
- /* pdm-clkm1 */
- .bank_num = 1,
- .pin = 14,
- .func = 4,
- .route_offset = 0x308,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
- }, {
- /* pdm-clkm2 */
- .bank_num = 2,
- .pin = 6,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
- }, {
- /* pdm-clkm-m2 */
- .bank_num = 2,
- .pin = 4,
- .func = 3,
- .route_offset = 0x600,
- .route_val = BIT(16 + 2) | BIT(2),
- }, {
- /* spi1_miso */
- .bank_num = 3,
- .pin = 10,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 9),
- }, {
- /* spi1_miso_m1 */
- .bank_num = 2,
- .pin = 4,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 9) | BIT(9),
- }, {
- /* owire_m0 */
- .bank_num = 0,
- .pin = 11,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 10) | BIT(16 + 11),
- }, {
- /* owire_m1 */
- .bank_num = 1,
- .pin = 22,
- .func = 7,
- .route_offset = 0x314,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
- }, {
- /* owire_m2 */
- .bank_num = 2,
- .pin = 2,
- .func = 5,
- .route_offset = 0x314,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
- }, {
- /* can_rxd_m0 */
- .bank_num = 0,
- .pin = 11,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 12) | BIT(16 + 13),
- }, {
- /* can_rxd_m1 */
- .bank_num = 1,
- .pin = 22,
- .func = 5,
- .route_offset = 0x314,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
- }, {
- /* can_rxd_m2 */
- .bank_num = 2,
- .pin = 2,
- .func = 4,
- .route_offset = 0x314,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
- }, {
- /* mac_rxd0_m0 */
- .bank_num = 1,
- .pin = 20,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 14),
- }, {
- /* mac_rxd0_m1 */
- .bank_num = 4,
- .pin = 2,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 14) | BIT(14),
- }, {
- /* uart3_rx */
- .bank_num = 3,
- .pin = 12,
- .func = 4,
- .route_offset = 0x314,
- .route_val = BIT(16 + 15),
- }, {
- /* uart3_rx_m1 */
- .bank_num = 0,
- .pin = 17,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 15) | BIT(15),
- },
+ RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */
+ RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */
+ RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */
+ RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* i2c3_sdam1 */
+ RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */
+ RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */
+ RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */
+ RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclkrxm1 */
+ RK_MUXROUTE_SAME(1, RK_PA4, 3, 0x308, BIT(16 + 12) | BIT(16 + 13)), /* pdm-clkm0 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */
+ RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */
+ RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */
+ RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */
+ RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */
+ RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */
+ RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */
+ RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */
+ RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */
+ RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */
+ RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */
+ RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */
+ RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */
+ RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */
+ RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */
};
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
- {
- /* uart2dbg_rxm0 */
- .bank_num = 1,
- .pin = 1,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16) | BIT(16 + 1),
- }, {
- /* uart2dbg_rxm1 */
- .bank_num = 2,
- .pin = 1,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16) | BIT(16 + 1) | BIT(0),
- }, {
- /* gmac-m1_rxd0 */
- .bank_num = 1,
- .pin = 11,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(2),
- }, {
- /* gmac-m1-optimized_rxd3 */
- .bank_num = 1,
- .pin = 14,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 10) | BIT(10),
- }, {
- /* pdm_sdi0m0 */
- .bank_num = 2,
- .pin = 19,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3),
- }, {
- /* pdm_sdi0m1 */
- .bank_num = 1,
- .pin = 23,
- .func = 3,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* spi_rxdm2 */
- .bank_num = 3,
- .pin = 2,
- .func = 4,
- .route_offset = 0x50,
- .route_val = BIT(16 + 4) | BIT(16 + 5) | BIT(5),
- }, {
- /* i2s2_sdim0 */
- .bank_num = 1,
- .pin = 24,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 6),
- }, {
- /* i2s2_sdim1 */
- .bank_num = 3,
- .pin = 2,
- .func = 6,
- .route_offset = 0x50,
- .route_val = BIT(16 + 6) | BIT(6),
- }, {
- /* card_iom1 */
- .bank_num = 2,
- .pin = 22,
- .func = 3,
- .route_offset = 0x50,
- .route_val = BIT(16 + 7) | BIT(7),
- }, {
- /* tsp_d5m1 */
- .bank_num = 2,
- .pin = 16,
- .func = 3,
- .route_offset = 0x50,
- .route_val = BIT(16 + 8) | BIT(8),
- }, {
- /* cif_data5m1 */
- .bank_num = 2,
- .pin = 16,
- .func = 4,
- .route_offset = 0x50,
- .route_val = BIT(16 + 9) | BIT(9),
- },
+ RK_MUXROUTE_SAME(1, RK_PA1, 2, 0x50, BIT(16) | BIT(16 + 1)), /* uart2dbg_rxm0 */
+ RK_MUXROUTE_SAME(2, RK_PA1, 1, 0x50, BIT(16) | BIT(16 + 1) | BIT(0)), /* uart2dbg_rxm1 */
+ RK_MUXROUTE_SAME(1, RK_PB3, 2, 0x50, BIT(16 + 2) | BIT(2)), /* gmac-m1_rxd0 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x50, BIT(16 + 10) | BIT(10)), /* gmac-m1-optimized_rxd3 */
+ RK_MUXROUTE_SAME(2, RK_PC3, 2, 0x50, BIT(16 + 3)), /* pdm_sdi0m0 */
+ RK_MUXROUTE_SAME(1, RK_PC7, 3, 0x50, BIT(16 + 3) | BIT(3)), /* pdm_sdi0m1 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 4, 0x50, BIT(16 + 4) | BIT(16 + 5) | BIT(5)), /* spi_rxdm2 */
+ RK_MUXROUTE_SAME(1, RK_PD0, 1, 0x50, BIT(16 + 6)), /* i2s2_sdim0 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 6, 0x50, BIT(16 + 6) | BIT(6)), /* i2s2_sdim1 */
+ RK_MUXROUTE_SAME(2, RK_PC6, 3, 0x50, BIT(16 + 7) | BIT(7)), /* card_iom1 */
+ RK_MUXROUTE_SAME(2, RK_PC0, 3, 0x50, BIT(16 + 8) | BIT(8)), /* tsp_d5m1 */
+ RK_MUXROUTE_SAME(2, RK_PC0, 4, 0x50, BIT(16 + 9) | BIT(9)), /* cif_data5m1 */
};
static struct rockchip_mux_route_data rk3399_mux_route_data[] = {
- {
- /* uart2dbga_rx */
- .bank_num = 4,
- .pin = 8,
- .func = 2,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 10) | BIT(16 + 11),
- }, {
- /* uart2dbgb_rx */
- .bank_num = 4,
- .pin = 16,
- .func = 2,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
- }, {
- /* uart2dbgc_rx */
- .bank_num = 4,
- .pin = 19,
- .func = 1,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
- }, {
- /* pcie_clkreqn */
- .bank_num = 2,
- .pin = 26,
- .func = 2,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 14),
- }, {
- /* pcie_clkreqnb */
- .bank_num = 4,
- .pin = 24,
- .func = 1,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 14) | BIT(14),
- },
+ RK_MUXROUTE_SAME(4, RK_PB0, 2, 0xe21c, BIT(16 + 10) | BIT(16 + 11)), /* uart2dbga_rx */
+ RK_MUXROUTE_SAME(4, RK_PC0, 2, 0xe21c, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* uart2dbgb_rx */
+ RK_MUXROUTE_SAME(4, RK_PC3, 1, 0xe21c, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* uart2dbgc_rx */
+ RK_MUXROUTE_SAME(2, RK_PD2, 2, 0xe21c, BIT(16 + 14)), /* pcie_clkreqn */
+ RK_MUXROUTE_SAME(4, RK_PD0, 1, 0xe21c, BIT(16 + 14) | BIT(14)), /* pcie_clkreqnb */
+};
+
+static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ RK_MUXROUTE_PMU(0, RK_PB7, 1, 0x0110, WRITE_MASK_VAL(1, 0, 0)), /* PWM0 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PC7, 2, 0x0110, WRITE_MASK_VAL(1, 0, 1)), /* PWM0 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PC0, 1, 0x0110, WRITE_MASK_VAL(3, 2, 0)), /* PWM1 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB2, 1, 0x0304, WRITE_MASK_VAL(2, 2, 0)), /* I2C4 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PB1, 2, 0x0304, WRITE_MASK_VAL(2, 2, 1)), /* I2C4 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB4, 4, 0x0304, WRITE_MASK_VAL(4, 4, 0)), /* I2C5 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PD0, 2, 0x0304, WRITE_MASK_VAL(4, 4, 1)), /* I2C5 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB1, 5, 0x0304, WRITE_MASK_VAL(14, 14, 0)), /* PWM8 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 4, 0x0304, WRITE_MASK_VAL(14, 14, 1)), /* PWM8 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB2, 5, 0x0308, WRITE_MASK_VAL(0, 0, 0)), /* PWM9 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD6, 4, 0x0308, WRITE_MASK_VAL(0, 0, 1)), /* PWM9 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB5, 5, 0x0308, WRITE_MASK_VAL(2, 2, 0)), /* PWM10 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PA1, 2, 0x0308, WRITE_MASK_VAL(2, 2, 1)), /* PWM10 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB6, 5, 0x0308, WRITE_MASK_VAL(4, 4, 0)), /* PWM11 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC0, 3, 0x0308, WRITE_MASK_VAL(4, 4, 1)), /* PWM11 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB7, 2, 0x0308, WRITE_MASK_VAL(6, 6, 0)), /* PWM12 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC5, 1, 0x0308, WRITE_MASK_VAL(6, 6, 1)), /* PWM12 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC0, 2, 0x0308, WRITE_MASK_VAL(8, 8, 0)), /* PWM13 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC6, 1, 0x0308, WRITE_MASK_VAL(8, 8, 1)), /* PWM13 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 1, 0x0308, WRITE_MASK_VAL(10, 10, 0)), /* PWM14 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 1, 0x0308, WRITE_MASK_VAL(10, 10, 1)), /* PWM14 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC5, 1, 0x0308, WRITE_MASK_VAL(12, 12, 0)), /* PWM15 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PC1, 4, 0x030c, WRITE_MASK_VAL(4, 4, 0)), /* SPI2 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PA0, 3, 0x030c, WRITE_MASK_VAL(4, 4, 1)), /* SPI2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA6, 2, 0x030c, WRITE_MASK_VAL(14, 14, 0)), /* UART4 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB2, 4, 0x030c, WRITE_MASK_VAL(14, 14, 1)), /* UART4 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PA2, 3, 0x0310, WRITE_MASK_VAL(0, 0, 0)), /* UART5 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC2, 4, 0x0310, WRITE_MASK_VAL(0, 0, 1)), /* UART5 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PA4, 3, 0x0310, WRITE_MASK_VAL(2, 2, 0)), /* UART6 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 3, 0x0310, WRITE_MASK_VAL(2, 2, 1)), /* UART6 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PA6, 3, 0x0310, WRITE_MASK_VAL(5, 4, 0)), /* UART7 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 4, 0x0310, WRITE_MASK_VAL(5, 4, 1)), /* UART7 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA2, 4, 0x0310, WRITE_MASK_VAL(5, 4, 2)), /* UART7 IO mux M2 */
+ RK_MUXROUTE_GRF(2, RK_PC5, 3, 0x0310, WRITE_MASK_VAL(6, 6, 0)), /* UART8 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD7, 4, 0x0310, WRITE_MASK_VAL(6, 6, 1)), /* UART8 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB0, 3, 0x0310, WRITE_MASK_VAL(9, 8, 0)), /* UART9 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC5, 4, 0x0310, WRITE_MASK_VAL(9, 8, 1)), /* UART9 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA4, 4, 0x0310, WRITE_MASK_VAL(9, 8, 2)), /* UART9 IO mux M2 */
+ RK_MUXROUTE_GRF(1, RK_PA2, 1, 0x0310, WRITE_MASK_VAL(11, 10, 0)), /* I2S1 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC6, 4, 0x0310, WRITE_MASK_VAL(11, 10, 1)), /* I2S1 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PD0, 5, 0x0310, WRITE_MASK_VAL(11, 10, 2)), /* I2S1 IO mux M2 */
+ RK_MUXROUTE_GRF(2, RK_PC1, 1, 0x0310, WRITE_MASK_VAL(12, 12, 0)), /* I2S2 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PB6, 5, 0x0310, WRITE_MASK_VAL(12, 12, 1)), /* I2S2 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PA2, 4, 0x0310, WRITE_MASK_VAL(14, 14, 0)), /* I2S3 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 5, 0x0310, WRITE_MASK_VAL(14, 14, 1)), /* I2S3 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(1, 0, 0)), /* PDM IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PA6, 3, 0x0314, WRITE_MASK_VAL(1, 0, 0)), /* PDM IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
+ RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
+ RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
+ RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
};
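
WRITE_MASK_VAL() is defined elsewhere in the driver and not visible in this hunk, so its exact form here is an assumption; taking the usual Rockchip definition GENMASK((h) + 16, (l) + 16) | ((v) << (l)), this sketch shows what one of the table entries above encodes (GENMASK is a local stand-in for the kernel macro):

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
/* Assumed definition: write-enable bits for h..l in the upper half,
 * value v placed in bits h..l of the lower half.
 */
#define WRITE_MASK_VAL(h, l, v) \
	(GENMASK((h) + 16, (l) + 16) | ((v) << (l)))

int main(void)
{
	/* UART7 IO mux M2: 2-bit field at bits 5..4 set to 2 */
	printf("%#010x\n", WRITE_MASK_VAL(5, 4, 2));	/* 0x00300020 */
	return 0;
}
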
static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin,
@@ -2102,6 +1738,68 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % 8) * 2;
}
+#define RK3568_PULL_PMU_OFFSET 0x20
+#define RK3568_PULL_GRF_OFFSET 0x80
+#define RK3568_PULL_BITS_PER_PIN 2
+#define RK3568_PULL_PINS_PER_REG 8
+#define RK3568_PULL_BANK_STRIDE 0x10
+
+static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3568_PULL_PMU_OFFSET;
+ *reg += bank->bank_num * RK3568_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3568_PULL_PINS_PER_REG) * 4);
+
+ *bit = pin_num % RK3568_PULL_PINS_PER_REG;
+ *bit *= RK3568_PULL_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3568_PULL_GRF_OFFSET;
+ *reg += (bank->bank_num - 1) * RK3568_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3568_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3568_PULL_PINS_PER_REG);
+ *bit *= RK3568_PULL_BITS_PER_PIN;
+ }
+}
+
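+
The helper above reduces to plain index arithmetic: 2 bits per pin, 8 pins per 32-bit register, 0x10 of register space per bank, with bank 0 in the PMU block and the rest in the system GRF. A standalone sketch of the same math (calc() is illustrative, not the driver function):

#include <stdio.h>

#define RK3568_PULL_PMU_OFFSET		0x20
#define RK3568_PULL_GRF_OFFSET		0x80
#define RK3568_PULL_BITS_PER_PIN	2
#define RK3568_PULL_PINS_PER_REG	8
#define RK3568_PULL_BANK_STRIDE		0x10

static void calc(int bank, int pin, int *reg, int *bit)
{
	*reg = bank == 0 ? RK3568_PULL_PMU_OFFSET
			 : RK3568_PULL_GRF_OFFSET +
			   (bank - 1) * RK3568_PULL_BANK_STRIDE;
	*reg += (pin / RK3568_PULL_PINS_PER_REG) * 4;
	*bit = (pin % RK3568_PULL_PINS_PER_REG) * RK3568_PULL_BITS_PER_PIN;
}

int main(void)
{
	int reg, bit;

	calc(0, 27, &reg, &bit);	/* GPIO0_D3, PMU grf */
	printf("bank0 pin27: reg %#x bit %d\n", reg, bit);	/* 0x2c, 6 */
	calc(2, 10, &reg, &bit);	/* GPIO2_B2, system grf */
	printf("bank2 pin10: reg %#x bit %d\n", reg, bit);	/* 0x94, 4 */
	return 0;
}
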
+#define RK3568_DRV_PMU_OFFSET 0x70
+#define RK3568_DRV_GRF_OFFSET 0x200
+#define RK3568_DRV_BITS_PER_PIN 8
+#define RK3568_DRV_PINS_PER_REG 2
+#define RK3568_DRV_BANK_STRIDE 0x40
+
+static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 32 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3568_DRV_PMU_OFFSET;
+ *reg += ((pin_num / RK3568_DRV_PINS_PER_REG) * 4);
+
+ *bit = pin_num % RK3568_DRV_PINS_PER_REG;
+ *bit *= RK3568_DRV_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3568_DRV_GRF_OFFSET;
+ *reg += (bank->bank_num - 1) * RK3568_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3568_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3568_DRV_PINS_PER_REG);
+ *bit *= RK3568_DRV_BITS_PER_PIN;
+ }
+}
+
static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = {
{ 2, 4, 8, 12, -1, -1, -1, -1 },
{ 3, 6, 9, 12, -1, -1, -1, -1 },
@@ -2202,6 +1900,11 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
bank->bank_num, pin_num, strength);
ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ if (ctrl->type == RK3568) {
+ rmask_bits = RK3568_DRV_BITS_PER_PIN;
+ ret = (1 << (strength + 1)) - 1;
+ goto config;
+ }
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) {
@@ -2271,6 +1974,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
return -EINVAL;
}
+config:
/* enable the write to the equivalent lower bits */
data = ((1 << rmask_bits) - 1) << (bit + 16);
rmask = data | (data >> 16);
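
For RK3568 the requested strength level appears to be encoded as (strength + 1) consecutive ones across the 8-bit per-pin field, and the write then applies the usual hiword mask shown above. A sketch of the words produced for strength level 2 on the upper pin of a register:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int rmask_bits = 8, bit = 8, strength = 2;
	uint32_t ret = (1U << (strength + 1)) - 1;	/* 0x7: three ones */
	uint32_t data = ((1U << rmask_bits) - 1) << (bit + 16);
	uint32_t rmask = data | (data >> 16);

	data |= ret << bit;
	/* prints: rmask 0xff00ff00 data 0xff000700 */
	printf("rmask %#010x data %#010x\n", rmask, data);
	return 0;
}
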
@@ -2373,6 +2077,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3308:
case RK3368:
case RK3399:
+ case RK3568:
pull_type = bank->pull_type[pin_num / 8];
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]);
@@ -2382,6 +2087,14 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
break;
}
}
+ /*
+ * In the TRM, the pull-up value is 1 for everything except
+ * GPIO0_D3-D6 (bank 0 pins 27-30), where that pull-up value
+ * becomes 3.
+ */
+ if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+ if (ret == 1)
+ ret = 3;
+ }
if (ret < 0) {
dev_err(info->dev, "unsupported pull setting %d\n",
@@ -2426,6 +2139,35 @@ static int rk3328_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3568_SCHMITT_BITS_PER_PIN 2
+#define RK3568_SCHMITT_PINS_PER_REG 8
+#define RK3568_SCHMITT_BANK_STRIDE 0x10
+#define RK3568_SCHMITT_GRF_OFFSET 0xc0
+#define RK3568_SCHMITT_PMUGRF_OFFSET 0x30
+
+static int rk3568_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3568_SCHMITT_PMUGRF_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3568_SCHMITT_GRF_OFFSET;
+ *reg += (bank->bank_num - 1) * RK3568_SCHMITT_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RK3568_SCHMITT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3568_SCHMITT_PINS_PER_REG;
+ *bit *= RK3568_SCHMITT_BITS_PER_PIN;
+
+ return 0;
+}
+
static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -2444,6 +2186,13 @@ static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
return ret;
data >>= bit;
+ switch (ctrl->type) {
+ case RK3568:
+ return data & ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1);
+ default:
+ break;
+ }
+
return data & 0x1;
}
@@ -2465,8 +2214,17 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
return ret;
/* enable the write to the equivalent lower bits */
- data = BIT(bit + 16) | (enable << bit);
- rmask = BIT(bit + 16) | BIT(bit);
+ switch (ctrl->type) {
+ case RK3568:
+ data = ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16);
+ rmask = data | (data >> 16);
+ data |= ((enable ? 0x2 : 0x1) << bit);
+ break;
+ default:
+ data = BIT(bit + 16) | (enable << bit);
+ rmask = BIT(bit + 16) | BIT(bit);
+ break;
+ }
return regmap_update_bits(regmap, reg, rmask, data);
}
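
On RK3568 the schmitt field is 2 bits wide rather than 1, and the branch above writes 0x2 to enable and 0x1 to disable, under the same hiword mask. A sketch of the resulting register words (schmitt_word() is illustrative only):

#include <stdint.h>
#include <stdio.h>

#define RK3568_SCHMITT_BITS_PER_PIN 2

static uint32_t schmitt_word(int bit, int enable)
{
	uint32_t data = ((1U << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16);

	return data | ((enable ? 0x2U : 0x1U) << bit);
}

int main(void)
{
	printf("enable  field at bit 4: %#010x\n", schmitt_word(4, 1));
	printf("disable field at bit 4: %#010x\n", schmitt_word(4, 0));
	return 0;	/* 0x00300020 and 0x00300010 */
}
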
@@ -2640,6 +2398,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3308:
case RK3368:
case RK3399:
+ case RK3568:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
@@ -3433,6 +3192,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
* things enabled, so for us that's all masked and all enabled.
*/
writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_PORTS_EOI);
writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
gc->mask_cache = 0xffffffff;
@@ -4213,6 +3973,45 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3568_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
+ IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
+ IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
+ IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+};
+
+static struct rockchip_pin_ctrl rk3568_pin_ctrl = {
+ .pin_banks = rk3568_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3568_pin_banks),
+ .label = "RK3568-GPIO",
+ .type = RK3568,
+ .grf_mux_offset = 0x0,
+ .pmu_mux_offset = 0x0,
+ .grf_drv_offset = 0x0200,
+ .pmu_drv_offset = 0x0070,
+ .iomux_routes = rk3568_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3568_mux_route_data),
+ .pull_calc_reg = rk3568_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3568_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit,
+};
+
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
{ .compatible = "rockchip,px30-pinctrl",
.data = &px30_pin_ctrl },
@@ -4242,6 +4041,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
.data = &rk3399_pin_ctrl },
+ { .compatible = "rockchip,rk3568-pinctrl",
+ .data = &rk3568_pin_ctrl },
{},
};
@@ -4259,3 +4060,14 @@ static int __init rockchip_pinctrl_drv_register(void)
return platform_driver_register(&rockchip_pinctrl_driver);
}
postcore_initcall(rockchip_pinctrl_drv_register);
+
+static void __exit rockchip_pinctrl_drv_unregister(void)
+{
+ platform_driver_unregister(&rockchip_pinctrl_driver);
+}
+module_exit(rockchip_pinctrl_drv_unregister);
+
+MODULE_DESCRIPTION("ROCKCHIP Pin Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pinctrl-rockchip");
+MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 7771316dfffa..2c9c9835f375 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
writel(val, reg);
}
+static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
+ unsigned int pin)
+{
+ unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
+
+ if (pcs->bits_per_mux) {
+ unsigned int pin_offset_bytes;
+
+ pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
+ return (pin_offset_bytes / mux_bytes) * mux_bytes;
+ }
+
+ return pin * mux_bytes;
+}
+
+static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
+ unsigned int pin)
+{
+ return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
+}
+
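+
These two helpers factor out the bits_per_mux arithmetic used in several places below: the register offset is the pin's bit offset rounded down to a register boundary, and the shift is the pin's position within that register. A standalone sketch with hypothetical width/fmask values (a 32-bit register holding two 16-bit mux fields, so bits_per_pin = fls(0xffff) = 16):

#include <stdio.h>

#define BITS_PER_BYTE 8

static unsigned int reg_offset(unsigned int width, unsigned int bpp,
			       unsigned int pin)
{
	unsigned int mux_bytes = width / BITS_PER_BYTE;
	unsigned int pin_offset_bytes = (bpp * pin) / BITS_PER_BYTE;

	return (pin_offset_bytes / mux_bytes) * mux_bytes;
}

static unsigned int reg_shift(unsigned int width, unsigned int bpp,
			      unsigned int pin)
{
	return (pin % (width / bpp)) * bpp;
}

int main(void)
{
	unsigned int width = 32, bpp = 16, pin;

	for (pin = 0; pin < 4; pin++)
		printf("pin %u: offset %#x shift %u\n", pin,
		       reg_offset(width, bpp, pin),
		       reg_shift(width, bpp, pin));
	return 0;	/* offsets 0, 0, 4, 4; shifts 0, 16, 0, 16 */
}
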
static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned pin)
{
struct pcs_device *pcs;
- unsigned val, mux_bytes;
+ unsigned int val;
unsigned long offset;
size_t pa;
pcs = pinctrl_dev_get_drvdata(pctldev);
- mux_bytes = pcs->width / BITS_PER_BYTE;
- offset = pin * mux_bytes;
+ offset = pcs_pin_reg_offset_get(pcs, pin);
val = pcs->read(pcs->base + offset);
+
+ if (pcs->bits_per_mux)
+ val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
+
pa = pcs->res->start + offset;
seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
@@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pcs_gpiofunc_range *frange = NULL;
struct list_head *pos, *tmp;
- int mux_bytes = 0;
unsigned data;
/* If function mask is null, return directly. */
@@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
return -ENOTSUPP;
list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
+ u32 offset;
+
frange = list_entry(pos, struct pcs_gpiofunc_range, node);
if (pin >= frange->offset + frange->npins
|| pin < frange->offset)
continue;
- mux_bytes = pcs->width / BITS_PER_BYTE;
- if (pcs->bits_per_mux) {
- int byte_num, offset, pin_shift;
+ offset = pcs_pin_reg_offset_get(pcs, pin);
- byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
- offset = (byte_num / mux_bytes) * mux_bytes;
- pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
- pcs->bits_per_pin;
+ if (pcs->bits_per_mux) {
+ int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
data = pcs->read(pcs->base + offset);
data &= ~(pcs->fmask << pin_shift);
data |= frange->gpiofunc << pin_shift;
pcs->write(data, pcs->base + offset);
} else {
- data = pcs->read(pcs->base + pin * mux_bytes);
+ data = pcs->read(pcs->base + offset);
data &= ~pcs->fmask;
data |= frange->gpiofunc;
- pcs->write(data, pcs->base + pin * mux_bytes);
+ pcs->write(data, pcs->base + offset);
}
break;
}
@@ -512,7 +533,7 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
default:
*config = data;
break;
@@ -550,7 +571,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
shift = ffs(func->conf[i].mask) - 1;
data &= ~func->conf[i].mask;
data |= (arg << shift) & func->conf[i].mask;
@@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
* pcs_add_pin() - add a pin to the static per controller pin array
* @pcs: pcs driver instance
* @offset: register offset from base
- * @pin_pos: unused
*/
-static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
- unsigned pin_pos)
+static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
{
struct pcs_soc_data *pcs_soc = &pcs->socdata;
struct pinctrl_pin_desc *pin;
@@ -703,14 +722,12 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
static int pcs_allocate_pin_table(struct pcs_device *pcs)
{
int mux_bytes, nr_pins, i;
- int num_pins_in_register = 0;
mux_bytes = pcs->width / BITS_PER_BYTE;
if (pcs->bits_per_mux) {
pcs->bits_per_pin = fls(pcs->fmask);
nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin;
- num_pins_in_register = pcs->width / pcs->bits_per_pin;
} else {
nr_pins = pcs->size / mux_bytes;
}
@@ -728,17 +745,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
for (i = 0; i < pcs->desc.npins; i++) {
unsigned offset;
int res;
- int byte_num;
- int pin_pos = 0;
- if (pcs->bits_per_mux) {
- byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
- offset = (byte_num / mux_bytes) * mux_bytes;
- pin_pos = i % num_pins_in_register;
- } else {
- offset = i * mux_bytes;
- }
- res = pcs_add_pin(pcs, offset, pin_pos);
+ offset = pcs_pin_reg_offset_get(pcs, i);
+ res = pcs_add_pin(pcs, offset);
if (res < 0) {
dev_err(pcs->dev, "error adding pins: %i\n", res);
return res;
@@ -910,7 +919,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
- { "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
+ { "pinctrl-single,low-power-mode", PIN_CONFIG_MODE_LOW_POWER, },
};
static const struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index c6052a0e827a..5fb924a2eedd 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -1016,7 +1016,7 @@ static int zynq_pinconf_cfg_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_SLEW_RATE:
arg = !!(reg & ZYNQ_PINCONF_SPEED);
break;
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
{
enum zynq_io_standards iostd = zynq_pinconf_iostd_get(reg);
@@ -1087,7 +1087,7 @@ static int zynq_pinconf_cfg_set(struct pinctrl_dev *pctldev,
reg &= ~ZYNQ_PINCONF_IOTYPE_MASK;
reg |= arg << ZYNQ_PINCONF_IOTYPE_SHIFT;
break;
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (arg)
reg |= ZYNQ_PINCONF_DISABLE_RECVR;
else
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
new file mode 100644
index 000000000000..d5497003ce71
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP pin controller
+ *
+ * Copyright (C) 2020 Xilinx, Inc.
+ *
+ * Sai Krishna Potthuri <lakshmi.sai.krishna.potthuri@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+#define ZYNQMP_PIN_PREFIX "MIO"
+#define PINCTRL_GET_FUNC_NAME_RESP_LEN 16
+#define MAX_FUNC_NAME_LEN 16
+#define MAX_GROUP_PIN 50
+#define MAX_PIN_GROUPS 50
+#define END_OF_FUNCTIONS "END_OF_FUNCTIONS"
+#define NUM_GROUPS_PER_RESP 6
+
+#define PINCTRL_GET_FUNC_GROUPS_RESP_LEN 12
+#define PINCTRL_GET_PIN_GROUPS_RESP_LEN 12
+#define NA_GROUP 0xFFFF
+#define RESERVED_GROUP 0xFFFE
+
+#define DRIVE_STRENGTH_2MA 2
+#define DRIVE_STRENGTH_4MA 4
+#define DRIVE_STRENGTH_8MA 8
+#define DRIVE_STRENGTH_12MA 12
+
+/**
+ * struct zynqmp_pmux_function - a pinmux function
+ * @name: Name of the pin mux function
+ * @groups: List of pin groups for this function
+ * @ngroups: Number of entries in @groups
+ *
+ * This structure holds information about a pin control function
+ * and the names of the pin groups supporting that function.
+ */
+struct zynqmp_pmux_function {
+ char name[MAX_FUNC_NAME_LEN];
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct zynqmp_pinctrl - driver data
+ * @pctrl: Pin control device
+ * @groups: Pin groups
+ * @ngroups: Number of @groups
+ * @funcs: Pin mux functions
+ * @nfuncs: Number of @funcs
+ *
+ * This struct is stored as driver data and used to retrieve
+ * information regarding pin control functions, groups and
+ * group pins.
+ */
+struct zynqmp_pinctrl {
+ struct pinctrl_dev *pctrl;
+ const struct zynqmp_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct zynqmp_pmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+/**
+ * struct zynqmp_pctrl_group - Pin control group info
+ * @name: Group name
+ * @pins: Group pin numbers
+ * @npins: Number of pins in the group
+ */
+struct zynqmp_pctrl_group {
+ const char *name;
+ unsigned int pins[MAX_GROUP_PIN];
+ unsigned int npins;
+};
+
+static struct pinctrl_desc zynqmp_desc;
+
+static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *npins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops zynqmp_pctrl_ops = {
+ .get_groups_count = zynqmp_pctrl_get_groups_count,
+ .get_group_name = zynqmp_pctrl_get_group_name,
+ .get_group_pins = zynqmp_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int zynqmp_pinmux_request_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ ret = zynqmp_pm_pinctrl_request(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "request failed for pin %u\n", pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+/**
+ * zynqmp_pmux_get_function_groups() - Get groups for the function
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID
+ * @groups: Group names.
+ * @num_groups: Number of function groups.
+ *
+ * Get function's group count and group names.
+ */
+static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_set_mux() - Set requested function for the group
+ * @pctldev: Pincontrol device pointer.
+ * @function: Function ID.
+ * @group: Group ID.
+ *
+ * Loop through all pins of the group and call firmware API
+ * to set requested function for all pins in the group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[group];
+ int ret, i;
+
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+
+ ret = zynqmp_pm_pinctrl_set_function(pin, function);
+ if (ret) {
+ dev_err(pctldev->dev, "set mux failed for pin %u\n",
+ pin);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int zynqmp_pinmux_release_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ ret = zynqmp_pm_pinctrl_release(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "free pin failed for pin %u\n",
+ pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops zynqmp_pinmux_ops = {
+ .request = zynqmp_pinmux_request_pin,
+ .get_functions_count = zynqmp_pmux_get_functions_count,
+ .get_function_name = zynqmp_pmux_get_function_name,
+ .get_function_groups = zynqmp_pmux_get_function_groups,
+ .set_mux = zynqmp_pinmux_set_mux,
+ .free = zynqmp_pinmux_release_pin,
+};
+
+/**
+ * zynqmp_pinconf_cfg_get() - get config value for the pin
+ * @pctldev: Pin control device pointer.
+ * @pin: Pin number.
+ * @config: Value of config param.
+ *
+ * Get value of the requested configuration parameter for the
+ * given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ unsigned int arg, param = pinconf_to_config_param(*config);
+ int ret;
+
+ if (pin >= zynqmp_desc.npins)
+ return -EOPNOTSUPP;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ param = PM_PINCTRL_CONFIG_SLEW_RATE;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_UP)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_DOWN)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ param = PM_PINCTRL_CONFIG_BIAS_STATUS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ if (arg != PM_PINCTRL_BIAS_DISABLE)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ param = PM_PINCTRL_CONFIG_VOLTAGE_STATUS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ param = PM_PINCTRL_CONFIG_SCHMITT_CMOS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ param = PM_PINCTRL_CONFIG_DRIVE_STRENGTH;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ switch (arg) {
+ case PM_PINCTRL_DRIVE_STRENGTH_2MA:
+ arg = DRIVE_STRENGTH_2MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_4MA:
+ arg = DRIVE_STRENGTH_4MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_8MA:
+ arg = DRIVE_STRENGTH_8MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_12MA:
+ arg = DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ param = pinconf_to_config_param(*config);
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_cfg_set() - Set requested config for the pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Loop through all configurations and call firmware API
+ * to set requested configurations for the pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+
+ if (pin >= zynqmp_desc.npins)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
+ unsigned int value;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ param = PM_PINCTRL_CONFIG_SLEW_RATE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ arg = PM_PINCTRL_BIAS_PULL_UP;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ arg = PM_PINCTRL_BIAS_PULL_DOWN;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ param = PM_PINCTRL_CONFIG_BIAS_STATUS;
+ arg = PM_PINCTRL_BIAS_DISABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ param = PM_PINCTRL_CONFIG_SCHMITT_CMOS;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (arg) {
+ case DRIVE_STRENGTH_2MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_2MA;
+ break;
+ case DRIVE_STRENGTH_4MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_4MA;
+ break;
+ case DRIVE_STRENGTH_8MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_8MA;
+ break;
+ case DRIVE_STRENGTH_12MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+
+ param = PM_PINCTRL_CONFIG_DRIVE_STRENGTH;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, value);
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ param = PM_PINCTRL_CONFIG_VOLTAGE_STATUS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &value);
+
+ if (arg != value)
+ dev_warn(pctldev->dev,
+ "Invalid IO Standard requested for pin %d\n",
+ pin);
+
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_MODE_LOW_POWER:
+ /*
+ * These cases are mentioned in the dts but the
+ * configurable registers are unknown, so accept
+ * them silently for now to avoid boot-time warnings.
+ */
+ ret = 0;
+ break;
+ default:
+ dev_warn(pctldev->dev,
+ "unsupported configuration parameter '%u'\n",
+ param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+ if (ret)
+ dev_warn(pctldev->dev,
+ "failed to set: pin %u param %u value %u\n",
+ pin, param, arg);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_group_set() - Set requested config for the group
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Call function to set configs for each pin in the group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[selector];
+
+ for (i = 0; i < pgrp->npins; i++) {
+ ret = zynqmp_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops zynqmp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zynqmp_pinconf_cfg_get,
+ .pin_config_set = zynqmp_pinconf_cfg_set,
+ .pin_config_group_set = zynqmp_pinconf_group_set,
+};
+
+static struct pinctrl_desc zynqmp_desc = {
+ .name = "zynqmp_pinctrl",
+ .owner = THIS_MODULE,
+ .pctlops = &zynqmp_pctrl_ops,
+ .pmxops = &zynqmp_pinmux_ops,
+ .confops = &zynqmp_pinconf_ops,
+};
+
+static int zynqmp_pinctrl_get_function_groups(u32 fid, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+ qdata.arg2 = index;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &payload[1], PINCTRL_GET_FUNC_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_get_func_num_groups(u32 fid, unsigned int *ngroups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ *ngroups = payload[1];
+
+ return ret;
+}
+
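+
The query responses pack data into u32 payload words: payload[0] carries status, and the six u16 group IDs occupy payload[1..3] (12 bytes, matching PINCTRL_GET_FUNC_GROUPS_RESP_LEN). A layout sketch with made-up IDs and no firmware call involved (assumes a little-endian target, as on ZynqMP):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_GROUPS_PER_RESP		 6
#define PINCTRL_GET_FUNC_GROUPS_RESP_LEN 12
#define NA_GROUP			 0xFFFF
#define RESERVED_GROUP			 0xFFFE

int main(void)
{
	/* payload[0] is status; group IDs start at payload[1] */
	uint32_t payload[4] = { 0, 0x00010000, 0xFFFE0002, 0xFFFFFFFF };
	uint16_t groups[NUM_GROUPS_PER_RESP];
	int i;

	memcpy(groups, &payload[1], PINCTRL_GET_FUNC_GROUPS_RESP_LEN);
	for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
		if (groups[i] == NA_GROUP)
			break;			/* end of the list */
		if (groups[i] == RESERVED_GROUP)
			continue;		/* skip the hole */
		printf("group id %u\n", (unsigned int)groups[i]);
	}
	return 0;	/* prints group ids 0, 1, 2 */
}
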
+/**
+ * zynqmp_pinctrl_prepare_func_groups() - prepare function and groups data
+ * @dev: Device pointer.
+ * @fid: Function ID.
+ * @func: Function data.
+ * @groups: Groups data.
+ *
+ * Query the firmware for the group IDs of each function. Based on the
+ * group index within the function, group names are derived: the first
+ * group of the "eth0" function is named "eth0_0_grp", the second
+ * "eth0_1_grp", and so on.
+ *
+ * The group ID returned by the firmware indexes the global groups
+ * array, which stores the derived name: if the first group ID of
+ * "eth0" is x, groups[x].name is set to "eth0_0_grp".
+ *
+ * Once this is done for every function, each function has its group
+ * names and each group has its own name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
+ struct zynqmp_pmux_function *func,
+ struct zynqmp_pctrl_group *groups)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ const char **fgroups;
+ int ret = 0, index, i;
+
+ fgroups = devm_kzalloc(dev, sizeof(*fgroups) * func->ngroups, GFP_KERNEL);
+ if (!fgroups)
+ return -ENOMEM;
+
+ for (index = 0; index < func->ngroups; index += NUM_GROUPS_PER_RESP) {
+ ret = zynqmp_pinctrl_get_function_groups(fid, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == NA_GROUP)
+ goto done;
+
+ if (resp[i] == RESERVED_GROUP)
+ continue;
+
+ fgroups[index + i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ if (!fgroups[index + i])
+ return -ENOMEM;
+
+ groups[resp[i]].name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ if (!groups[resp[i]].name)
+ return -ENOMEM;
+ }
+ }
+done:
+ func->groups = fgroups;
+
+ return ret;
+}
+
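+
The naming scheme the loop above implements: group index i of function "eth0" becomes "eth0_i_grp" (devm_kasprintf() in the driver, plain snprintf() in this sketch):

#include <stdio.h>

int main(void)
{
	char name[32];
	int i;

	for (i = 0; i < 3; i++) {
		snprintf(name, sizeof(name), "%s_%d_grp", "eth0", i);
		printf("%s\n", name);	/* eth0_0_grp, eth0_1_grp, ... */
	}
	return 0;
}
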
+static void zynqmp_pinctrl_get_function_name(u32 fid, char *name)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_NAME;
+ qdata.arg1 = fid;
+
+ /*
+ * The function name is up to 16 bytes, which fills the SMC
+ * buffers and leaves no room for a return value, hence the
+ * return value is ignored for this specific qid.
+ */
+ zynqmp_pm_query_data(qdata, payload);
+ memcpy(name, payload, PINCTRL_GET_FUNC_NAME_RESP_LEN);
+}
+
+static int zynqmp_pinctrl_get_num_functions(unsigned int *nfuncs)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTIONS;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ *nfuncs = payload[1];
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_get_pin_groups(u32 pin, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_PIN_GROUPS;
+ qdata.arg1 = pin;
+ qdata.arg2 = index;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &payload[1], PINCTRL_GET_PIN_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+static void zynqmp_pinctrl_group_add_pin(struct zynqmp_pctrl_group *group,
+ unsigned int pin)
+{
+ group->pins[group->npins++] = pin;
+}
+
+/**
+ * zynqmp_pinctrl_create_pin_groups() - assign pins to respective groups
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @pin: Pin number.
+ *
+ * Query firmware to get groups available for the given pin.
+ * Based on the firmware response (group IDs for the pin), add the
+ * pin number to the respective group's pin array.
+ *
+ * Once all pins have been queried, each group has its pin count
+ * and pin numbers.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_create_pin_groups(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int pin)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ int ret, i, index = 0;
+
+ do {
+ ret = zynqmp_pinctrl_get_pin_groups(pin, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == NA_GROUP)
+ return ret;
+
+ if (resp[i] == RESERVED_GROUP)
+ continue;
+
+ zynqmp_pinctrl_group_add_pin(&groups[resp[i]], pin);
+ }
+ index += NUM_GROUPS_PER_RESP;
+ } while (index <= MAX_PIN_GROUPS);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_group_pins() - prepare each group's pin data
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @ngroups: Number of groups.
+ *
+ * Prepare the pin numbers and pin count for each group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_group_pins(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int ngroups)
+{
+ unsigned int pin;
+ int ret;
+
+ for (pin = 0; pin < zynqmp_desc.npins; pin++) {
+ ret = zynqmp_pinctrl_create_pin_groups(dev, groups, pin);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_function_info() - prepare function info
+ * @dev: Device pointer.
+ * @pctrl: Pin control driver data.
+ *
+ * Query firmware for functions, groups and pin information and
+ * prepare pin control driver data.
+ *
+ * Query number of functions and number of function groups (number
+ * of groups in given function) to allocate required memory buffers
+ * for functions and groups. Once buffers are allocated to store
+ * functions and groups data, query and store required information
+ * (number of groups and group names for each function, number of
+ * pins and pin numbers for each group).
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
+ struct zynqmp_pinctrl *pctrl)
+{
+ struct zynqmp_pmux_function *funcs;
+ struct zynqmp_pctrl_group *groups;
+ int ret, i;
+
+ ret = zynqmp_pinctrl_get_num_functions(&pctrl->nfuncs);
+ if (ret)
+ return ret;
+
+ funcs = devm_kzalloc(dev, sizeof(*funcs) * pctrl->nfuncs, GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ zynqmp_pinctrl_get_function_name(i, funcs[i].name);
+
+ ret = zynqmp_pinctrl_get_func_num_groups(i, &funcs[i].ngroups);
+ if (ret)
+ return ret;
+
+ pctrl->ngroups += funcs[i].ngroups;
+ }
+
+ groups = devm_kzalloc(dev, sizeof(*groups) * pctrl->ngroups, GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ ret = zynqmp_pinctrl_prepare_func_groups(dev, i, &funcs[i],
+ groups);
+ if (ret)
+ return ret;
+ }
+
+ ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
+ if (ret)
+ return ret;
+
+ pctrl->funcs = funcs;
+ pctrl->groups = groups;
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_get_num_pins(unsigned int *npins)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_PINS;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ *npins = payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_pin_desc() - prepare pin description info
+ * @dev: Device pointer.
+ * @zynqmp_pins: Pin information.
+ * @npins: Number of pins.
+ *
+ * Query the number of pins from firmware and prepare a pin
+ * description for each, containing the pin number and pin name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
+ const struct pinctrl_pin_desc
+ **zynqmp_pins,
+ unsigned int *npins)
+{
+ struct pinctrl_pin_desc *pins, *pin;
+ int ret;
+ int i;
+
+ ret = zynqmp_pinctrl_get_num_pins(npins);
+ if (ret)
+ return ret;
+
+ pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < *npins; i++) {
+ pin = &pins[i];
+ pin->number = i;
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ if (!pin->name)
+ return -ENOMEM;
+ }
+
+ *zynqmp_pins = pins;
+
+ return 0;
+}
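+
+/*
+ * For illustration: assuming ZYNQMP_PIN_PREFIX expands to "MIO" (the macro
+ * is defined earlier in the file, outside this excerpt), the loop above
+ * produces descriptors named "MIO0" through "MIO<npins - 1>", with each
+ * pin->number equal to its index.
+ */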
+
+static int zynqmp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev,
+ &zynqmp_desc.pins,
+ &zynqmp_desc.npins);
+ if (ret) {
+ dev_err(&pdev->dev, "pin desc prepare fail with %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = zynqmp_pinctrl_prepare_function_info(&pdev->dev, pctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "function info prepare fail with %d\n",
+ ret);
+ return ret;
+ }
+
+ pctrl->pctrl = pinctrl_register(&zynqmp_desc, &pdev->dev, pctrl);
+ if (IS_ERR(pctrl->pctrl))
+ return PTR_ERR(pctrl->pctrl);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_remove(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_pinctrl_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pinctrl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_pinctrl_of_match);
+
+static struct platform_driver zynqmp_pinctrl_driver = {
+ .driver = {
+ .name = "zynqmp-pinctrl",
+ .of_match_table = zynqmp_pinctrl_of_match,
+ },
+ .probe = zynqmp_pinctrl_probe,
+ .remove = zynqmp_pinctrl_remove,
+};
+
+module_platform_driver(zynqmp_pinctrl_driver);
+
+MODULE_AUTHOR("Sai Krishna Potthuri <lakshmi.sai.krishna.potthuri@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP Pin Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 36a11c9e893a..6cdbd9ccf2f0 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "pinmux core: " fmt
+#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -673,13 +674,114 @@ void pinmux_show_setting(struct seq_file *s,
DEFINE_SHOW_ATTRIBUTE(pinmux_functions);
DEFINE_SHOW_ATTRIBUTE(pinmux_pins);
+#define PINMUX_SELECT_MAX 128
+static ssize_t pinmux_select(struct file *file, const char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *sfile = file->private_data;
+ struct pinctrl_dev *pctldev = sfile->private;
+ const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
+ const char *const *groups;
+ char *buf, *gname, *fname;
+ unsigned int num_groups;
+ int fsel, gsel, ret;
+
+ if (len > PINMUX_SELECT_MAX)
+ return -ENOMEM;
+
+ buf = kzalloc(PINMUX_SELECT_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = strncpy_from_user(buf, user_buf, PINMUX_SELECT_MAX);
+ if (ret < 0)
+ goto exit_free_buf;
+ buf[len-1] = '\0';
+
+ /* remove leading and trailing spaces from the input buffer */
+ gname = strstrip(buf);
+ if (*gname == '\0') {
+ ret = -EINVAL;
+ goto exit_free_buf;
+ }
+
+ /* find a separator, which is any whitespace character */
+ for (fname = gname; !isspace(*fname); fname++) {
+ if (*fname == '\0') {
+ ret = -EINVAL;
+ goto exit_free_buf;
+ }
+ }
+ *fname = '\0';
+
+ /* drop extra spaces between function and group names */
+ fname = skip_spaces(fname + 1);
+ if (*fname == '\0') {
+ ret = -EINVAL;
+ goto exit_free_buf;
+ }
+
+ ret = pinmux_func_name_to_selector(pctldev, fname);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "invalid function %s in map table\n", fname);
+ goto exit_free_buf;
+ }
+ fsel = ret;
+
+ ret = pmxops->get_function_groups(pctldev, fsel, &groups, &num_groups);
+ if (ret) {
+ dev_err(pctldev->dev, "no groups for function %d (%s)", fsel, fname);
+ goto exit_free_buf;
+ }
+
+ ret = match_string(groups, num_groups, gname);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "invalid group %s", gname);
+ goto exit_free_buf;
+ }
+
+ ret = pinctrl_get_group_selector(pctldev, gname);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "failed to get group selector for %s", gname);
+ goto exit_free_buf;
+ }
+ gsel = ret;
+
+ ret = pmxops->set_mux(pctldev, fsel, gsel);
+ if (ret) {
+ dev_err(pctldev->dev, "set_mux() failed: %d", ret);
+ goto exit_free_buf;
+ }
+ ret = len;
+
+exit_free_buf:
+ kfree(buf);
+
+ return ret;
+}
+
+static int pinmux_select_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, inode->i_private);
+}
+
+static const struct file_operations pinmux_select_ops = {
+ .owner = THIS_MODULE,
+ .open = pinmux_select_open,
+ .write = pinmux_select,
+ .llseek = no_llseek,
+ .release = single_release,
+};
+
void pinmux_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
- debugfs_create_file("pinmux-functions", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinmux-functions", 0444,
devroot, pctldev, &pinmux_functions_fops);
- debugfs_create_file("pinmux-pins", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinmux-pins", 0444,
devroot, pctldev, &pinmux_pins_fops);
+ debugfs_create_file("pinmux-select", 0200,
+ devroot, pctldev, &pinmux_select_ops);
}
#endif /* CONFIG_DEBUG_FS */
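
Editorial note on the new write-only pinmux-select file: per the tokenizing above it takes a group name first, then a function name, separated by whitespace. Assuming debugfs is mounted in the usual place, a sketch of activating a function on a group from the shell would be:

	echo "<group-name> <function-name>" > /sys/kernel/debug/pinctrl/<pctldev>/pinmux-select

where <pctldev> is the controller's debugfs directory; the write fails if either name does not resolve or if set_mux() rejects the pair.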
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index eab029a21643..d2568dab8c78 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -194,7 +194,7 @@ static int pxa2xx_pconf_group_get(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pctl->lock, flags);
val = readl_relaxed(pgsr) & BIT(pin % 32);
- *config = val ? PIN_CONFIG_LOW_POWER_MODE : 0;
+ *config = val ? PIN_CONFIG_MODE_LOW_POWER : 0;
spin_unlock_irqrestore(&pctl->lock, flags);
dev_dbg(pctl->dev, "get sleep gpio state(pin=%d) %d\n",
@@ -217,7 +217,7 @@ static int pxa2xx_pconf_group_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
switch (pinconf_to_config_param(configs[i])) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
is_set = pinconf_to_config_argument(configs[i]);
break;
default:
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 6853a896c476..25d2f7f7f3b6 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -3,7 +3,7 @@ if (ARCH_QCOM || COMPILE_TEST)
config PINCTRL_MSM
tristate "Qualcomm core pin controller driver"
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+ depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -222,7 +222,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
- depends on GPIOLIB && OF
+ depends on GPIOLIB && (OF || ACPI)
select PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 9d41abfca37e..afddf6d60dbe 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1449,6 +1449,28 @@ static const struct msm_pingroup sc7280_groups[] = {
[182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sc7280_pdc_map[] = {
+ { 0, 134 }, { 3, 131 }, { 4, 121 }, { 7, 103 }, { 8, 155 },
+ { 11, 93 }, { 12, 78 }, { 15, 79 }, { 16, 80 }, { 18, 81 },
+ { 19, 107 }, { 20, 82 }, { 21, 83 }, { 23, 99 }, { 24, 86 },
+ { 25, 95 }, { 27, 158 }, { 28, 159 }, { 31, 90 }, { 32, 144 },
+ { 34, 77 }, { 35, 92 }, { 36, 157 }, { 39, 73 }, { 40, 97 },
+ { 41, 98 }, { 43, 85 }, { 44, 100 }, { 45, 101 }, { 47, 102 },
+ { 48, 74 }, { 51, 112 }, { 52, 156 }, { 54, 117 }, { 55, 84 },
+ { 56, 108 }, { 59, 110 }, { 60, 111 }, { 61, 123 }, { 63, 104 },
+ { 68, 127 }, { 72, 150 }, { 75, 133 }, { 77, 125 }, { 78, 105 },
+ { 79, 106 }, { 80, 118 }, { 81, 119 }, { 82, 162 }, { 83, 122 },
+ { 86, 75 }, { 88, 154 }, { 89, 124 }, { 90, 149 }, { 91, 76 },
+ { 93, 128 }, { 95, 160 }, { 101, 126 }, { 102, 96 }, { 103, 116 },
+ { 104, 114 }, { 112, 72 }, { 116, 135 }, { 117, 163 }, { 119, 137 },
+ { 121, 138 }, { 123, 139 }, { 125, 140 }, { 127, 141 }, { 128, 165 },
+ { 129, 143 }, { 130, 94 }, { 131, 145 }, { 133, 146 }, { 136, 147 },
+ { 140, 148 }, { 141, 115 }, { 142, 113 }, { 145, 130 }, { 148, 132 },
+ { 150, 87 }, { 151, 88 }, { 153, 89 }, { 155, 164 }, { 156, 129 },
+ { 157, 161 }, { 158, 120 }, { 161, 136 }, { 163, 142 }, { 172, 166 },
+ { 174, 167 },
+};
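+
+/*
+ * Each entry maps a TLMM GPIO number to its wake interrupt on the PDC (the
+ * always-on interrupt controller), e.g. { 0, 134 } routes wake events for
+ * GPIO 0 to PDC pin 134, letting the GPIO wake the system while the TLMM
+ * itself is powered down.
+ */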
+
static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
.pins = sc7280_pins,
.npins = ARRAY_SIZE(sc7280_pins),
@@ -1457,6 +1479,8 @@ static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
.groups = sc7280_groups,
.ngroups = ARRAY_SIZE(sc7280_groups),
.ngpios = 176,
+ .wakeirq_map = sc7280_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sc7280_pdc_map),
};
static int sc7280_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index b765bf667574..0d9654b4ab60 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -23,6 +23,21 @@ enum {
WEST
};
+/*
+ * ACPI DSDT has a single memory resource for TLMM. The offsets below are
+ * used to locate different tiles for ACPI probe.
+ */
+struct tile_info {
+ u32 offset;
+ u32 size;
+};
+
+static const struct tile_info sc8180x_tile_info[] = {
+ { 0x00d00000, 0x00300000, },
+ { 0x00500000, 0x00700000, },
+ { 0x00100000, 0x00300000, },
+};
+
#define FUNCTION(fname) \
[msm_mux_##fname] = { \
.name = #fname, \
@@ -1557,6 +1572,13 @@ static const struct msm_pingroup sc8180x_groups[] = {
[193] = SDC_QDSD_PINGROUP(sdc2_data, 0x4b2000, 9, 0),
};
+static const int sc8180x_acpi_reserved_gpios[] = {
+ 0, 1, 2, 3,
+ 47, 48, 49, 50,
+ 126, 127, 128, 129,
+ -1 /* terminator */
+};
+
static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
@@ -1588,13 +1610,109 @@ static struct msm_pinctrl_soc_data sc8180x_pinctrl = {
.nwakeirq_map = ARRAY_SIZE(sc8180x_pdc_map),
};
+static const struct msm_pinctrl_soc_data sc8180x_acpi_pinctrl = {
+ .tiles = sc8180x_tiles,
+ .ntiles = ARRAY_SIZE(sc8180x_tiles),
+ .pins = sc8180x_pins,
+ .npins = ARRAY_SIZE(sc8180x_pins),
+ .groups = sc8180x_groups,
+ .ngroups = ARRAY_SIZE(sc8180x_groups),
+ .reserved_gpios = sc8180x_acpi_reserved_gpios,
+ .ngpios = 190,
+};
+
+/*
+ * ACPI DSDT has a single memory resource for TLMM, which violates the
+ * hardware layout of 3 separate tiles. Let's split the memory resource into
+ * 3 named ones, so that msm_pinctrl_probe() can map memory for ACPI in the
+ * same way as for DT probe.
+ */
+static int sc8180x_pinctrl_add_tile_resources(struct platform_device *pdev)
+{
+ int nres_num = pdev->num_resources + ARRAY_SIZE(sc8180x_tiles) - 1;
+ struct resource *mres, *nres, *res;
+ int i, ret;
+
+ /*
+ * DT already has tiles defined properly, so nothing needs to be done
+ * for DT probe.
+ */
+ if (pdev->dev.of_node)
+ return 0;
+
+ /* Allocate for new resources */
+ nres = devm_kzalloc(&pdev->dev, sizeof(*nres) * nres_num, GFP_KERNEL);
+ if (!nres)
+ return -ENOMEM;
+
+ res = nres;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ /* Save memory resource and copy others */
+ if (resource_type(r) == IORESOURCE_MEM)
+ mres = r;
+ else
+ *res++ = *r;
+ }
+
+ /* Append tile memory resources */
+ for (i = 0; i < ARRAY_SIZE(sc8180x_tiles); i++, res++) {
+ const struct tile_info *info = &sc8180x_tile_info[i];
+
+ res->start = mres->start + info->offset;
+ res->end = mres->start + info->offset + info->size - 1;
+ res->flags = mres->flags;
+ res->name = sc8180x_tiles[i];
+
+ /* Add new MEM to resource tree */
+ insert_resource(mres->parent, res);
+ }
+
+ /* Remove old MEM from resource tree */
+ remove_resource(mres);
+
+ /* Free old resources and install new ones */
+ ret = platform_device_add_resources(pdev, nres, nres_num);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add new resources: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
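+
+/*
+ * Concrete example of the arithmetic: if the single ACPI MEM resource
+ * starts at base B, the three inserted tile resources span
+ * [B + 0x00d00000, B + 0x00ffffff], [B + 0x00500000, B + 0x00bfffff] and
+ * [B + 0x00100000, B + 0x003fffff], each taking its name from the
+ * corresponding sc8180x_tiles entry.
+ */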
+
static int sc8180x_pinctrl_probe(struct platform_device *pdev)
{
- return msm_pinctrl_probe(pdev, &sc8180x_pinctrl);
+ const struct msm_pinctrl_soc_data *soc_data;
+ int ret;
+
+ soc_data = device_get_match_data(&pdev->dev);
+ if (!soc_data)
+ return -EINVAL;
+
+ ret = sc8180x_pinctrl_add_tile_resources(pdev);
+ if (ret)
+ return ret;
+
+ return msm_pinctrl_probe(pdev, soc_data);
}
+static const struct acpi_device_id sc8180x_pinctrl_acpi_match[] = {
+ {
+ .id = "QCOM040D",
+ .driver_data = (kernel_ulong_t) &sc8180x_acpi_pinctrl,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sc8180x_pinctrl_acpi_match);
+
static const struct of_device_id sc8180x_pinctrl_of_match[] = {
- { .compatible = "qcom,sc8180x-tlmm", },
+ {
+ .compatible = "qcom,sc8180x-tlmm",
+ .data = &sc8180x_pinctrl,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sc8180x_pinctrl_of_match);
@@ -1603,6 +1721,7 @@ static struct platform_driver sc8180x_pinctrl_driver = {
.driver = {
.name = "sc8180x-pinctrl",
.of_match_table = sc8180x_pinctrl_of_match,
+ .acpi_match_table = sc8180x_pinctrl_acpi_match,
},
.probe = sc8180x_pinctrl_probe,
.remove = msm_pinctrl_remove,
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index a406ed0ec7d3..4d8f8636c2b3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1603,6 +1603,25 @@ static const struct msm_pingroup sm8350_groups[] = {
[206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = {
+ { 2, 117 }, { 7, 82 }, { 11, 83 }, { 14, 80 }, { 15, 146 },
+ { 19, 121 }, { 23, 84 }, { 26, 86 }, { 27, 75 }, { 31, 85 },
+ { 32, 97 }, { 34, 98 }, { 35, 131 }, { 36, 79 }, { 38, 99 },
+ { 39, 92 }, { 40, 101 }, { 43, 137 }, { 44, 102 }, { 46, 96 },
+ { 47, 93 }, { 50, 108 }, { 51, 127 }, { 55, 128 }, { 56, 81 },
+ { 59, 112 }, { 60, 119 }, { 63, 73 }, { 67, 74 }, { 71, 134 },
+ { 75, 103 }, { 79, 104 }, { 80, 126 }, { 81, 139 }, { 82, 140 },
+ { 83, 141 }, { 84, 124 }, { 85, 109 }, { 86, 143 }, { 87, 138 },
+ { 88, 122 }, { 89, 113 }, { 90, 114 }, { 91, 115 }, { 92, 76 },
+ { 95, 147 }, { 96, 148 }, { 98, 149 }, { 99, 150 }, { 115, 125 },
+ { 116, 106 }, { 117, 105 }, { 118, 116 }, { 119, 123 }, { 130, 145 },
+ { 136, 72 }, { 140, 100 }, { 151, 110 }, { 153, 95 }, { 155, 107 },
+ { 156, 94 }, { 157, 111 }, { 159, 118 }, { 162, 77 }, { 165, 78 },
+ { 169, 70 }, { 172, 132 }, { 174, 87 }, { 175, 88 }, { 177, 89 },
+ { 179, 120 }, { 180, 129 }, { 183, 90 }, { 185, 136 }, { 187, 142 },
+ { 190, 144 }, { 198, 91 }, { 200, 133 }, { 202, 135 },
+};
+
static const struct msm_pinctrl_soc_data sm8350_tlmm = {
.pins = sm8350_pins,
.npins = ARRAY_SIZE(sm8350_pins),
@@ -1611,6 +1630,8 @@ static const struct msm_pinctrl_soc_data sm8350_tlmm = {
.groups = sm8350_groups,
.ngroups = ARRAY_SIZE(sm8350_groups),
.ngpios = 204,
+ .wakeirq_map = sm8350_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8350_pdc_map),
};
static int sm8350_tlmm_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 9801c717e311..00870da0c94e 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1127,8 +1127,15 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
+ { .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
/* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */
{ .compatible = "qcom,pmx55-gpio", .data = (void *) 11 },
{ },
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 2bfd3006f6fd..5ccc49b387f1 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -394,26 +394,6 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
-const struct pinmux_bias_reg *
-sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
- unsigned int *bit)
-{
- unsigned int i, j;
-
- for (i = 0; pfc->info->bias_regs[i].puen; i++) {
- for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
- if (pfc->info->bias_regs[i].pins[j] == pin) {
- *bit = j;
- return &pfc->info->bias_regs[i];
- }
- }
- }
-
- WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
-
- return NULL;
-}
-
static int sh_pfc_init_ranges(struct sh_pfc *pfc)
{
struct sh_pfc_pin_range *range;
diff --git a/drivers/pinctrl/renesas/core.h b/drivers/pinctrl/renesas/core.h
index 5ca7e0830ae9..51f391e9713a 100644
--- a/drivers/pinctrl/renesas/core.h
+++ b/drivers/pinctrl/renesas/core.h
@@ -29,12 +29,4 @@ void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-const struct pinmux_bias_reg *
-sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
- unsigned int *bit);
-
-unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
-void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias);
-
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/renesas/pfc-r8a73a4.c b/drivers/pinctrl/renesas/pfc-r8a73a4.c
index b21f5afe610f..b26ff9d6ead4 100644
--- a/drivers/pinctrl/renesas/pfc-r8a73a4.c
+++ b/drivers/pinctrl/renesas/pfc-r8a73a4.c
@@ -2649,59 +2649,21 @@ static const struct pinmux_irq pinmux_irqs[] = {
PINMUX_IRQ(329), /* IRQ57 */
};
-#define PORTCR_PULMD_OFF (0 << 6)
-#define PORTCR_PULMD_DOWN (2 << 6)
-#define PORTCR_PULMD_UP (3 << 6)
-#define PORTCR_PULMD_MASK (3 << 6)
-
static const unsigned int r8a73a4_portcr_offsets[] = {
0x00000000, 0x00001000, 0x00000000, 0x00001000,
0x00001000, 0x00002000, 0x00002000, 0x00002000,
0x00002000, 0x00003000, 0x00003000,
};
-static unsigned int r8a73a4_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- void __iomem *addr;
-
- addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
-
- switch (ioread8(addr) & PORTCR_PULMD_MASK) {
- case PORTCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void __iomem *r8a73a4_pin_to_portcr(struct sh_pfc *pfc, unsigned int pin)
{
- void __iomem *addr;
- u32 value;
-
- addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
- value = ioread8(addr) & ~PORTCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
+ return pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
}
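
Editorial note: the address computation indexes the offset table by pin >> 5 (one entry per bank of 32 pins) and then adds the full pin number, each PORTnCR register being one byte wide; pin 37, for instance, resolves to window base + r8a73a4_portcr_offsets[1] (0x1000) + 37.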
static const struct sh_pfc_soc_operations r8a73a4_pfc_ops = {
- .get_bias = r8a73a4_pinmux_get_bias,
- .set_bias = r8a73a4_pinmux_set_bias,
+ .get_bias = rmobile_pinmux_get_bias,
+ .set_bias = rmobile_pinmux_set_bias,
+ .pin_to_portcr = r8a73a4_pin_to_portcr,
};
const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a7740.c b/drivers/pinctrl/renesas/pfc-r8a7740.c
index fdf1b0f09f57..4eac3899d69b 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7740.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7740.c
@@ -3672,11 +3672,6 @@ static const struct pinmux_irq pinmux_irqs[] = {
PINMUX_IRQ(41, 167), /* IRQ31A */
};
-#define PORTnCR_PULMD_OFF (0 << 6)
-#define PORTnCR_PULMD_DOWN (2 << 6)
-#define PORTnCR_PULMD_UP (3 << 6)
-#define PORTnCR_PULMD_MASK (3 << 6)
-
struct r8a7740_portcr_group {
unsigned int end_pin;
unsigned int offset;
@@ -3686,7 +3681,7 @@ static const struct r8a7740_portcr_group r8a7740_portcr_offsets[] = {
{ 83, 0x0000 }, { 114, 0x1000 }, { 209, 0x2000 }, { 211, 0x3000 },
};
-static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
+static void __iomem *r8a7740_pin_to_portcr(struct sh_pfc *pfc, unsigned int pin)
{
unsigned int i;
@@ -3701,43 +3696,10 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
return NULL;
}
-static unsigned int r8a7740_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
-{
- void __iomem *addr = r8a7740_pinmux_portcr(pfc, pin);
- u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
-
- switch (value) {
- case PORTnCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTnCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTnCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void r8a7740_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- void __iomem *addr = r8a7740_pinmux_portcr(pfc, pin);
- u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTnCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTnCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
-}
-
static const struct sh_pfc_soc_operations r8a7740_pfc_ops = {
- .get_bias = r8a7740_pinmux_get_bias,
- .set_bias = r8a7740_pinmux_set_bias,
+ .get_bias = rmobile_pinmux_get_bias,
+ .set_bias = rmobile_pinmux_set_bias,
+ .pin_to_portcr = r8a7740_pin_to_portcr,
};
const struct sh_pfc_soc_info r8a7740_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c
index 75f52b1798c3..6185af9c4990 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7778.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7778.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include "core.h"
#include "sh_pfc.h"
#define PORT_GP_PUP_1(bank, pin, fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a7791.c b/drivers/pinctrl/renesas/pfc-r8a7791.c
index 6fce9fe2e98f..fe4ccab6b0b8 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7791.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7791.c
@@ -16,22 +16,50 @@
* which case they support both 3.3V and 1.8V signalling.
*/
#define CPU_ALL_GP(fn, sfx) \
- PORT_GP_32(0, fn, sfx), \
- PORT_GP_26(1, fn, sfx), \
- PORT_GP_32(2, fn, sfx), \
- PORT_GP_32(3, fn, sfx), \
- PORT_GP_32(4, fn, sfx), \
- PORT_GP_32(5, fn, sfx), \
- PORT_GP_CFG_24(6, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_1(6, 24, fn, sfx), \
- PORT_GP_1(6, 25, fn, sfx), \
- PORT_GP_1(6, 26, fn, sfx), \
- PORT_GP_1(6, 27, fn, sfx), \
- PORT_GP_1(6, 28, fn, sfx), \
- PORT_GP_1(6, 29, fn, sfx), \
- PORT_GP_1(6, 30, fn, sfx), \
- PORT_GP_1(6, 31, fn, sfx), \
- PORT_GP_26(7, fn, sfx)
+ PORT_GP_CFG_32(0, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_26(1, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(2, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(3, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(4, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(5, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_24(6, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 24, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 25, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 26, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 27, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 28, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 29, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 30, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 31, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_7(7, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_1(7, 7, fn, sfx), \
+ PORT_GP_1(7, 8, fn, sfx), \
+ PORT_GP_1(7, 9, fn, sfx), \
+ PORT_GP_CFG_1(7, 10, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 11, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 12, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 13, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 14, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 15, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 16, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 17, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 18, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 19, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 20, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 21, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 22, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 23, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 24, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 25, fn, sfx, SH_PFC_PIN_CFG_PULL_UP)
+
+#define CPU_ALL_NOGP(fn) \
+ PIN_NOGP_CFG(ASEBRK_N_ACK, "ASEBRK#/ACK", fn, SH_PFC_PIN_CFG_PULL_DOWN), \
+ PIN_NOGP_CFG(AVS1, "AVS1", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(AVS2, "AVS2", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TMS, "TMS", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP)
enum {
PINMUX_RESERVED = 0,
@@ -1696,8 +1724,17 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
};
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+ NOGP_ALL(),
+};
+
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+ PINMUX_NOGP_ALL(),
};
#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
@@ -6645,8 +6682,322 @@ static int r8a7791_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return 31 - (pin & 0x1f);
}
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUPR0", 0xe6060100, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(1, 4), /* A20 */
+ [ 1] = RCAR_GP_PIN(1, 5), /* A21 */
+ [ 2] = RCAR_GP_PIN(1, 6), /* A22 */
+ [ 3] = RCAR_GP_PIN(1, 7), /* A23 */
+ [ 4] = RCAR_GP_PIN(1, 8), /* A24 */
+ [ 5] = RCAR_GP_PIN(6, 31), /* DU0_DOTCLKIN */
+ [ 6] = RCAR_GP_PIN(0, 0), /* D0 */
+ [ 7] = RCAR_GP_PIN(0, 1), /* D1 */
+ [ 8] = RCAR_GP_PIN(0, 2), /* D2 */
+ [ 9] = RCAR_GP_PIN(0, 3), /* D3 */
+ [10] = RCAR_GP_PIN(0, 4), /* D4 */
+ [11] = RCAR_GP_PIN(0, 5), /* D5 */
+ [12] = RCAR_GP_PIN(0, 6), /* D6 */
+ [13] = RCAR_GP_PIN(0, 7), /* D7 */
+ [14] = RCAR_GP_PIN(0, 8), /* D8 */
+ [15] = RCAR_GP_PIN(0, 9), /* D9 */
+ [16] = RCAR_GP_PIN(0, 10), /* D10 */
+ [17] = RCAR_GP_PIN(0, 11), /* D11 */
+ [18] = RCAR_GP_PIN(0, 12), /* D12 */
+ [19] = RCAR_GP_PIN(0, 13), /* D13 */
+ [20] = RCAR_GP_PIN(0, 14), /* D14 */
+ [21] = RCAR_GP_PIN(0, 15), /* D15 */
+ [22] = RCAR_GP_PIN(0, 16), /* A0 */
+ [23] = RCAR_GP_PIN(0, 17), /* A1 */
+ [24] = RCAR_GP_PIN(0, 18), /* A2 */
+ [25] = RCAR_GP_PIN(0, 19), /* A3 */
+ [26] = RCAR_GP_PIN(0, 20), /* A4 */
+ [27] = RCAR_GP_PIN(0, 21), /* A5 */
+ [28] = RCAR_GP_PIN(0, 22), /* A6 */
+ [29] = RCAR_GP_PIN(0, 23), /* A7 */
+ [30] = RCAR_GP_PIN(0, 24), /* A8 */
+ [31] = RCAR_GP_PIN(0, 25), /* A9 */
+ } },
+ { PINMUX_BIAS_REG("PUPR1", 0xe6060104, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 26), /* A10 */
+ [ 1] = RCAR_GP_PIN(0, 27), /* A11 */
+ [ 2] = RCAR_GP_PIN(0, 28), /* A12 */
+ [ 3] = RCAR_GP_PIN(0, 29), /* A13 */
+ [ 4] = RCAR_GP_PIN(0, 30), /* A14 */
+ [ 5] = RCAR_GP_PIN(0, 31), /* A15 */
+ [ 6] = RCAR_GP_PIN(1, 0), /* A16 */
+ [ 7] = RCAR_GP_PIN(1, 1), /* A17 */
+ [ 8] = RCAR_GP_PIN(1, 2), /* A18 */
+ [ 9] = RCAR_GP_PIN(1, 3), /* A19 */
+ [10] = PIN_TRST_N, /* TRST# */
+ [11] = PIN_TCK, /* TCK */
+ [12] = PIN_TMS, /* TMS */
+ [13] = PIN_TDI, /* TDI */
+ [14] = RCAR_GP_PIN(1, 11), /* CS1#/A26 */
+ [15] = RCAR_GP_PIN(1, 12), /* EX_CS0# */
+ [16] = RCAR_GP_PIN(1, 13), /* EX_CS1# */
+ [17] = RCAR_GP_PIN(1, 14), /* EX_CS2# */
+ [18] = RCAR_GP_PIN(1, 15), /* EX_CS3# */
+ [19] = RCAR_GP_PIN(1, 16), /* EX_CS4# */
+ [20] = RCAR_GP_PIN(1, 17), /* EX_CS5# */
+ [21] = RCAR_GP_PIN(1, 18), /* BS# */
+ [22] = RCAR_GP_PIN(1, 19), /* RD# */
+ [23] = RCAR_GP_PIN(1, 20), /* RD/WR# */
+ [24] = RCAR_GP_PIN(1, 21), /* WE0# */
+ [25] = RCAR_GP_PIN(1, 22), /* WE1# */
+ [26] = RCAR_GP_PIN(1, 23), /* EX_WAIT0 */
+ [27] = RCAR_GP_PIN(1, 24), /* DREQ0 */
+ [28] = RCAR_GP_PIN(1, 25), /* DACK0 */
+ [29] = RCAR_GP_PIN(5, 31), /* SPEEDIN */
+ [30] = RCAR_GP_PIN(2, 0), /* SSI_SCK0129 */
+ [31] = RCAR_GP_PIN(2, 1), /* SSI_WS0129 */
+ } },
+ { PINMUX_BIAS_REG("PUPR2", 0xe6060108, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(2, 2), /* SSI_SDATA0 */
+ [ 1] = RCAR_GP_PIN(2, 3), /* SSI_SCK1 */
+ [ 2] = RCAR_GP_PIN(2, 4), /* SSI_WS1 */
+ [ 3] = RCAR_GP_PIN(2, 5), /* SSI_SDATA1 */
+ [ 4] = RCAR_GP_PIN(2, 6), /* SSI_SCK2 */
+ [ 5] = RCAR_GP_PIN(2, 7), /* SSI_WS2 */
+ [ 6] = RCAR_GP_PIN(2, 8), /* SSI_SDATA2 */
+ [ 7] = RCAR_GP_PIN(2, 9), /* SSI_SCK34 */
+ [ 8] = RCAR_GP_PIN(2, 10), /* SSI_WS34 */
+ [ 9] = RCAR_GP_PIN(2, 11), /* SSI_SDATA3 */
+ [10] = RCAR_GP_PIN(2, 12), /* SSI_SCK4 */
+ [11] = RCAR_GP_PIN(2, 13), /* SSI_WS4 */
+ [12] = RCAR_GP_PIN(2, 14), /* SSI_SDATA4 */
+ [13] = RCAR_GP_PIN(2, 15), /* SSI_SCK5 */
+ [14] = RCAR_GP_PIN(2, 16), /* SSI_WS5 */
+ [15] = RCAR_GP_PIN(2, 17), /* SSI_SDATA5 */
+ [16] = RCAR_GP_PIN(2, 18), /* SSI_SCK6 */
+ [17] = RCAR_GP_PIN(2, 19), /* SSI_WS6 */
+ [18] = RCAR_GP_PIN(2, 20), /* SSI_SDATA6 */
+ [19] = RCAR_GP_PIN(2, 21), /* SSI_SCK78 */
+ [20] = RCAR_GP_PIN(2, 22), /* SSI_WS78 */
+ [21] = RCAR_GP_PIN(2, 23), /* SSI_SDATA7 */
+ [22] = RCAR_GP_PIN(2, 24), /* SSI_SDATA8 */
+ [23] = RCAR_GP_PIN(2, 25), /* SSI_SCK9 */
+ [24] = RCAR_GP_PIN(2, 26), /* SSI_WS9 */
+ [25] = RCAR_GP_PIN(2, 27), /* SSI_SDATA9 */
+ [26] = RCAR_GP_PIN(2, 28), /* AUDIO_CLKA */
+ [27] = RCAR_GP_PIN(2, 29), /* AUDIO_CLKB */
+ [28] = RCAR_GP_PIN(2, 30), /* AUDIO_CLKC */
+ [29] = RCAR_GP_PIN(2, 31), /* AUDIO_CLKOUT */
+ [30] = RCAR_GP_PIN(7, 10), /* IRQ0 */
+ [31] = RCAR_GP_PIN(7, 11), /* IRQ1 */
+ } },
+ { PINMUX_BIAS_REG("PUPR3", 0xe606010c, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(7, 12), /* IRQ2 */
+ [ 1] = RCAR_GP_PIN(7, 13), /* IRQ3 */
+ [ 2] = RCAR_GP_PIN(7, 14), /* IRQ4 */
+ [ 3] = RCAR_GP_PIN(7, 15), /* IRQ5 */
+ [ 4] = RCAR_GP_PIN(7, 16), /* IRQ6 */
+ [ 5] = RCAR_GP_PIN(7, 17), /* IRQ7 */
+ [ 6] = RCAR_GP_PIN(7, 18), /* IRQ8 */
+ [ 7] = RCAR_GP_PIN(7, 19), /* IRQ9 */
+ [ 8] = RCAR_GP_PIN(3, 0), /* DU1_DR0 */
+ [ 9] = RCAR_GP_PIN(3, 1), /* DU1_DR1 */
+ [10] = RCAR_GP_PIN(3, 2), /* DU1_DR2 */
+ [11] = RCAR_GP_PIN(3, 3), /* DU1_DR3 */
+ [12] = RCAR_GP_PIN(3, 4), /* DU1_DR4 */
+ [13] = RCAR_GP_PIN(3, 5), /* DU1_DR5 */
+ [14] = RCAR_GP_PIN(3, 6), /* DU1_DR6 */
+ [15] = RCAR_GP_PIN(3, 7), /* DU1_DR7 */
+ [16] = RCAR_GP_PIN(3, 8), /* DU1_DG0 */
+ [17] = RCAR_GP_PIN(3, 9), /* DU1_DG1 */
+ [18] = RCAR_GP_PIN(3, 10), /* DU1_DG2 */
+ [19] = RCAR_GP_PIN(3, 11), /* DU1_DG3 */
+ [20] = RCAR_GP_PIN(3, 12), /* DU1_DG4 */
+ [21] = RCAR_GP_PIN(3, 13), /* DU1_DG5 */
+ [22] = RCAR_GP_PIN(3, 14), /* DU1_DG6 */
+ [23] = RCAR_GP_PIN(3, 15), /* DU1_DG7 */
+ [24] = RCAR_GP_PIN(3, 16), /* DU1_DB0 */
+ [25] = RCAR_GP_PIN(3, 17), /* DU1_DB1 */
+ [26] = RCAR_GP_PIN(3, 18), /* DU1_DB2 */
+ [27] = RCAR_GP_PIN(3, 19), /* DU1_DB3 */
+ [28] = RCAR_GP_PIN(3, 20), /* DU1_DB4 */
+ [29] = RCAR_GP_PIN(3, 21), /* DU1_DB5 */
+ [30] = RCAR_GP_PIN(3, 22), /* DU1_DB6 */
+ [31] = RCAR_GP_PIN(3, 23), /* DU1_DB7 */
+ } },
+ { PINMUX_BIAS_REG("PUPR4", 0xe6060110, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 24), /* DU1_DOTCLKIN */
+ [ 1] = RCAR_GP_PIN(3, 25), /* DU1_DOTCLKOUT0 */
+ [ 2] = RCAR_GP_PIN(3, 26), /* DU1_DOTCLKOUT1 */
+ [ 3] = RCAR_GP_PIN(3, 27), /* DU1_EXHSYNC_DU1_HSYNC */
+ [ 4] = RCAR_GP_PIN(3, 28), /* DU1_EXVSYNC_DU1_VSYNC */
+ [ 5] = RCAR_GP_PIN(3, 29), /* DU1_EXODDF_DU1_ODDF_DISP_CDE */
+ [ 6] = RCAR_GP_PIN(3, 30), /* DU1_DISP */
+ [ 7] = RCAR_GP_PIN(3, 31), /* DU1_CDE */
+ [ 8] = RCAR_GP_PIN(4, 0), /* VI0_CLK */
+ [ 9] = RCAR_GP_PIN(4, 1), /* VI0_CLKENB */
+ [10] = RCAR_GP_PIN(4, 2), /* VI0_FIELD */
+ [11] = RCAR_GP_PIN(4, 3), /* VI0_HSYNC# */
+ [12] = RCAR_GP_PIN(4, 4), /* VI0_VSYNC# */
+ [13] = RCAR_GP_PIN(4, 5), /* VI0_DATA0_VI0_B0 */
+ [14] = RCAR_GP_PIN(4, 6), /* VI0_DATA1_VI0_B1 */
+ [15] = RCAR_GP_PIN(4, 7), /* VI0_DATA2_VI0_B2 */
+ [16] = RCAR_GP_PIN(4, 8), /* VI0_DATA3_VI0_B3 */
+ [17] = RCAR_GP_PIN(4, 9), /* VI0_DATA4_VI0_B4 */
+ [18] = RCAR_GP_PIN(4, 10), /* VI0_DATA5_VI0_B5 */
+ [19] = RCAR_GP_PIN(4, 11), /* VI0_DATA6_VI0_B6 */
+ [20] = RCAR_GP_PIN(4, 12), /* VI0_DATA7_VI0_B7 */
+ [21] = RCAR_GP_PIN(4, 13), /* VI0_G0 */
+ [22] = RCAR_GP_PIN(4, 14), /* VI0_G1 */
+ [23] = RCAR_GP_PIN(4, 15), /* VI0_G2 */
+ [24] = RCAR_GP_PIN(4, 16), /* VI0_G3 */
+ [25] = RCAR_GP_PIN(4, 17), /* VI0_G4 */
+ [26] = RCAR_GP_PIN(4, 18), /* VI0_G5 */
+ [27] = RCAR_GP_PIN(4, 19), /* VI0_G6 */
+ [28] = RCAR_GP_PIN(4, 20), /* VI0_G7 */
+ [29] = RCAR_GP_PIN(4, 21), /* VI0_R0 */
+ [30] = RCAR_GP_PIN(4, 22), /* VI0_R1 */
+ [31] = RCAR_GP_PIN(4, 23), /* VI0_R2 */
+ } },
+ { PINMUX_BIAS_REG("PUPR5", 0xe6060114, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(4, 24), /* VI0_R3 */
+ [ 1] = RCAR_GP_PIN(4, 25), /* VI0_R4 */
+ [ 2] = RCAR_GP_PIN(4, 26), /* VI0_R5 */
+ [ 3] = RCAR_GP_PIN(4, 27), /* VI0_R6 */
+ [ 4] = RCAR_GP_PIN(4, 28), /* VI0_R7 */
+ [ 5] = RCAR_GP_PIN(5, 0), /* VI1_HSYNC# */
+ [ 6] = RCAR_GP_PIN(5, 1), /* VI1_VSYNC# */
+ [ 7] = RCAR_GP_PIN(5, 2), /* VI1_CLKENB */
+ [ 8] = RCAR_GP_PIN(5, 3), /* VI1_FIELD */
+ [ 9] = RCAR_GP_PIN(5, 4), /* VI1_CLK */
+ [10] = RCAR_GP_PIN(5, 5), /* VI1_DATA0 */
+ [11] = RCAR_GP_PIN(5, 6), /* VI1_DATA1 */
+ [12] = RCAR_GP_PIN(5, 7), /* VI1_DATA2 */
+ [13] = RCAR_GP_PIN(5, 8), /* VI1_DATA3 */
+ [14] = RCAR_GP_PIN(5, 9), /* VI1_DATA4 */
+ [15] = RCAR_GP_PIN(5, 10), /* VI1_DATA5 */
+ [16] = RCAR_GP_PIN(5, 11), /* VI1_DATA6 */
+ [17] = RCAR_GP_PIN(5, 12), /* VI1_DATA7 */
+ [18] = RCAR_GP_PIN(5, 13), /* ETH_MDIO */
+ [19] = RCAR_GP_PIN(5, 14), /* ETH_CRS_DV */
+ [20] = RCAR_GP_PIN(5, 15), /* ETH_RX_ER */
+ [21] = RCAR_GP_PIN(5, 16), /* ETH_RXD0 */
+ [22] = RCAR_GP_PIN(5, 17), /* ETH_RXD1 */
+ [23] = RCAR_GP_PIN(5, 18), /* ETH_LINK */
+ [24] = RCAR_GP_PIN(5, 19), /* ETH_REFCLK */
+ [25] = RCAR_GP_PIN(5, 20), /* ETH_TXD1 */
+ [26] = RCAR_GP_PIN(5, 21), /* ETH_TX_EN */
+ [27] = RCAR_GP_PIN(5, 22), /* ETH_MAGIC */
+ [28] = RCAR_GP_PIN(5, 23), /* ETH_TXD0 */
+ [29] = RCAR_GP_PIN(5, 24), /* ETH_MDC */
+ [30] = RCAR_GP_PIN(5, 25), /* STP_IVCXO27_0 */
+ [31] = RCAR_GP_PIN(5, 26), /* STP_ISCLK_0 */
+ } },
+ { PINMUX_BIAS_REG("PUPR6", 0xe6060118, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(5, 27), /* STP_ISD_0 */
+ [ 1] = RCAR_GP_PIN(5, 28), /* STP_ISEN_0 */
+ [ 2] = RCAR_GP_PIN(5, 29), /* STP_ISSYNC_0 */
+ [ 3] = RCAR_GP_PIN(5, 30), /* STP_OPWM_0 */
+ [ 4] = RCAR_GP_PIN(6, 0), /* SD0_CLK */
+ [ 5] = RCAR_GP_PIN(6, 1), /* SD0_CMD */
+ [ 6] = RCAR_GP_PIN(6, 2), /* SD0_DATA0 */
+ [ 7] = RCAR_GP_PIN(6, 3), /* SD0_DATA1 */
+ [ 8] = RCAR_GP_PIN(6, 4), /* SD0_DATA2 */
+ [ 9] = RCAR_GP_PIN(6, 5), /* SD0_DATA3 */
+ [10] = RCAR_GP_PIN(6, 6), /* SD0_CD */
+ [11] = RCAR_GP_PIN(6, 7), /* SD0_WP */
+ [12] = RCAR_GP_PIN(6, 8), /* SD2_CLK */
+ [13] = RCAR_GP_PIN(6, 9), /* SD2_CMD */
+ [14] = RCAR_GP_PIN(6, 10), /* SD2_DATA0 */
+ [15] = RCAR_GP_PIN(6, 11), /* SD2_DATA1 */
+ [16] = RCAR_GP_PIN(6, 12), /* SD2_DATA2 */
+ [17] = RCAR_GP_PIN(6, 13), /* SD2_DATA3 */
+ [18] = RCAR_GP_PIN(6, 14), /* SD2_CD */
+ [19] = RCAR_GP_PIN(6, 15), /* SD2_WP */
+ [20] = RCAR_GP_PIN(6, 16), /* SD3_CLK */
+ [21] = RCAR_GP_PIN(6, 17), /* SD3_CMD */
+ [22] = RCAR_GP_PIN(6, 18), /* SD3_DATA0 */
+ [23] = RCAR_GP_PIN(6, 19), /* SD3_DATA1 */
+ [24] = RCAR_GP_PIN(6, 20), /* SD3_DATA2 */
+ [25] = RCAR_GP_PIN(6, 21), /* SD3_DATA3 */
+ [26] = RCAR_GP_PIN(6, 22), /* SD3_CD */
+ [27] = RCAR_GP_PIN(6, 23), /* SD3_WP */
+ [28] = RCAR_GP_PIN(6, 24), /* MSIOF0_SCK */
+ [29] = RCAR_GP_PIN(6, 25), /* MSIOF0_SYNC */
+ [30] = RCAR_GP_PIN(6, 26), /* MSIOF0_TXD */
+ [31] = RCAR_GP_PIN(6, 27), /* MSIOF0_RXD */
+ } },
+ { PINMUX_BIAS_REG("PUPR7", 0xe606011c, "N/A", 0) {
+ /* PUPR7 pull-up pins */
+ [ 0] = RCAR_GP_PIN(6, 28), /* MSIOF0_SS1 */
+ [ 1] = RCAR_GP_PIN(6, 29), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(4, 29), /* SIM0_RST */
+ [ 3] = RCAR_GP_PIN(4, 30), /* SIM0_CLK */
+ [ 4] = RCAR_GP_PIN(4, 31), /* SIM0_D */
+ [ 5] = RCAR_GP_PIN(7, 20), /* GPS_CLK */
+ [ 6] = RCAR_GP_PIN(7, 21), /* GPS_SIGN */
+ [ 7] = RCAR_GP_PIN(7, 22), /* GPS_MAG */
+ [ 8] = RCAR_GP_PIN(7, 0), /* HCTS0# */
+ [ 9] = RCAR_GP_PIN(7, 1), /* HRTS0# */
+ [10] = RCAR_GP_PIN(7, 2), /* HSCK0 */
+ [11] = RCAR_GP_PIN(7, 3), /* HRX0 */
+ [12] = RCAR_GP_PIN(7, 4), /* HTX0 */
+ [13] = RCAR_GP_PIN(7, 5), /* HRX1 */
+ [14] = RCAR_GP_PIN(7, 6), /* HTX1 */
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = RCAR_GP_PIN(1, 9), /* A25 */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = RCAR_GP_PIN(1, 10), /* CS0# */
+ [21] = RCAR_GP_PIN(7, 23), /* USB0_PWEN */
+ [22] = RCAR_GP_PIN(7, 24), /* USB0_OVC */
+ [23] = RCAR_GP_PIN(7, 25), /* USB1_PWEN */
+ [24] = RCAR_GP_PIN(6, 30), /* USB1_OVC */
+ [25] = PIN_AVS1, /* AVS1 */
+ [26] = PIN_AVS2, /* AVS2 */
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("N/A", 0, "PUPR7", 0xe606011c) {
+ /* PUPR7 pull-down pins */
+ [ 0] = SH_PFC_PIN_NONE,
+ [ 1] = SH_PFC_PIN_NONE,
+ [ 2] = SH_PFC_PIN_NONE,
+ [ 3] = SH_PFC_PIN_NONE,
+ [ 4] = SH_PFC_PIN_NONE,
+ [ 5] = SH_PFC_PIN_NONE,
+ [ 6] = SH_PFC_PIN_NONE,
+ [ 7] = SH_PFC_PIN_NONE,
+ [ 8] = SH_PFC_PIN_NONE,
+ [ 9] = SH_PFC_PIN_NONE,
+ [10] = SH_PFC_PIN_NONE,
+ [11] = SH_PFC_PIN_NONE,
+ [12] = SH_PFC_PIN_NONE,
+ [13] = SH_PFC_PIN_NONE,
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = PIN_ASEBRK_N_ACK, /* ASEBRK#/ACK */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
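+
+/*
+ * Note that register 0xe606011c appears twice: once as a puen-style entry
+ * covering its pull-up-capable bits, and once as a pud-style entry whose
+ * only valid bit (19, ASEBRK#/ACK) selects a pull-down. The reworked
+ * rcar_pin_to_bias_reg() later in this series walks entries keyed by
+ * either field, so one physical register can be described from both
+ * angles.
+ */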
+
static const struct sh_pfc_soc_operations r8a7791_pinmux_ops = {
.pin_to_pocctrl = r8a7791_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
#ifdef CONFIG_PINCTRL_PFC_R8A7743
@@ -6665,6 +7016,7 @@ const struct sh_pfc_soc_info r8a7743_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions.common),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
@@ -6687,6 +7039,7 @@ const struct sh_pfc_soc_info r8a7744_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions.common),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
@@ -6711,6 +7064,7 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
@@ -6735,6 +7089,7 @@ const struct sh_pfc_soc_info r8a7793_pinmux_info = {
ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/renesas/pfc-r8a7792.c b/drivers/pinctrl/renesas/pfc-r8a7792.c
index 258f82fb31c0..f54a7c81005d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7792.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7792.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 32fe8caca70a..ee4ce9349aae 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
@@ -8,7 +8,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
index bdd605e41303..be4eee070842 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77951.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/sys_soc.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -4126,6 +4125,18 @@ static const union vin_data vin4_data_b_mux = {
VI4_DATA22_MARK, VI4_DATA23_MARK,
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
static const unsigned int vin4_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
@@ -4180,6 +4191,18 @@ static const union vin_data16 vin5_data_mux = {
VI5_DATA14_MARK, VI5_DATA15_MARK,
},
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
static const unsigned int vin5_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
@@ -4210,7 +4233,7 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[326];
+ struct sh_pfc_pin_group common[328];
#ifdef CONFIG_PINCTRL_PFC_R8A77951
struct sh_pfc_pin_group automotive[30];
#endif
@@ -4530,6 +4553,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4538,6 +4562,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 10),
VIN_DATA_PIN_GROUP(vin5_data, 12),
VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
@@ -5097,6 +5122,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -5108,6 +5134,7 @@ static const char * const vin5_groups[] = {
"vin5_data10",
"vin5_data12",
"vin5_data16",
+ "vin5_high8",
"vin5_sync",
"vin5_field",
"vin5_clkenb",
diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
index 96b5b1509bb7..44e9d2eea484 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -4100,6 +4099,18 @@ static const union vin_data vin4_data_b_mux = {
VI4_DATA22_MARK, VI4_DATA23_MARK,
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
static const unsigned int vin4_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
@@ -4154,6 +4165,18 @@ static const union vin_data16 vin5_data_mux = {
VI5_DATA14_MARK, VI5_DATA15_MARK,
},
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
static const unsigned int vin5_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
@@ -4184,7 +4207,7 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[322];
+ struct sh_pfc_pin_group common[324];
#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
struct sh_pfc_pin_group automotive[30];
#endif
@@ -4500,6 +4523,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4508,6 +4532,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 10),
VIN_DATA_PIN_GROUP(vin5_data, 12),
VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
@@ -5054,6 +5079,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -5065,6 +5091,7 @@ static const char * const vin5_groups[] = {
"vin5_data10",
"vin5_data12",
"vin5_data16",
+ "vin5_high8",
"vin5_sync",
"vin5_field",
"vin5_clkenb",
diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
index f15e29383d9b..e69210cc6148 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77965.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -4337,6 +4336,20 @@ static const union vin_data vin4_data_b_mux = {
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
+
static const unsigned int vin4_sync_pins[] = {
/* VSYNC_N, HSYNC_N */
RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18),
@@ -4397,6 +4410,20 @@ static const union vin_data16 vin5_data_mux = {
},
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
+
static const unsigned int vin5_sync_pins[] = {
/* VSYNC_N, HSYNC_N */
RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 10),
@@ -4431,7 +4458,7 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[324];
+ struct sh_pfc_pin_group common[326];
#ifdef CONFIG_PINCTRL_PFC_R8A77965
struct sh_pfc_pin_group automotive[30];
#endif
@@ -4749,6 +4776,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4757,6 +4785,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 10),
VIN_DATA_PIN_GROUP(vin5_data, 12),
VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
@@ -5307,6 +5336,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -5318,6 +5348,7 @@ static const char * const vin5_groups[] = {
"vin5_data10",
"vin5_data12",
"vin5_data16",
+ "vin5_high8",
"vin5_sync",
"vin5_field",
"vin5_clkenb",
diff --git a/drivers/pinctrl/renesas/pfc-r8a77970.c b/drivers/pinctrl/renesas/pfc-r8a77970.c
index e8a0fc468eb2..7935826cfae7 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77970.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77970.c
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a77980.c b/drivers/pinctrl/renesas/pfc-r8a77980.c
index ebd07bebaeeb..20cff93a2a13 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77980.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77980.c
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
index 0a32e3c317c1..d040eb3e305d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -3697,6 +3696,20 @@ static const union vin_data vin4_data_b_mux = {
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 14),
+};
+
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
+
static const unsigned int vin4_sync_pins[] = {
/* HSYNC, VSYNC */
RCAR_GP_PIN(2, 25), RCAR_GP_PIN(2, 24),
@@ -3771,6 +3784,20 @@ static const unsigned int vin5_data8_b_mux[] = {
VI5_DATA6_B_MARK, VI5_DATA7_B_MARK,
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_A_MARK, VI5_DATA9_A_MARK,
+ VI5_DATA10_A_MARK, VI5_DATA11_A_MARK,
+ VI5_DATA12_A_MARK, VI5_DATA13_A_MARK,
+ VI5_DATA14_A_MARK, VI5_DATA15_A_MARK,
+};
+
static const unsigned int vin5_sync_a_pins[] = {
/* HSYNC_N, VSYNC_N */
RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
@@ -3813,7 +3840,7 @@ static const unsigned int vin5_clk_b_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[253];
+ struct sh_pfc_pin_group common[255];
#ifdef CONFIG_PINCTRL_PFC_R8A77990
struct sh_pfc_pin_group automotive[21];
#endif
@@ -4058,6 +4085,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4067,6 +4095,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 12, _a),
VIN_DATA_PIN_GROUP(vin5_data, 16, _a),
SH_PFC_PIN_GROUP(vin5_data8_b),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync_a),
SH_PFC_PIN_GROUP(vin5_field_a),
SH_PFC_PIN_GROUP(vin5_clkenb_a),
@@ -4516,6 +4545,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -4528,6 +4558,7 @@ static const char * const vin5_groups[] = {
"vin5_data12_a",
"vin5_data16_a",
"vin5_data8_b",
+ "vin5_high8",
"vin5_sync_a",
"vin5_field_a",
"vin5_clkenb_a",
diff --git a/drivers/pinctrl/renesas/pfc-r8a77995.c b/drivers/pinctrl/renesas/pfc-r8a77995.c
index 672251d86c2d..b479f87a3b23 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77995.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77995.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index 2250ccd0470a..ad6532443a78 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
diff --git a/drivers/pinctrl/renesas/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c
index 96b91e95b1e1..ed6db809e80d 100644
--- a/drivers/pinctrl/renesas/pfc-sh73a0.c
+++ b/drivers/pinctrl/renesas/pfc-sh73a0.c
@@ -13,7 +13,6 @@
#include <linux/regulator/machine.h>
#include <linux/slab.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
@@ -4310,50 +4309,14 @@ static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
* Pin bias
*/
-#define PORTnCR_PULMD_OFF (0 << 6)
-#define PORTnCR_PULMD_DOWN (2 << 6)
-#define PORTnCR_PULMD_UP (3 << 6)
-#define PORTnCR_PULMD_MASK (3 << 6)
-
static const unsigned int sh73a0_portcr_offsets[] = {
0x00000000, 0x00001000, 0x00001000, 0x00002000, 0x00002000,
0x00002000, 0x00002000, 0x00003000, 0x00003000, 0x00002000,
};
-static unsigned int sh73a0_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
+static void __iomem *sh73a0_pin_to_portcr(struct sh_pfc *pfc, unsigned int pin)
{
- void __iomem *addr = pfc->windows->virt
- + sh73a0_portcr_offsets[pin >> 5] + pin;
- u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
-
- switch (value) {
- case PORTnCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTnCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTnCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void sh73a0_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- void __iomem *addr = pfc->windows->virt
- + sh73a0_portcr_offsets[pin >> 5] + pin;
- u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTnCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTnCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
+ return pfc->windows->virt + sh73a0_portcr_offsets[pin >> 5] + pin;
}
/* -----------------------------------------------------------------------------
@@ -4383,8 +4346,9 @@ static int sh73a0_pinmux_soc_init(struct sh_pfc *pfc)
static const struct sh_pfc_soc_operations sh73a0_pfc_ops = {
.init = sh73a0_pinmux_soc_init,
- .get_bias = sh73a0_pinmux_get_bias,
- .set_bias = sh73a0_pinmux_set_bias,
+ .get_bias = rmobile_pinmux_get_bias,
+ .set_bias = rmobile_pinmux_set_bias,
+ .pin_to_portcr = sh73a0_pin_to_portcr,
};
const struct sh_pfc_soc_info sh73a0_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index a49f74730272..bb488af29862 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
@@ -840,21 +841,48 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
return pinctrl_enable(pmx->pctl);
}
+static const struct pinmux_bias_reg *
+rcar_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit)
+{
+ unsigned int i, j;
+
+ for (i = 0; pfc->info->bias_regs[i].puen || pfc->info->bias_regs[i].pud; i++) {
+ for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
+ if (pfc->info->bias_regs[i].pins[j] == pin) {
+ *bit = j;
+ return &pfc->info->bias_regs[i];
+ }
+ }
+ }
+
+ WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
+
+ return NULL;
+}
+
unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ reg = rcar_pin_to_bias_reg(pfc, pin, &bit);
if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (!reg->pud || (sh_pfc_read(pfc, reg->pud) & BIT(bit)))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
+ if (reg->puen) {
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (!reg->pud || (sh_pfc_read(pfc, reg->pud) & BIT(bit)))
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ } else {
+ if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ else
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
}
void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
@@ -864,21 +892,68 @@ void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
u32 enable, updown;
unsigned int bit;
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ reg = rcar_pin_to_bias_reg(pfc, pin, &bit);
if (!reg)
return;
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
+ if (reg->puen) {
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= BIT(bit);
- if (reg->pud) {
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
+ if (reg->pud) {
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
- sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->pud, updown);
+ }
+
+ sh_pfc_write(pfc, reg->puen, enable);
+ } else {
+ enable = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_DOWN)
+ enable |= BIT(bit);
+
+ sh_pfc_write(pfc, reg->pud, enable);
+ }
+}
+
+#define PORTnCR_PULMD_OFF (0 << 6)
+#define PORTnCR_PULMD_DOWN (2 << 6)
+#define PORTnCR_PULMD_UP (3 << 6)
+#define PORTnCR_PULMD_MASK (3 << 6)
+
+unsigned int rmobile_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
+{
+ void __iomem *reg = pfc->info->ops->pin_to_portcr(pfc, pin);
+ u32 value = ioread8(reg) & PORTnCR_PULMD_MASK;
+
+ switch (value) {
+ case PORTnCR_PULMD_UP:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case PORTnCR_PULMD_DOWN:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ case PORTnCR_PULMD_OFF:
+ default:
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
+}
+
+void rmobile_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ void __iomem *reg = pfc->info->ops->pin_to_portcr(pfc, pin);
+ u32 value = ioread8(reg) & ~PORTnCR_PULMD_MASK;
+
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value |= PORTnCR_PULMD_UP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value |= PORTnCR_PULMD_DOWN;
+ break;
}
- sh_pfc_write(pfc, reg->puen, enable);
+ iowrite8(value, reg);
}
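
The new rmobile_* helpers above encode pin bias in bits [7:6] of an 8-bit PORTnCR register. A minimal userspace sketch of that decoding, assuming only the PORTnCR_PULMD_* values from the patch (demo_decode_bias is a hypothetical stand-in, not driver code):

#include <stdint.h>
#include <stdio.h>

#define PORTnCR_PULMD_OFF	(0 << 6)
#define PORTnCR_PULMD_DOWN	(2 << 6)
#define PORTnCR_PULMD_UP	(3 << 6)
#define PORTnCR_PULMD_MASK	(3 << 6)

static const char *demo_decode_bias(uint8_t portcr)
{
	switch (portcr & PORTnCR_PULMD_MASK) {
	case PORTnCR_PULMD_UP:
		return "pull-up";
	case PORTnCR_PULMD_DOWN:
		return "pull-down";
	default: /* PORTnCR_PULMD_OFF or the reserved 1 << 6 encoding */
		return "bias disabled";
	}
}

int main(void)
{
	/* 0xC0 -> pull-up, 0x80 -> pull-down, 0x00 -> disabled */
	printf("%s\n", demo_decode_bias(0xC0));
	return 0;
}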
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 5934faeb23d7..320898861c4b 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -188,9 +188,9 @@ struct pinmux_drive_reg {
.reg = r, \
.fields =
-struct pinmux_bias_reg {
+struct pinmux_bias_reg { /* At least one of puen/pud must exist */
u32 puen; /* Pull-enable or pull-up control register */
- u32 pud; /* Pull-up/down control register (optional) */
+ u32 pud; /* Pull-up/down or pull-down control register */
const u16 pins[32];
};
@@ -273,6 +273,7 @@ struct sh_pfc_soc_operations {
void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias);
int (*pin_to_pocctrl)(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl);
+ void __iomem * (*pin_to_portcr)(struct sh_pfc *pfc, unsigned int pin);
};
struct sh_pfc_soc_info {
@@ -478,9 +479,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 5, fn, sfx, cfg)
#define PORT_GP_6(bank, fn, sfx) PORT_GP_CFG_6(bank, fn, sfx, 0)
-#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_7(bank, fn, sfx, cfg) \
PORT_GP_CFG_6(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 6, fn, sfx, cfg)
+#define PORT_GP_7(bank, fn, sfx) PORT_GP_CFG_7(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_7(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 7, fn, sfx, cfg)
#define PORT_GP_8(bank, fn, sfx) PORT_GP_CFG_8(bank, fn, sfx, 0)
@@ -773,4 +778,15 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
*/
#define RCAR_GP_PIN(bank, pin) (((bank) * 32) + (pin))
+/*
+ * Bias helpers
+ */
+unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
+void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias);
+
+unsigned int rmobile_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
+void rmobile_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias);
+
#endif /* __SH_PFC_H */
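
The new PORT_GP_CFG_7 helper follows the existing convention of building an N-pin bank from the (N-1)-pin macro plus one extra pin. A self-contained sketch with simplified stand-in macros (struct demo_pin and the trimmed argument lists are illustrative, not the real sh_pfc types):

#include <stdio.h>

struct demo_pin { int bank, pin; };

/* Simplified stand-ins for the real PORT_GP_CFG_* macros */
#define PORT_GP_CFG_1(bank, pin)	{ (bank), (pin) }
#define PORT_GP_CFG_6(bank) \
	PORT_GP_CFG_1(bank, 0), PORT_GP_CFG_1(bank, 1), \
	PORT_GP_CFG_1(bank, 2), PORT_GP_CFG_1(bank, 3), \
	PORT_GP_CFG_1(bank, 4), PORT_GP_CFG_1(bank, 5)
#define PORT_GP_CFG_7(bank) \
	PORT_GP_CFG_6(bank), PORT_GP_CFG_1(bank, 6)

static const struct demo_pin bank2[] = { PORT_GP_CFG_7(2) };

int main(void)
{
	/* Prints 7: the 6-pin macro plus pin 6, as in the patch */
	printf("bank 2 has %zu pins\n", sizeof(bank2) / sizeof(bank2[0]));
	return 0;
}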
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 0cd7f33cdf25..2b99f4130e1e 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
- unsigned long mask;
+ unsigned int mask;
unsigned long flags;
raw_spin_lock_irqsave(&bank->slock, flags);
@@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
- unsigned long mask;
+ unsigned int mask;
unsigned long flags;
/*
@@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static inline void exynos_irq_demux_eint(unsigned long pend,
+static inline void exynos_irq_demux_eint(unsigned int pend,
struct irq_domain *domain)
{
unsigned int irq;
@@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
- unsigned long pend;
- unsigned long mask;
+ unsigned int pend;
+ unsigned int mask;
int i;
chained_irq_enter(chip, desc);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 7d9bdedcd71b..ad9eb5ed8e81 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -531,6 +531,8 @@ static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
break;
}
+ dev_err(pctl->dev, "invalid function %d on pin %d .\n", fnum, pin_num);
+
return false;
}
@@ -545,11 +547,8 @@ static int stm32_pctrl_dt_node_to_map_func(struct stm32_pinctrl *pctl,
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
(*map)[*num_maps].data.mux.group = grp->name;
- if (!stm32_pctrl_is_function_valid(pctl, pin, fnum)) {
- dev_err(pctl->dev, "invalid function %d on pin %d .\n",
- fnum, pin);
+ if (!stm32_pctrl_is_function_valid(pctl, pin, fnum))
return -EINVAL;
- }
(*map)[*num_maps].data.mux.function = stm32_gpio_functions[fnum];
(*num_maps)++;
@@ -620,7 +619,6 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
func = STM32_GET_PIN_FUNC(pinfunc);
if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
- dev_err(pctl->dev, "invalid function.\n");
err = -EINVAL;
goto exit;
}
@@ -821,11 +819,8 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
int pin;
ret = stm32_pctrl_is_function_valid(pctl, g->pin, function);
- if (!ret) {
- dev_err(pctl->dev, "invalid function %d on group %d .\n",
- function, group);
+ if (!ret)
return -EINVAL;
- }
range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
if (!range) {
@@ -1542,8 +1537,10 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (of_property_read_bool(child, "gpio-controller")) {
bank->rstc = of_reset_control_get_exclusive(child,
NULL);
- if (PTR_ERR(bank->rstc) == -EPROBE_DEFER)
+ if (PTR_ERR(bank->rstc) == -EPROBE_DEFER) {
+ of_node_put(child);
return -EPROBE_DEFER;
+ }
bank->clk = of_clk_get_by_name(child, NULL);
if (IS_ERR(bank->clk)) {
@@ -1551,6 +1548,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
dev_err(dev,
"failed to get clk (%ld)\n",
PTR_ERR(bank->clk));
+ of_node_put(child);
return PTR_ERR(bank->clk);
}
i++;
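
Both stm32 hunks above fix the same reference-counting rule: the of_* child iterators take a reference on each node they visit, so any early return out of the loop must drop it. A hedged sketch of the pattern (the demo_* names are hypothetical):

#include <linux/of.h>

static int demo_setup_one_bank(struct device_node *child)
{
	return 0; /* placeholder for per-bank init */
}

static int demo_setup_banks(struct device_node *np)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(np, child) {
		ret = demo_setup_one_bank(child);
		if (ret) {
			/* Drop the reference the iterator took on child */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}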
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 60a67139ff0a..4e2382778d38 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -511,8 +511,10 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
}
pins = devm_kcalloc(iod->dev, rows, sizeof(*pins), GFP_KERNEL);
- if (!pins)
+ if (!pins) {
+ error = -ENOMEM;
goto free_group;
+ }
cfg = devm_kcalloc(iod->dev, rows, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
@@ -867,7 +869,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
goto exit_out;
}
- if (ti_iodelay_pinconf_init_dev(iod))
+ ret = ti_iodelay_pinconf_init_dev(iod);
+ if (ret)
goto exit_out;
ret = ti_iodelay_alloc_pins(dev, iod, res->start);
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 472a03daa869..4e14b4d6635d 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -52,12 +52,15 @@ struct i2c_peripheral {
enum i2c_adapter_type type;
u32 pci_devid;
+ const struct property_entry *properties;
+
struct i2c_client *client;
};
struct acpi_peripheral {
char hid[ACPI_ID_LEN];
- const struct property_entry *properties;
+ struct software_node swnode;
+ struct i2c_client *client;
};
struct chromeos_laptop {
@@ -68,7 +71,7 @@ struct chromeos_laptop {
struct i2c_peripheral *i2c_peripherals;
unsigned int num_i2c_peripherals;
- const struct acpi_peripheral *acpi_peripherals;
+ struct acpi_peripheral *acpi_peripherals;
unsigned int num_acpi_peripherals;
};
@@ -161,7 +164,7 @@ static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
static bool chromeos_laptop_adjust_client(struct i2c_client *client)
{
- const struct acpi_peripheral *acpi_dev;
+ struct acpi_peripheral *acpi_dev;
struct acpi_device_id acpi_ids[2] = { };
int i;
int error;
@@ -175,8 +178,7 @@ static bool chromeos_laptop_adjust_client(struct i2c_client *client)
memcpy(acpi_ids[0].id, acpi_dev->hid, ACPI_ID_LEN);
if (acpi_match_device(acpi_ids, &client->dev)) {
- error = device_add_properties(&client->dev,
- acpi_dev->properties);
+ error = device_add_software_node(&client->dev, &acpi_dev->swnode);
if (error) {
dev_err(&client->dev,
"failed to add properties: %d\n",
@@ -184,6 +186,8 @@ static bool chromeos_laptop_adjust_client(struct i2c_client *client)
break;
}
+ acpi_dev->client = client;
+
return true;
}
}
@@ -193,15 +197,28 @@ static bool chromeos_laptop_adjust_client(struct i2c_client *client)
static void chromeos_laptop_detach_i2c_client(struct i2c_client *client)
{
+ struct acpi_peripheral *acpi_dev;
struct i2c_peripheral *i2c_dev;
int i;
- for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ if (has_acpi_companion(&client->dev))
+ for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
+ acpi_dev = &cros_laptop->acpi_peripherals[i];
- if (i2c_dev->client == client)
- i2c_dev->client = NULL;
- }
+ if (acpi_dev->client == client) {
+ acpi_dev->client = NULL;
+ return;
+ }
+ }
+ else
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+ if (i2c_dev->client == client) {
+ i2c_dev->client = NULL;
+ return;
+ }
+ }
}
static int chromeos_laptop_i2c_notifier_call(struct notifier_block *nb,
@@ -302,28 +319,26 @@ static struct i2c_peripheral chromebook_pixel_peripherals[] __initdata = {
.board_info = {
I2C_BOARD_INFO("atmel_mxt_ts",
ATMEL_TS_I2C_ADDR),
- .properties =
- chromebook_atmel_touchscreen_props,
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "touchscreen",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_PANEL,
.alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ .properties = chromebook_atmel_touchscreen_props,
},
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("atmel_mxt_tp",
ATMEL_TP_I2C_ADDR),
- .properties =
- chromebook_pixel_trackpad_props,
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_VGADDC,
.alt_addr = ATMEL_TP_I2C_BL_ADDR,
+ .properties = chromebook_pixel_trackpad_props,
},
/* Light Sensor. */
{
@@ -414,8 +429,6 @@ static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
.board_info = {
I2C_BOARD_INFO("atmel_mxt_ts",
ATMEL_TS_I2C_ADDR),
- .properties =
- chromebook_atmel_touchscreen_props,
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "touchscreen",
@@ -423,6 +436,7 @@ static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
.type = I2C_ADAPTER_DESIGNWARE,
.pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
.alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ .properties = chromebook_atmel_touchscreen_props,
},
/* Touchpad. */
{
@@ -498,12 +512,16 @@ static struct acpi_peripheral samus_peripherals[] __initdata = {
/* Touchpad */
{
.hid = "ATML0000",
- .properties = samus_trackpad_props,
+ .swnode = {
+ .properties = samus_trackpad_props,
+ },
},
	/* Touchscreen */
{
.hid = "ATML0001",
- .properties = chromebook_atmel_touchscreen_props,
+ .swnode = {
+ .properties = chromebook_atmel_touchscreen_props,
+ },
},
};
DECLARE_ACPI_CROS_LAPTOP(samus);
@@ -512,12 +530,16 @@ static struct acpi_peripheral generic_atmel_peripherals[] __initdata = {
/* Touchpad */
{
.hid = "ATML0000",
- .properties = chromebook_pixel_trackpad_props,
+ .swnode = {
+ .properties = chromebook_pixel_trackpad_props,
+ },
},
	/* Touchscreen */
{
.hid = "ATML0001",
- .properties = chromebook_atmel_touchscreen_props,
+ .swnode = {
+ .properties = chromebook_atmel_touchscreen_props,
+ },
},
};
DECLARE_ACPI_CROS_LAPTOP(generic_atmel);
@@ -743,12 +765,11 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
if (error)
goto err_out;
- /* We need to deep-copy properties */
- if (info->properties) {
- info->properties =
- property_entries_dup(info->properties);
- if (IS_ERR(info->properties)) {
- error = PTR_ERR(info->properties);
+ /* Create primary fwnode for the device - copies everything */
+ if (i2c_dev->properties) {
+ info->fwnode = fwnode_create_software_node(i2c_dev->properties, NULL);
+ if (IS_ERR(info->fwnode)) {
+ error = PTR_ERR(info->fwnode);
goto err_out;
}
}
@@ -760,8 +781,8 @@ err_out:
while (--i >= 0) {
i2c_dev = &cros_laptop->i2c_peripherals[i];
info = &i2c_dev->board_info;
- if (info->properties)
- property_entries_free(info->properties);
+ if (!IS_ERR_OR_NULL(info->fwnode))
+ fwnode_remove_software_node(info->fwnode);
}
kfree(cros_laptop->i2c_peripherals);
return error;
@@ -801,11 +822,11 @@ chromeos_laptop_prepare_acpi_peripherals(struct chromeos_laptop *cros_laptop,
*acpi_dev = *src_dev;
/* We need to deep-copy properties */
- if (src_dev->properties) {
- acpi_dev->properties =
- property_entries_dup(src_dev->properties);
- if (IS_ERR(acpi_dev->properties)) {
- error = PTR_ERR(acpi_dev->properties);
+ if (src_dev->swnode.properties) {
+ acpi_dev->swnode.properties =
+ property_entries_dup(src_dev->swnode.properties);
+ if (IS_ERR(acpi_dev->swnode.properties)) {
+ error = PTR_ERR(acpi_dev->swnode.properties);
goto err_out;
}
}
@@ -821,8 +842,8 @@ chromeos_laptop_prepare_acpi_peripherals(struct chromeos_laptop *cros_laptop,
err_out:
while (--i >= 0) {
acpi_dev = &acpi_peripherals[i];
- if (acpi_dev->properties)
- property_entries_free(acpi_dev->properties);
+ if (!IS_ERR_OR_NULL(acpi_dev->swnode.properties))
+ property_entries_free(acpi_dev->swnode.properties);
}
kfree(acpi_peripherals);
@@ -833,21 +854,20 @@ static void chromeos_laptop_destroy(const struct chromeos_laptop *cros_laptop)
{
const struct acpi_peripheral *acpi_dev;
struct i2c_peripheral *i2c_dev;
- struct i2c_board_info *info;
int i;
for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
i2c_dev = &cros_laptop->i2c_peripherals[i];
- info = &i2c_dev->board_info;
-
i2c_unregister_device(i2c_dev->client);
- property_entries_free(info->properties);
}
for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
acpi_dev = &cros_laptop->acpi_peripherals[i];
- property_entries_free(acpi_dev->properties);
+ if (acpi_dev->client)
+ device_remove_software_node(&acpi_dev->client->dev);
+
+ property_entries_free(acpi_dev->swnode.properties);
}
kfree(cros_laptop->i2c_peripherals);
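
The conversion above replaces raw property_entry lists with software nodes attached through device_add_software_node(). A minimal sketch of that pattern, using only APIs referenced in the patch; the device and the property are made up for illustration:

#include <linux/device.h>
#include <linux/property.h>

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("hid-descr-addr", 0x20),
	{ }
};

static const struct software_node demo_swnode = {
	.properties = demo_props,
};

static int demo_attach_properties(struct device *dev)
{
	/*
	 * Attaches demo_swnode to the device; undone with
	 * device_remove_software_node(dev) on teardown, as in the
	 * reworked chromeos_laptop_destroy() path above.
	 */
	return device_add_software_node(dev, &demo_swnode);
}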
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 03c3ff34bcf5..085ad0a0d22e 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -675,6 +675,3 @@ static __exit void dcdrbu_exit(void)
module_exit(dcdrbu_exit);
module_init(dcdrbu_init);
-
-/* vim:noet:ts=8:sw=8
-*/
diff --git a/drivers/platform/x86/intel_cht_int33fe_microb.c b/drivers/platform/x86/intel_cht_int33fe_microb.c
index 20b11e0d9a75..673f41cd14b5 100644
--- a/drivers/platform/x86/intel_cht_int33fe_microb.c
+++ b/drivers/platform/x86/intel_cht_int33fe_microb.c
@@ -35,6 +35,10 @@ static const struct property_entry bq27xxx_props[] = {
{ }
};
+static const struct software_node bq27xxx_node = {
+ .properties = bq27xxx_props,
+};
+
int cht_int33fe_microb_probe(struct cht_int33fe_data *data)
{
struct device *dev = data->dev;
@@ -43,7 +47,7 @@ int cht_int33fe_microb_probe(struct cht_int33fe_data *data)
memset(&board_info, 0, sizeof(board_info));
strscpy(board_info.type, "bq27542", ARRAY_SIZE(board_info.type));
board_info.dev_name = "bq27542";
- board_info.properties = bq27xxx_props;
+ board_info.swnode = &bq27xxx_node;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
return PTR_ERR_OR_ZERO(data->battery_fg);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index f2edef0df40f..8c20e524e9ad 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -108,7 +108,7 @@ config PTP_1588_CLOCK_PCH
config PTP_1588_CLOCK_KVM
tristate "KVM virtual PTP clock"
depends on PTP_1588_CLOCK
- depends on KVM_GUEST && X86
+ depends on (KVM_GUEST && X86) || (HAVE_ARM_SMCCC_DISCOVERY && ARM_ARCH_TIMER)
default y
help
This driver adds support for using kvm infrastructure as a PTP
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index db5aef3bddc6..8673d1743faa 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -4,6 +4,8 @@
#
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o
+ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 75463c2e2b86..fa636951169e 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -1404,8 +1404,8 @@ static int idtcm_set_pll_mode(struct idtcm_channel *channel,
/* PTP Hardware Clock interface */
-/**
- * @brief Maximum absolute value for write phase offset in picoseconds
+/*
+ * Maximum absolute value for write phase offset in picoseconds
*
* Destination signed register is 32-bit register in resolution of 50ps
*
diff --git a/drivers/ptp/ptp_kvm_arm.c b/drivers/ptp/ptp_kvm_arm.c
new file mode 100644
index 000000000000..b7d28c8dfb84
--- /dev/null
+++ b/drivers/ptp/ptp_kvm_arm.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ * Copyright (C) 2019 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/ptp_kvm.h>
+
+#include <asm/arch_timer.h>
+#include <asm/hypervisor.h>
+
+int kvm_arch_ptp_init(void)
+{
+ int ret;
+
+ ret = kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP);
+ if (ret <= 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+{
+ return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL);
+}
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm_common.c
index 658d33fc3195..fcae32f56f25 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm_common.c
@@ -8,11 +8,11 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/ptp_kvm.h>
#include <uapi/linux/kvm_para.h>
#include <asm/kvm_para.h>
-#include <asm/pvclock.h>
-#include <asm/kvmclock.h>
#include <uapi/asm/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
@@ -24,56 +24,29 @@ struct kvm_ptp_clock {
static DEFINE_SPINLOCK(kvm_ptp_lock);
-static struct pvclock_vsyscall_time_info *hv_clock;
-
-static struct kvm_clock_pairing clock_pair;
-static phys_addr_t clock_pair_gpa;
-
static int ptp_kvm_get_time_fn(ktime_t *device_time,
struct system_counterval_t *system_counter,
void *ctx)
{
- unsigned long ret;
+ long ret;
+ u64 cycle;
struct timespec64 tspec;
- unsigned version;
- int cpu;
- struct pvclock_vcpu_time_info *src;
+ struct clocksource *cs;
spin_lock(&kvm_ptp_lock);
preempt_disable_notrace();
- cpu = smp_processor_id();
- src = &hv_clock[cpu].pvti;
-
- do {
- /*
- * We are using a TSC value read in the hosts
- * kvm_hc_clock_pairing handling.
- * So any changes to tsc_to_system_mul
- * and tsc_shift or any other pvclock
- * data invalidate that measurement.
- */
- version = pvclock_read_begin(src);
-
- ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
- clock_pair_gpa,
- KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret != 0) {
- pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
- spin_unlock(&kvm_ptp_lock);
- preempt_enable_notrace();
- return -EOPNOTSUPP;
- }
-
- tspec.tv_sec = clock_pair.sec;
- tspec.tv_nsec = clock_pair.nsec;
- ret = __pvclock_read_cycles(src, clock_pair.tsc);
- } while (pvclock_read_retry(src, version));
+ ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs);
+ if (ret) {
+ spin_unlock(&kvm_ptp_lock);
+ preempt_enable_notrace();
+ return ret;
+ }
preempt_enable_notrace();
- system_counter->cycles = ret;
- system_counter->cs = &kvm_clock;
+ system_counter->cycles = cycle;
+ system_counter->cs = cs;
*device_time = timespec64_to_ktime(tspec);
@@ -111,22 +84,17 @@ static int ptp_kvm_settime(struct ptp_clock_info *ptp,
static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- unsigned long ret;
+ long ret;
struct timespec64 tspec;
spin_lock(&kvm_ptp_lock);
- ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
- clock_pair_gpa,
- KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret != 0) {
- pr_err_ratelimited("clock offset hypercall ret %lu\n", ret);
+ ret = kvm_arch_ptp_get_clock(&tspec);
+ if (ret) {
spin_unlock(&kvm_ptp_lock);
- return -EOPNOTSUPP;
+ return ret;
}
- tspec.tv_sec = clock_pair.sec;
- tspec.tv_nsec = clock_pair.nsec;
spin_unlock(&kvm_ptp_lock);
memcpy(ts, &tspec, sizeof(struct timespec64));
@@ -168,19 +136,12 @@ static int __init ptp_kvm_init(void)
{
long ret;
- if (!kvm_para_available())
- return -ENODEV;
-
- clock_pair_gpa = slow_virt_to_phys(&clock_pair);
- hv_clock = pvclock_get_pvti_cpu0_va();
-
- if (!hv_clock)
- return -ENODEV;
-
- ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
- KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
- return -ENODEV;
+ ret = kvm_arch_ptp_init();
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+			pr_err("failed to initialize ptp_kvm\n");
+ return ret;
+ }
kvm_ptp_clock.caps = ptp_kvm_caps;
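
For context, a PHC driver's getcrosststamp operation typically plumbs a callback like ptp_kvm_get_time_fn() into the timekeeping core, which pairs the device time with a system timestamp. A hedged sketch of that shape (demo_getcrosststamp is illustrative; the driver's actual op is not shown in this hunk):

#include <linux/ptp_clock_kernel.h>
#include <linux/timekeeping.h>

static int demo_getcrosststamp(struct ptp_clock_info *ptp,
			       struct system_device_crosststamp *xtstamp)
{
	/*
	 * ptp_kvm_get_time_fn() fills in the guest wall-clock time and
	 * the matching counter cycles + clocksource; the timekeeping
	 * core converts that pair into a system/device cross-timestamp.
	 */
	return get_device_system_crosststamp(ptp_kvm_get_time_fn, NULL,
					     NULL, xtstamp);
}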
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
new file mode 100644
index 000000000000..3dd519dfc473
--- /dev/null
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <asm/pvclock.h>
+#include <asm/kvmclock.h>
+#include <linux/module.h>
+#include <uapi/asm/kvm_para.h>
+#include <uapi/linux/kvm_para.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_kvm.h>
+
+struct pvclock_vsyscall_time_info *hv_clock;
+
+static phys_addr_t clock_pair_gpa;
+static struct kvm_clock_pairing clock_pair;
+
+int kvm_arch_ptp_init(void)
+{
+ long ret;
+
+ if (!kvm_para_available())
+ return -ENODEV;
+
+ clock_pair_gpa = slow_virt_to_phys(&clock_pair);
+ hv_clock = pvclock_get_pvti_cpu0_va();
+ if (!hv_clock)
+ return -ENODEV;
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+ return -ENODEV;
+
+ return 0;
+}
+
+int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+{
+ long ret;
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+ clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret != 0) {
+ pr_err_ratelimited("clock offset hypercall ret %lu\n", ret);
+ return -EOPNOTSUPP;
+ }
+
+ ts->tv_sec = clock_pair.sec;
+ ts->tv_nsec = clock_pair.nsec;
+
+ return 0;
+}
+
+int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
+ struct clocksource **cs)
+{
+ struct pvclock_vcpu_time_info *src;
+ unsigned int version;
+ long ret;
+ int cpu;
+
+ cpu = smp_processor_id();
+ src = &hv_clock[cpu].pvti;
+
+ do {
+ /*
+ * We are using a TSC value read in the hosts
+	 * We are using a TSC value read in the host's
+ * So any changes to tsc_to_system_mul
+ * and tsc_shift or any other pvclock
+ * data invalidate that measurement.
+ */
+ version = pvclock_read_begin(src);
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+ clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret != 0) {
+ pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
+ return -EOPNOTSUPP;
+ }
+ tspec->tv_sec = clock_pair.sec;
+ tspec->tv_nsec = clock_pair.nsec;
+ *cycle = __pvclock_read_cycles(src, clock_pair.tsc);
+ } while (pvclock_read_retry(src, version));
+
+ *cs = &kvm_clock;
+
+ return 0;
+}
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index ce10ecd41ba0..a17e8cc642c5 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_pch.h>
#include <linux/slab.h>
#define STATION_ADDR_LEN 20
@@ -36,7 +37,8 @@ enum pch_status {
PCH_FAILED,
PCH_UNSUPPORTED,
};
-/**
+
+/*
* struct pch_ts_regs - IEEE 1588 registers
*/
struct pch_ts_regs {
@@ -102,7 +104,8 @@ struct pch_ts_regs {
#define PCH_IEEE1588_ETH (1 << 0)
#define PCH_IEEE1588_CAN (1 << 1)
-/**
+
+/*
* struct pch_dev - Driver private data
*/
struct pch_dev {
@@ -119,7 +122,7 @@ struct pch_dev {
spinlock_t register_lock;
};
-/**
+/*
* struct pch_params - 1588 module parameter
*/
struct pch_params {
@@ -179,17 +182,6 @@ static inline void pch_block_reset(struct pch_dev *chip)
iowrite32(val, (&chip->regs->control));
}
-u32 pch_ch_control_read(struct pci_dev *pdev)
-{
- struct pch_dev *chip = pci_get_drvdata(pdev);
- u32 val;
-
- val = ioread32(&chip->regs->ch_control);
-
- return val;
-}
-EXPORT_SYMBOL(pch_ch_control_read);
-
void pch_ch_control_write(struct pci_dev *pdev, u32 val)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
@@ -296,6 +288,7 @@ static void pch_reset(struct pch_dev *chip)
* IEEE 1588 hardware when looking at PTP
* traffic on the ethernet interface
 * @addr: address which contains the colon-separated address to be used.
+ * @pdev: PCI device.
*/
int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
{
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index d3371ac7b871..c76adedd58c9 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -618,6 +618,15 @@ config PWM_TWL_LED
To compile this driver as a module, choose M here: the module
will be called pwm-twl-led.
+config PWM_VISCONTI
+ tristate "Toshiba Visconti PWM support"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ help
+ PWM Subsystem driver support for Toshiba Visconti SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-visconti.
+
config PWM_VT8500
tristate "vt8500 PWM support"
depends on ARCH_VT8500 || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index d3879619bd76..708840b7fba8 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -58,4 +58,5 @@ obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
+obj-$(CONFIG_PWM_VISCONTI) += pwm-visconti.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a8eff4b3ee36..c4d5c0667137 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -37,23 +37,13 @@ static struct pwm_device *pwm_to_device(unsigned int pwm)
return radix_tree_lookup(&pwm_tree, pwm);
}
-static int alloc_pwms(int pwm, unsigned int count)
+static int alloc_pwms(unsigned int count)
{
- unsigned int from = 0;
unsigned int start;
- if (pwm >= MAX_PWMS)
- return -EINVAL;
-
- if (pwm >= 0)
- from = pwm;
-
- start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from,
+ start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, 0,
count, 0);
- if (pwm >= 0 && start != pwm)
- return -EEXIST;
-
if (start + count > MAX_PWMS)
return -ENOSPC;
@@ -260,18 +250,14 @@ static bool pwm_ops_check(const struct pwm_chip *chip)
}
/**
- * pwmchip_add_with_polarity() - register a new PWM chip
+ * pwmchip_add() - register a new PWM chip
* @chip: the PWM chip to add
- * @polarity: initial polarity of PWM channels
*
- * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
- * will be used. The initial polarity for all channels is specified by the
- * @polarity parameter.
+ * Register a new PWM chip.
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwmchip_add_with_polarity(struct pwm_chip *chip,
- enum pwm_polarity polarity)
+int pwmchip_add(struct pwm_chip *chip)
{
struct pwm_device *pwm;
unsigned int i;
@@ -285,25 +271,24 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
mutex_lock(&pwm_lock);
- ret = alloc_pwms(chip->base, chip->npwm);
+ ret = alloc_pwms(chip->npwm);
if (ret < 0)
goto out;
+ chip->base = ret;
+
chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
if (!chip->pwms) {
ret = -ENOMEM;
goto out;
}
- chip->base = ret;
-
for (i = 0; i < chip->npwm; i++) {
pwm = &chip->pwms[i];
pwm->chip = chip;
pwm->pwm = chip->base + i;
pwm->hwpwm = i;
- pwm->state.polarity = polarity;
radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
}
@@ -326,21 +311,6 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
-
-/**
- * pwmchip_add() - register a new PWM chip
- * @chip: the PWM chip to add
- *
- * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
- * will be used. The initial polarity for all channels is normal.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwmchip_add(struct pwm_chip *chip)
-{
- return pwmchip_add_with_polarity(chip, PWM_POLARITY_NORMAL);
-}
EXPORT_SYMBOL_GPL(pwmchip_add);
/**
@@ -607,7 +577,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
*/
if (state->polarity != pwm->state.polarity) {
if (!chip->ops->set_polarity)
- return -ENOTSUPP;
+ return -EINVAL;
/*
* Changing the polarity of a running PWM is
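
With the caller-requested base gone, alloc_pwms() always hands out the first free run of IDs and the result becomes chip->base. A standalone sketch of that allocation policy (DEMO_MAX and the linear scan stand in for MAX_PWMS and bitmap_find_next_zero_area()):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX 64 /* stands in for MAX_PWMS */
static bool used[DEMO_MAX];

static int demo_alloc(unsigned int count)
{
	unsigned int start, n;

	for (start = 0; start + count <= DEMO_MAX; start++) {
		for (n = 0; n < count && !used[start + n]; n++)
			;
		if (n == count) {
			for (n = 0; n < count; n++)
				used[start + n] = true;
			return start; /* becomes chip->base */
		}
	}

	return -1; /* the kernel returns -ENOSPC here */
}

int main(void)
{
	printf("first chip base: %d\n", demo_alloc(4));  /* 0 */
	printf("second chip base: %d\n", demo_alloc(2)); /* 4 */
	return 0;
}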
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 58c6c0f5b0ec..e2a26d9da25b 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -24,23 +24,37 @@ struct ab8500_pwm_chip {
struct pwm_chip chip;
};
-static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
- int ret = 0;
- unsigned int higher_val, lower_val;
+ int ret;
u8 reg;
+ unsigned int higher_val, lower_val;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ ret = abx500_mask_and_set_register_interruptible(chip->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (chip->base - 1), 0);
+
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
+ pwm->label, ret);
+ return ret;
+ }
/*
	 * get the first 8 bits that are to be written to
* AB8500_PWM_OUT_CTRL1_REG[0:7]
*/
- lower_val = duty_ns & 0x00FF;
+ lower_val = state->duty_cycle & 0x00FF;
/*
* get bits [9:10] that are to be written to
* AB8500_PWM_OUT_CTRL2_REG[0:1]
*/
- higher_val = ((duty_ns & 0x0300) >> 8);
+ higher_val = ((state->duty_cycle & 0x0300) >> 8);
reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
@@ -48,15 +62,11 @@ static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
reg, (u8)lower_val);
if (ret < 0)
return ret;
+
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
(reg + 1), (u8)higher_val);
-
- return ret;
-}
-
-static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
+ if (ret < 0)
+ return ret;
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
@@ -64,25 +74,12 @@ static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
- return ret;
-}
-static void ab8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
-
- ret = abx500_mask_and_set_register_interruptible(chip->dev,
- AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 0);
- if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
- pwm->label, ret);
+ return ret;
}
static const struct pwm_ops ab8500_pwm_ops = {
- .config = ab8500_pwm_config,
- .enable = ab8500_pwm_enable,
- .disable = ab8500_pwm_disable,
+ .apply = ab8500_pwm_apply,
.owner = THIS_MODULE,
};
@@ -101,7 +98,6 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
- ab8500->chip.base = -1;
ab8500->chip.npwm = 1;
err = pwmchip_add(&ab8500->chip);
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index dcbc0489dfd4..6ab597e54005 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -265,12 +265,11 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
chip->hlcdc = hlcdc;
chip->chip.ops = &atmel_hlcdc_pwm_ops;
chip->chip.dev = dev;
- chip->chip.base = -1;
chip->chip.npwm = 1;
chip->chip.of_xlate = of_pwm_xlate_with_flags;
chip->chip.of_pwm_n_cells = 3;
- ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
+ ret = pwmchip_add(&chip->chip);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 5ccc3e7420e9..8451d3e846be 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -362,20 +362,37 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
tcbpwm->div = i;
tcbpwm->duty = duty;
- /* If the PWM is enabled, call enable to apply the new conf */
- if (pwm_is_enabled(pwm))
- atmel_tcb_pwm_enable(chip, pwm);
-
return 0;
}
+static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int duty_cycle, period;
+ int ret;
+
+ /* This function only sets a flag in driver data */
+ atmel_tcb_pwm_set_polarity(chip, pwm, state->polarity);
+
+ if (!state->enabled) {
+ atmel_tcb_pwm_disable(chip, pwm);
+ return 0;
+ }
+
+ period = state->period < INT_MAX ? state->period : INT_MAX;
+ duty_cycle = state->duty_cycle < INT_MAX ? state->duty_cycle : INT_MAX;
+
+ ret = atmel_tcb_pwm_config(chip, pwm, duty_cycle, period);
+ if (ret)
+ return ret;
+
+ return atmel_tcb_pwm_enable(chip, pwm);
+}
+
static const struct pwm_ops atmel_tcb_pwm_ops = {
.request = atmel_tcb_pwm_request,
.free = atmel_tcb_pwm_free,
- .config = atmel_tcb_pwm_config,
- .set_polarity = atmel_tcb_pwm_set_polarity,
- .enable = atmel_tcb_pwm_enable,
- .disable = atmel_tcb_pwm_disable,
+ .apply = atmel_tcb_pwm_apply,
.owner = THIS_MODULE,
};
@@ -454,7 +471,6 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags;
tcbpwm->chip.of_pwm_n_cells = 3;
- tcbpwm->chip.base = -1;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
tcbpwm->regmap = regmap;
@@ -491,14 +507,14 @@ static int atmel_tcb_pwm_remove(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
int err;
- clk_disable_unprepare(tcbpwm->slow_clk);
- clk_put(tcbpwm->slow_clk);
- clk_put(tcbpwm->clk);
-
err = pwmchip_remove(&tcbpwm->chip);
if (err < 0)
return err;
+ clk_disable_unprepare(tcbpwm->slow_clk);
+ clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->clk);
+
return 0;
}
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 5813339b597b..29b5ad03f715 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -124,6 +124,7 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
}
static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
+ unsigned long clkrate,
const struct pwm_state *state,
unsigned long *cprd, u32 *pres)
{
@@ -132,7 +133,7 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
int shift;
/* Calculate the period cycles and prescale value */
- cycles *= clk_get_rate(atmel_pwm->clk);
+ cycles *= clkrate;
do_div(cycles, NSEC_PER_SEC);
/*
@@ -158,12 +159,14 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
}
static void atmel_pwm_calculate_cdty(const struct pwm_state *state,
- unsigned long cprd, unsigned long *cdty)
+ unsigned long clkrate, unsigned long cprd,
+ u32 pres, unsigned long *cdty)
{
unsigned long long cycles = state->duty_cycle;
- cycles *= cprd;
- do_div(cycles, state->period);
+ cycles *= clkrate;
+ do_div(cycles, NSEC_PER_SEC);
+ cycles >>= pres;
*cdty = cprd - cycles;
}
@@ -244,17 +247,23 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_get_state(pwm, &cstate);
if (state->enabled) {
+ unsigned long clkrate = clk_get_rate(atmel_pwm->clk);
+
if (cstate.enabled &&
cstate.polarity == state->polarity &&
cstate.period == state->period) {
+ u32 cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.period);
- atmel_pwm_calculate_cdty(state, cprd, &cdty);
+ pres = cmr & PWM_CMR_CPRE_MSK;
+
+ atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty);
atmel_pwm_update_cdty(chip, pwm, cdty);
return 0;
}
- ret = atmel_pwm_calculate_cprd_and_pres(chip, state, &cprd,
+ ret = atmel_pwm_calculate_cprd_and_pres(chip, clkrate, state, &cprd,
&pres);
if (ret) {
dev_err(chip->dev,
@@ -262,7 +271,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- atmel_pwm_calculate_cdty(state, cprd, &cdty);
+ atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty);
if (cstate.enabled) {
atmel_pwm_disable(chip, pwm, false);
@@ -319,7 +328,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty);
- tmp = (u64)cdty * NSEC_PER_SEC;
+ tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
tmp <<= pres;
state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
@@ -429,7 +438,6 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.ops = &atmel_pwm_ops;
atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
atmel_pwm->chip.of_pwm_n_cells = 3;
- atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
ret = pwmchip_add(&atmel_pwm->chip);
@@ -451,10 +459,12 @@ static int atmel_pwm_remove(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev);
+ pwmchip_remove(&atmel_pwm->chip);
+
clk_unprepare(atmel_pwm->clk);
mutex_destroy(&atmel_pwm->isr_lock);
- return pwmchip_remove(&atmel_pwm->chip);
+ return 0;
}
static struct platform_driver atmel_pwm_driver = {
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index f4853c4a2d75..edd2ce1760ab 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -209,7 +209,6 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
ip->chip.dev = &pdev->dev;
ip->chip.ops = &iproc_pwm_ops;
- ip->chip.base = -1;
ip->chip.npwm = 4;
ip->chip.of_xlate = of_pwm_xlate_with_flags;
ip->chip.of_pwm_n_cells = 3;
@@ -254,9 +253,11 @@ static int iproc_pwmc_remove(struct platform_device *pdev)
{
struct iproc_pwmc *ip = platform_get_drvdata(pdev);
+ pwmchip_remove(&ip->chip);
+
clk_disable_unprepare(ip->clk);
- return pwmchip_remove(&ip->chip);
+ return 0;
}
static const struct of_device_id bcm_iproc_pwmc_dt[] = {
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 578b3621c97e..800b9edf2e71 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -271,7 +271,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
- kp->chip.base = -1;
kp->chip.npwm = 6;
kp->chip.of_xlate = of_pwm_xlate_with_flags;
kp->chip.of_pwm_n_cells = 3;
@@ -301,7 +300,7 @@ static int kona_pwmc_probe(struct platform_device *pdev)
clk_disable_unprepare(kp->clk);
- ret = pwmchip_add_with_polarity(&kp->chip, PWM_POLARITY_INVERSED);
+ ret = pwmchip_add(&kp->chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
@@ -311,11 +310,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
static int kona_pwmc_remove(struct platform_device *pdev)
{
struct kona_pwmc *kp = platform_get_drvdata(pdev);
- unsigned int chan;
-
- for (chan = 0; chan < kp->chip.npwm; chan++)
- if (pwm_is_enabled(&kp->chip.pwms[chan]))
- clk_disable_unprepare(kp->clk);
return pwmchip_remove(&kp->chip);
}
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 6ff5f04b3e07..fc240d5b8121 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -64,8 +64,9 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
unsigned long rate = clk_get_rate(pc->clk);
- unsigned long long period;
- unsigned long scaler;
+ unsigned long long period_cycles;
+ u64 max_period;
+
u32 val;
if (!rate) {
@@ -73,18 +74,36 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
- scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
+ /*
+ * period_cycles must be a 32 bit value, so period * rate / NSEC_PER_SEC
+ * must be <= U32_MAX. As U32_MAX * NSEC_PER_SEC < U64_MAX the
+ * multiplication period * rate doesn't overflow.
+ * To calculate the maximal possible period that guarantees the
+ * above inequality:
+ *
+ * round(period * rate / NSEC_PER_SEC) <= U32_MAX
+ * <=> period * rate / NSEC_PER_SEC < U32_MAX + 0.5
+ * <=> period * rate < (U32_MAX + 0.5) * NSEC_PER_SEC
+ * <=> period < ((U32_MAX + 0.5) * NSEC_PER_SEC) / rate
+	 * <=> period < (U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate
+ * <=> period <= ceil((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate) - 1
+ */
+ max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, rate) - 1;
+
+ if (state->period > max_period)
+ return -EINVAL;
+
/* set period */
- period = DIV_ROUND_CLOSEST_ULL(state->period, scaler);
+ period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * rate, NSEC_PER_SEC);
- /* dont accept a period that is too small or has been truncated */
- if ((period < PERIOD_MIN) || (period > U32_MAX))
+ /* don't accept a period that is too small */
+ if (period_cycles < PERIOD_MIN)
return -EINVAL;
- writel(period, pc->base + PERIOD(pwm->hwpwm));
+ writel(period_cycles, pc->base + PERIOD(pwm->hwpwm));
/* set duty cycle */
- val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, scaler);
+ val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * rate, NSEC_PER_SEC);
writel(val, pc->base + DUTY(pwm->hwpwm));
/* set polarity */
@@ -139,7 +158,6 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = 2;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
@@ -161,9 +179,11 @@ static int bcm2835_pwm_remove(struct platform_device *pdev)
{
struct bcm2835_pwm *pc = platform_get_drvdata(pdev);
+ pwmchip_remove(&pc->chip);
+
clk_disable_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static const struct of_device_id bcm2835_pwm_of_match[] = {
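
A quick standalone check of the max_period bound derived in the bcm2835 comment: for any period up to that limit, the rounded period_cycles value still fits in 32 bits. The clock rate below is an arbitrary example, not a claim about real hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 9600000; /* example clock rate in Hz */
	uint64_t nsec = 1000000000;
	uint64_t max_period, cycles;

	/* ceil((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate) - 1 */
	max_period = ((uint64_t)UINT32_MAX * nsec + nsec / 2 + rate - 1)
		     / rate - 1;

	/* round(max_period * rate / NSEC_PER_SEC) must be <= U32_MAX */
	cycles = (max_period * rate + nsec / 2) / nsec;

	printf("max_period=%llu ns -> %llu cycles (U32_MAX=%u)\n",
	       (unsigned long long)max_period,
	       (unsigned long long)cycles, UINT32_MAX);
	return (cycles <= UINT32_MAX) ? 0 : 1;
}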
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index fe405289e582..acb6fbc3cc32 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -206,7 +206,6 @@ static int berlin_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &berlin_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = 4;
pwm->chip.of_xlate = of_pwm_xlate_with_flags;
pwm->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 8b66f9d2f589..8b1d1e7aa856 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -258,7 +258,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
p->chip.dev = &pdev->dev;
p->chip.ops = &brcmstb_pwm_ops;
- p->chip.base = -1;
p->chip.npwm = 2;
p->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index cb1af86873ee..f3d17a590305 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -128,7 +128,6 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &clps711x_pwm_ops;
priv->chip.dev = &pdev->dev;
- priv->chip.base = -1;
priv->chip.npwm = 2;
priv->chip.of_xlate = clps711x_pwm_xlate;
priv->chip.of_pwm_n_cells = 1;
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 1e2276808b7a..02522a9a3073 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -168,7 +168,6 @@ static int crystalcove_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &crc_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = 1;
/* get the PMIC regmap */
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index c1c337969e4e..9fffb566af5f 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -124,6 +124,9 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->period != EC_PWM_MAX_DUTY)
return -EINVAL;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
/*
* EC doesn't separate the concept of duty cycle and enabled, but
* kernel does. Translate.
@@ -253,7 +256,6 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
chip->ops = &cros_ec_pwm_ops;
chip->of_xlate = cros_ec_pwm_xlate;
chip->of_pwm_n_cells = 1;
- chip->base = -1;
ret = cros_ec_num_pwms(ec);
if (ret < 0) {
dev_err(dev, "Couldn't find PWMs: %d\n", ret);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index f6c98e0d57c2..7568300bb11e 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -233,7 +233,6 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
dwc->chip.dev = dev;
dwc->chip.ops = &dwc_pwm_ops;
dwc->chip.npwm = DWC_TIMERS_TOTAL;
- dwc->chip.base = -1;
ret = pwmchip_add(&dwc->chip);
if (ret)
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index c9fc6f223640..4ca70794ad96 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -185,7 +185,6 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
ep93xx_pwm->chip.dev = &pdev->dev;
ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
- ep93xx_pwm->chip.base = -1;
ep93xx_pwm->chip.npwm = 1;
ret = pwmchip_add(&ep93xx_pwm->chip);
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 2a6801226aba..0e1ae9469eda 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -453,7 +453,6 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.ops = &fsl_pwm_ops;
fpc->chip.of_xlate = of_pwm_xlate_with_flags;
fpc->chip.of_pwm_n_cells = 3;
- fpc->chip.base = -1;
fpc->chip.npwm = 8;
ret = pwmchip_add(&fpc->chip);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index a1900d0a872e..82d17fc75c21 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -205,7 +205,6 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.ops = &hibvt_pwm_ops;
pwm_chip->chip.dev = &pdev->dev;
- pwm_chip->chip.base = -1;
pwm_chip->chip.npwm = soc->num_pwms;
pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
pwm_chip->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 6faf5b5a5584..cc37054589cc 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -304,7 +304,6 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = IMG_PWM_NPWM;
ret = pwmchip_add(&pwm->chip);
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index aaf629bd8c35..97c9133b6876 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -363,7 +363,6 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
tpm->chip.dev = &pdev->dev;
tpm->chip.ops = &imx_tpm_pwm_ops;
- tpm->chip.base = -1;
tpm->chip.of_xlate = of_pwm_xlate_with_flags;
tpm->chip.of_pwm_n_cells = 3;
@@ -411,9 +410,7 @@ static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
ret = clk_prepare_enable(tpm->clk);
if (ret)
- dev_err(dev,
- "failed to prepare or enable clock: %d\n",
- ret);
+ dev_err(dev, "failed to prepare or enable clock: %d\n", ret);
return ret;
}
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index 727e0d3e249e..c957b365448e 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -155,7 +155,6 @@ static int pwm_imx1_probe(struct platform_device *pdev)
imx->chip.ops = &pwm_imx1_ops;
imx->chip.dev = &pdev->dev;
- imx->chip.base = -1;
imx->chip.npwm = 1;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 18055326a2f3..ba695115c160 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -327,7 +327,6 @@ static int pwm_imx27_probe(struct platform_device *pdev)
imx->chip.ops = &pwm_imx27_ops;
imx->chip.dev = &pdev->dev;
- imx->chip.base = -1;
imx->chip.npwm = 1;
imx->chip.of_xlate = of_pwm_xlate_with_flags;
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index e9e54dda07aa..015f5eba09a1 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -207,7 +207,6 @@ static int lgm_pwm_probe(struct platform_device *pdev)
pc->chip.dev = dev;
pc->chip.ops = &lgm_pwm_ops;
pc->chip.npwm = 1;
- pc->chip.base = -1;
lgm_pwm_init(pc);
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 957b972c458b..6c6e26d18329 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -206,7 +206,6 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
iqs620_pwm->chip.dev = &pdev->dev;
iqs620_pwm->chip.ops = &iqs620_pwm_ops;
- iqs620_pwm->chip.base = -1;
iqs620_pwm->chip.npwm = 1;
mutex_init(&iqs620_pwm->lock);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 00c642fa2eed..5b6bdcdcecf5 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -244,7 +244,6 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.dev = dev;
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = info->num_pwms;
- jz4740->chip.base = -1;
jz4740->chip.of_xlate = of_pwm_xlate_with_flags;
jz4740->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index cdfdef66ff8e..521a825c8ba0 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -203,7 +203,6 @@ static int keembay_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->chip.base = -1;
priv->chip.dev = dev;
priv->chip.ops = &keembay_pwm_ops;
priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index bf3f14fb5f24..7551253ada32 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -275,7 +275,6 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
- lp3943_pwm->chip.base = -1;
platform_set_drvdata(pdev, lp3943_pwm);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 7ef40243eb6c..b643ac61a2e7 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -370,7 +370,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->chip.dev = &pdev->dev;
lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
- lpc18xx_pwm->chip.base = -1;
lpc18xx_pwm->chip.npwm = 16;
lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
lpc18xx_pwm->chip.of_pwm_n_cells = 3;
@@ -442,13 +441,15 @@ static int lpc18xx_pwm_remove(struct platform_device *pdev)
struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
u32 val;
+ pwmchip_remove(&lpc18xx_pwm->chip);
+
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
val | LPC18XX_PWM_CTRL_HALT);
clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
- return pwmchip_remove(&lpc18xx_pwm->chip);
+ return 0;
}
static struct platform_driver lpc18xx_pwm_driver = {
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 6b4090436c06..2834a0f001d3 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -116,7 +116,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 1;
- lpc32xx->chip.base = -1;
ret = pwmchip_add(&lpc32xx->chip);
if (ret < 0) {
@@ -137,10 +136,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
static int lpc32xx_pwm_remove(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < lpc32xx->chip.npwm; i++)
- pwm_disable(&lpc32xx->chip.pwms[i]);
return pwmchip_remove(&lpc32xx->chip);
}
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 939de93c157b..58b4031524af 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -234,7 +234,6 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
- lpwm->chip.base = -1;
lpwm->chip.npwm = info->npwm;
ret = pwmchip_add(&lpwm->chip);
@@ -255,12 +254,6 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe);
int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
{
- int i;
-
- for (i = 0; i < lpwm->info->npwm; i++) {
- if (pwm_is_enabled(&lpwm->chip.pwms[i]))
- pm_runtime_put(lpwm->chip.dev);
- }
return pwmchip_remove(&lpwm->chip);
}
EXPORT_SYMBOL_GPL(pwm_lpss_remove);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index fcfc3b147e5f..b4a31060bcd7 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -107,12 +107,6 @@ static void pwm_mediatek_clk_disable(struct pwm_chip *chip,
clk_disable_unprepare(pc->clk_top);
}
-static inline u32 pwm_mediatek_readl(struct pwm_mediatek_chip *chip,
- unsigned int num, unsigned int offset)
-{
- return readl(chip->regs + pwm_mediatek_reg_offset[num] + offset);
-}
-
static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
@@ -263,7 +257,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &pwm_mediatek_ops;
- pc->chip.base = -1;
pc->chip.npwm = pc->soc->num_pwms;
ret = pwmchip_add(&pc->chip);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index a3ce9789412a..9eb060613cb4 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -550,7 +550,6 @@ static int meson_pwm_probe(struct platform_device *pdev)
spin_lock_init(&meson->lock);
meson->chip.dev = &pdev->dev;
meson->chip.ops = &meson_pwm_ops;
- meson->chip.base = -1;
meson->chip.npwm = MESON_NUM_PWMS;
meson->chip.of_xlate = of_pwm_xlate_with_flags;
meson->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 87c6b4bc5d43..9b3ba401a3db 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -202,7 +202,6 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
- mdp->chip.base = -1;
mdp->chip.npwm = 1;
ret = pwmchip_add(&mdp->chip);
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 7ce616923c52..0266e84e982c 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -140,7 +140,6 @@ static int mxs_pwm_probe(struct platform_device *pdev)
mxs->chip.ops = &mxs_pwm_ops;
mxs->chip.of_xlate = of_pwm_xlate_with_flags;
mxs->chip.of_pwm_n_cells = 3;
- mxs->chip.base = -1;
ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 358db4ff9d4f..612b3c859295 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -403,7 +403,6 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
omap->chip.dev = &pdev->dev;
omap->chip.ops = &pwm_omap_dmtimer_ops;
- omap->chip.base = -1;
omap->chip.npwm = 1;
omap->chip.of_xlate = of_pwm_xlate_with_flags;
omap->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 4a55dc18656c..7c9f174de64e 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -51,7 +51,6 @@
#define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */
#define PCA9685_COUNTER_RANGE 4096
-#define PCA9685_DEFAULT_PERIOD 5000000 /* Default period_ns = 1/200 Hz */
#define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator with 25 MHz */
#define PCA9685_NUMREGS 0xFF
@@ -71,10 +70,14 @@
#define LED_N_OFF_H(N) (PCA9685_LEDX_OFF_H + (4 * (N)))
#define LED_N_OFF_L(N) (PCA9685_LEDX_OFF_L + (4 * (N)))
+#define REG_ON_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_H : LED_N_ON_H((C)))
+#define REG_ON_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_L : LED_N_ON_L((C)))
+#define REG_OFF_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_H : LED_N_OFF_H((C)))
+#define REG_OFF_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L((C)))
+
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
- int period_ns;
#if IS_ENABLED(CONFIG_GPIOLIB)
struct mutex lock;
struct gpio_chip gpio;
@@ -87,6 +90,53 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
return container_of(chip, struct pca9685, chip);
}
+/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
+static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty)
+{
+ if (duty == 0) {
+ /* Set the full OFF bit, which has the highest precedence */
+ regmap_write(pca->regmap, REG_OFF_H(channel), LED_FULL);
+ } else if (duty >= PCA9685_COUNTER_RANGE) {
+ /* Set the full ON bit and clear the full OFF bit */
+ regmap_write(pca->regmap, REG_ON_H(channel), LED_FULL);
+ regmap_write(pca->regmap, REG_OFF_H(channel), 0);
+ } else {
+ /* Set OFF time (clears the full OFF bit) */
+ regmap_write(pca->regmap, REG_OFF_L(channel), duty & 0xff);
+ regmap_write(pca->regmap, REG_OFF_H(channel), (duty >> 8) & 0xf);
+ /* Clear the full ON bit */
+ regmap_write(pca->regmap, REG_ON_H(channel), 0);
+ }
+}
+
+static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
+{
+ unsigned int off_h = 0, val = 0;
+
+ if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
+ /* HW does not support reading state of "all LEDs" channel */
+ return 0;
+ }
+
+ regmap_read(pca->regmap, LED_N_OFF_H(channel), &off_h);
+ if (off_h & LED_FULL) {
+ /* Full OFF bit is set */
+ return 0;
+ }
+
+ regmap_read(pca->regmap, LED_N_ON_H(channel), &val);
+ if (val & LED_FULL) {
+ /* Full ON bit is set */
+ return PCA9685_COUNTER_RANGE;
+ }
+
+ if (regmap_read(pca->regmap, LED_N_OFF_L(channel), &val)) {
+ /* Reset val to 0 in case reading LED_N_OFF_L failed */
+ val = 0;
+ }
+ return ((off_h & 0xf) << 8) | (val & 0xff);
+}
+
#if IS_ENABLED(CONFIG_GPIOLIB)
static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
{
@@ -138,34 +188,23 @@ static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm = &pca->chip.pwms[offset];
- unsigned int value;
-
- regmap_read(pca->regmap, LED_N_ON_H(pwm->hwpwm), &value);
- return value & LED_FULL;
+ return pca9685_pwm_get_duty(pca, offset) != 0;
}
static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
int value)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm = &pca->chip.pwms[offset];
- unsigned int on = value ? LED_FULL : 0;
- /* Clear both OFF registers */
- regmap_write(pca->regmap, LED_N_OFF_L(pwm->hwpwm), 0);
- regmap_write(pca->regmap, LED_N_OFF_H(pwm->hwpwm), 0);
-
- /* Set the full ON bit */
- regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on);
+ pca9685_pwm_set_duty(pca, offset, value ? PCA9685_COUNTER_RANGE : 0);
}
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- pca9685_pwm_gpio_set(gpio, offset, 0);
+ pca9685_pwm_set_duty(pca, offset, 0);
pm_runtime_put(pca->chip.dev);
pca9685_pwm_clear_inuse(pca, offset);
}
@@ -246,165 +285,85 @@ static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable)
}
}
-static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
- unsigned long long duty;
- unsigned int reg;
- int prescale;
-
- if (period_ns != pca->period_ns) {
- prescale = DIV_ROUND_CLOSEST(PCA9685_OSC_CLOCK_MHZ * period_ns,
- PCA9685_COUNTER_RANGE * 1000) - 1;
-
- if (prescale >= PCA9685_PRESCALE_MIN &&
- prescale <= PCA9685_PRESCALE_MAX) {
- /*
- * Putting the chip briefly into SLEEP mode
- * at this point won't interfere with the
- * pm_runtime framework, because the pm_runtime
- * state is guaranteed active here.
- */
- /* Put chip into sleep mode */
- pca9685_set_sleep_mode(pca, true);
-
- /* Change the chip-wide output frequency */
- regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
-
- /* Wake the chip up */
- pca9685_set_sleep_mode(pca, false);
-
- pca->period_ns = period_ns;
- } else {
- dev_err(chip->dev,
- "prescaler not set: period out of bounds!\n");
- return -EINVAL;
- }
- }
+ unsigned long long duty, prescale;
+ unsigned int val = 0;
- if (duty_ns < 1) {
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
- regmap_write(pca->regmap, reg, LED_FULL);
-
- return 0;
+ prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period,
+ PCA9685_COUNTER_RANGE * 1000) - 1;
+ if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) {
+ dev_err(chip->dev, "pwm not changed: period out of bounds!\n");
+ return -EINVAL;
}
- if (duty_ns == period_ns) {
- /* Clear both OFF registers */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_L;
- else
- reg = LED_N_OFF_L(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0x0);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0x0);
-
- /* Set the full ON bit */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_H;
- else
- reg = LED_N_ON_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, LED_FULL);
-
+ if (!state->enabled) {
+ pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
return 0;
}
- duty = PCA9685_COUNTER_RANGE * (unsigned long long)duty_ns;
- duty = DIV_ROUND_UP_ULL(duty, period_ns);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_L;
- else
- reg = LED_N_OFF_L(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, (int)duty & 0xff);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, ((int)duty >> 8) & 0xf);
+ regmap_read(pca->regmap, PCA9685_PRESCALE, &val);
+ if (prescale != val) {
+ /*
+ * Putting the chip briefly into SLEEP mode
+ * at this point won't interfere with the
+ * pm_runtime framework, because the pm_runtime
+ * state is guaranteed active here.
+ */
+ /* Put chip into sleep mode */
+ pca9685_set_sleep_mode(pca, true);
- /* Clear the full ON bit, otherwise the set OFF time has no effect */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_H;
- else
- reg = LED_N_ON_H(pwm->hwpwm);
+ /* Change the chip-wide output frequency */
+ regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
- regmap_write(pca->regmap, reg, 0);
+ /* Wake the chip up */
+ pca9685_set_sleep_mode(pca, false);
+ }
+ duty = PCA9685_COUNTER_RANGE * state->duty_cycle;
+ duty = DIV_ROUND_UP_ULL(duty, state->period);
+ pca9685_pwm_set_duty(pca, pwm->hwpwm, duty);
return 0;
}
-static int pca9685_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
- unsigned int reg;
-
- /*
- * The PWM subsystem does not support a pre-delay.
- * So, set the ON-timeout to 0
- */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_L;
- else
- reg = LED_N_ON_L(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_H;
- else
- reg = LED_N_ON_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0);
+ unsigned long long duty;
+ unsigned int val = 0;
+ /* Calculate (chip-wide) period from prescale value */
+ regmap_read(pca->regmap, PCA9685_PRESCALE, &val);
/*
- * Clear the full-off bit.
- * It has precedence over the others and must be off.
+	 * PCA9685_OSC_CLOCK_MHZ is 25, which divides 1000 evenly, so the
+	 * following calculation is a pure multiplication and we are not
+	 * losing precision.
*/
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_update_bits(pca->regmap, reg, LED_FULL, 0x0);
+ state->period = (PCA9685_COUNTER_RANGE * 1000 / PCA9685_OSC_CLOCK_MHZ) *
+ (val + 1);
- return 0;
-}
+ /* The (per-channel) polarity is fixed */
+ state->polarity = PWM_POLARITY_NORMAL;
-static void pca9685_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct pca9685 *pca = to_pca(chip);
- unsigned int reg;
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, LED_FULL);
-
- /* Clear the LED_OFF counter. */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_L;
- else
- reg = LED_N_OFF_L(pwm->hwpwm);
+ if (pwm->hwpwm >= PCA9685_MAXCHAN) {
+ /*
+	 * The "all LEDs" channel does not support HW readout.
+	 * Return 0 and disabled for backwards compatibility.
+ */
+ state->duty_cycle = 0;
+ state->enabled = false;
+ return;
+ }
- regmap_write(pca->regmap, reg, 0x0);
+ state->enabled = true;
+ duty = pca9685_pwm_get_duty(pca, pwm->hwpwm);
+ state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
}
static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -422,15 +381,14 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
- pca9685_pwm_disable(chip, pwm);
+ pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
pm_runtime_put(chip->dev);
pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
- .enable = pca9685_pwm_enable,
- .disable = pca9685_pwm_disable,
- .config = pca9685_pwm_config,
+ .apply = pca9685_pwm_apply,
+ .get_state = pca9685_pwm_get_state,
.request = pca9685_pwm_request,
.free = pca9685_pwm_free,
.owner = THIS_MODULE,
@@ -461,7 +419,6 @@ static int pca9685_pwm_probe(struct i2c_client *client,
ret);
return ret;
}
- pca->period_ns = PCA9685_DEFAULT_PERIOD;
i2c_set_clientdata(client, pca);
@@ -484,16 +441,15 @@ static int pca9685_pwm_probe(struct i2c_client *client,
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
regmap_write(pca->regmap, PCA9685_MODE1, reg);
- /* Clear all "full off" bits */
- regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, 0);
- regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, 0);
+ /* Reset OFF registers to POR default */
+ regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, LED_FULL);
+ regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, LED_FULL);
pca->chip.ops = &pca9685_pwm_ops;
/* Add an extra channel for ALL_LED */
pca->chip.npwm = PCA9685_MAXCHAN + 1;
pca->chip.dev = &client->dev;
- pca->chip.base = -1;
ret = pwmchip_add(&pca->chip);
if (ret < 0)
@@ -505,14 +461,20 @@ static int pca9685_pwm_probe(struct i2c_client *client,
return ret;
}
- /* The chip comes out of power-up in the active state */
- pm_runtime_set_active(&client->dev);
- /*
- * Enable will put the chip into suspend, which is what we
- * want as all outputs are disabled at this point
- */
pm_runtime_enable(&client->dev);
+ if (pm_runtime_enabled(&client->dev)) {
+ /*
+ * Although the chip comes out of power-up in the sleep state,
+ * we force it to sleep in case it was woken up before
+ */
+ pca9685_set_sleep_mode(pca, true);
+ pm_runtime_set_suspended(&client->dev);
+ } else {
+ /* Wake the chip up if runtime PM is disabled */
+ pca9685_set_sleep_mode(pca, false);
+ }
+
return 0;
}
@@ -524,7 +486,14 @@ static int pca9685_pwm_remove(struct i2c_client *client)
ret = pwmchip_remove(&pca->chip);
if (ret)
return ret;
+
+ if (!pm_runtime_enabled(&client->dev)) {
+ /* Put chip in sleep state if runtime PM is disabled */
+ pca9685_set_sleep_mode(pca, true);
+ }
+
pm_runtime_disable(&client->dev);
+
return 0;
}
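Editor's note: a worked example of the arithmetic in pca9685_pwm_apply() and pca9685_pwm_get_state() above, restated as self-contained C. The constants mirror PCA9685_OSC_CLOCK_MHZ and PCA9685_COUNTER_RANGE; the function names are made up for illustration:

    #include <stdint.h>

    #define OSC_MHZ 25u     /* PCA9685_OSC_CLOCK_MHZ */
    #define RANGE 4096u     /* PCA9685_COUNTER_RANGE */

    /* apply(): DIV_ROUND_CLOSEST(OSC_MHZ * period_ns, RANGE * 1000) - 1 */
    static uint32_t pca_prescale(uint64_t period_ns)
    {
            return (uint32_t)((OSC_MHZ * period_ns + RANGE * 1000 / 2) /
                              (RANGE * 1000)) - 1;
    }

    /* get_state(): RANGE counter ticks of (prescale + 1) 40 ns cycles */
    static uint64_t pca_period_ns(uint32_t prescale)
    {
            return (uint64_t)(RANGE * 1000 / OSC_MHZ) * (prescale + 1);
    }

    /*
     * For a requested period of 5 ms: pca_prescale(5000000) == 30 and
     * pca_period_ns(30) == 5079040, i.e. the hardware actually runs at
     * ~5.08 ms. A 50% duty cycle then maps to an OFF counter of
     * DIV_ROUND_UP(RANGE * duty_ns, period_ns) == 2048.
     */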
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index d06cf60e6575..cfb683827d32 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -184,7 +184,6 @@ static int pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &pxa_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
if (IS_ENABLED(CONFIG_OF)) {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 002ab79a7ec2..9daca0c772c7 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -224,7 +224,6 @@ static int rcar_pwm_probe(struct platform_device *pdev)
rcar_pwm->chip.dev = &pdev->dev;
rcar_pwm->chip.ops = &rcar_pwm_ops;
- rcar_pwm->chip.base = -1;
rcar_pwm->chip.npwm = 1;
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index d02b24b77cdf..e2959fae0969 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -410,7 +410,6 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.ops = &tpu_pwm_ops;
tpu->chip.of_xlate = of_pwm_xlate_with_flags;
tpu->chip.of_pwm_n_cells = 3;
- tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 6ad7d0a50aed..301785fa293e 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -352,7 +352,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->data = id->data;
pc->chip.dev = &pdev->dev;
pc->chip.ops = &rockchip_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = 1;
if (pc->data->supports_polarity) {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 645d0066ff0a..515489fa4f6d 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -519,7 +519,6 @@ static int pwm_samsung_probe(struct platform_device *pdev)
chip->chip.dev = &pdev->dev;
chip->chip.ops = &pwm_samsung_ops;
- chip->chip.base = -1;
chip->chip.npwm = SAMSUNG_PWM_NUM;
chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 2a7cd2deaeea..688737f091ac 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -244,7 +244,6 @@ static int pwm_sifive_probe(struct platform_device *pdev)
chip->ops = &pwm_sifive_ops;
chip->of_xlate = of_pwm_xlate_with_flags;
chip->of_pwm_n_cells = 3;
- chip->base = -1;
chip->npwm = 4;
ddata->regs = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 0b01ec25e2f0..7a69c1a0c060 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -229,7 +229,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip = &priv->pwm_chip;
chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
- chip->base = -1;
chip->npwm = 1;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index f63b54aae1b4..1a1cedfd11ce 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -193,7 +193,6 @@ static int spear_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &spear_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = NUM_PWM;
ret = clk_prepare(pc->clk);
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 5123d948efd6..98c479dfae31 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -164,6 +164,9 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *cstate = &pwm->state;
int ret;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
if (state->enabled) {
if (!cstate->enabled) {
/*
@@ -268,7 +271,6 @@ static int sprd_pwm_probe(struct platform_device *pdev)
spc->chip.dev = &pdev->dev;
spc->chip.ops = &sprd_pwm_ops;
- spc->chip.base = -1;
spc->chip.npwm = spc->num_pwms;
ret = pwmchip_add(&spc->chip);
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 99c70e07858d..f491d56254d7 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -619,7 +619,6 @@ static int sti_pwm_probe(struct platform_device *pdev)
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = pc->cdata->pwm_num_devs;
ret = pwmchip_add(&pc->chip);
@@ -650,15 +649,13 @@ static int sti_pwm_probe(struct platform_device *pdev)
static int sti_pwm_remove(struct platform_device *pdev)
{
struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
- unsigned int i;
- for (i = 0; i < pc->cdata->pwm_num_devs; i++)
- pwm_disable(&pc->chip.pwms[i]);
+ pwmchip_remove(&pc->chip);
clk_unprepare(pc->pwm_clk);
clk_unprepare(pc->cpt_clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static const struct of_device_id sti_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 134c14621ee0..af08f564ef1d 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -205,7 +205,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
- priv->chip.base = -1;
priv->chip.dev = &pdev->dev;
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index d3be944f2ae9..c46fb90036ab 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -633,7 +633,6 @@ static int stm32_pwm_probe(struct platform_device *pdev)
stm32_pwm_detect_complementary(priv);
- priv->chip.base = -1;
priv->chip.dev = dev;
priv->chip.ops = &stm32pwm_ops;
priv->chip.npwm = stm32_pwm_detect_channels(priv);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index be5f6d7359d4..9dc983a3cbf1 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -278,7 +278,6 @@ static int __init stmpe_pwm_probe(struct platform_device *pdev)
pwm->stmpe = stmpe;
pwm->chip.dev = &pdev->dev;
- pwm->chip.base = -1;
if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
pwm->chip.ops = &stmpe_24xx_pwm_ops;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index ce5c4fc8da6f..e01becd102c0 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -459,7 +459,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = pwm->data->npwm;
pwm->chip.of_xlate = of_pwm_xlate_with_flags;
pwm->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 55bc63d5a0ae..c529a170bcdd 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -285,7 +285,6 @@ static int tegra_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &tegra_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = pwm->soc->num_channels;
ret = pwmchip_add(&pwm->chip);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 2a8949014bb1..b9a17ab0c202 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -226,7 +226,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.ops = &ecap_pwm_ops;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
- pc->chip.base = -1;
pc->chip.npwm = 1;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index a7fb224d6535..90095a19bf2d 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -449,7 +449,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.ops = &ehrpwm_pwm_ops;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
- pc->chip.base = -1;
pc->chip.npwm = NUM_PWM_CHANNEL;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 630b9a578820..6c8df5f4e87d 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -291,7 +291,6 @@ static int twl_pwmled_probe(struct platform_device *pdev)
}
twl->chip.dev = &pdev->dev;
- twl->chip.base = -1;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index aee67974f353..e83a826bf621 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -310,7 +310,6 @@ static int twl_pwm_probe(struct platform_device *pdev)
twl->chip.ops = &twl6030_pwm_ops;
twl->chip.dev = &pdev->dev;
- twl->chip.base = -1;
twl->chip.npwm = 2;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
new file mode 100644
index 000000000000..46d903786366
--- /dev/null
+++ b/drivers/pwm/pwm-visconti.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti pulse-width-modulation controller driver
+ *
+ * Copyright (c) 2020 - 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2020 - 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Authors: Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ *
+ * Limitations:
+ * - The fixed input clock is running at 1 MHz and is divided by either 1,
+ * 2, 4 or 8.
+ * - When the settings of the PWM are modified, the new values are shadowed
+ * in hardware until the PIPGM_PCSR register is written and the currently
+ * running period is completed. This way the hardware switches atomically
+ * from the old setting to the new.
+ * - Disabling the hardware completes the currently running period and keeps
+ * the output at low level at all times.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define PIPGM_PCSR(ch) (0x400 + 4 * (ch))
+#define PIPGM_PDUT(ch) (0x420 + 4 * (ch))
+#define PIPGM_PWMC(ch) (0x440 + 4 * (ch))
+
+#define PIPGM_PWMC_PWMACT BIT(5)
+#define PIPGM_PWMC_CLK_MASK GENMASK(1, 0)
+#define PIPGM_PWMC_POLARITY_MASK GENMASK(5, 5)
+
+struct visconti_pwm_chip {
+ struct pwm_chip chip;
+ void __iomem *base;
+};
+
+static inline struct visconti_pwm_chip *visconti_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct visconti_pwm_chip, chip);
+}
+
+static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
+ u32 period, duty_cycle, pwmc0;
+
+ if (!state->enabled) {
+ writel(0, priv->base + PIPGM_PCSR(pwm->hwpwm));
+ return 0;
+ }
+
+ /*
+ * The biggest period the hardware can provide is
+ * (0xffff << 3) * 1000 ns. This value fits easily in a u32, so
+ * simplify the maths by capping the values to 32 bit integers.
+ */
+ if (state->period > (0xffff << 3) * 1000)
+ period = (0xffff << 3) * 1000;
+ else
+ period = state->period;
+
+ if (state->duty_cycle > period)
+ duty_cycle = period;
+ else
+ duty_cycle = state->duty_cycle;
+
+ /*
+ * The input clock runs fixed at 1 MHz, so we have only
+ * microsecond resolution and so can divide by
+ * NSEC_PER_SEC / CLKFREQ = 1000 without losing precision.
+ */
+ period /= 1000;
+ duty_cycle /= 1000;
+
+ if (!period)
+ return -ERANGE;
+
+	/*
+	 * PWMC controls a divider that divides the input clk by a
+	 * power of two between 1 and 8. As a smaller divider yields
+	 * higher precision, pick the smallest possible one. Use fls()
+	 * rather than ilog2() so that a period just above a power-of-two
+	 * boundary still gets a divider large enough for the 16-bit
+	 * counter; period is at most 0xffff << 3, so the result stays
+	 * in [0..3].
+	 */
+	pwmc0 = fls(period >> 16);
+	if (WARN_ON(pwmc0 > 3))
+		return -EINVAL;
+
+ period >>= pwmc0;
+ duty_cycle >>= pwmc0;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ pwmc0 |= PIPGM_PWMC_PWMACT;
+ writel(pwmc0, priv->base + PIPGM_PWMC(pwm->hwpwm));
+ writel(duty_cycle, priv->base + PIPGM_PDUT(pwm->hwpwm));
+ writel(period, priv->base + PIPGM_PCSR(pwm->hwpwm));
+
+ return 0;
+}
+
+static void visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
+ u32 period, duty, pwmc0, pwmc0_clk;
+
+ period = readl(priv->base + PIPGM_PCSR(pwm->hwpwm));
+ duty = readl(priv->base + PIPGM_PDUT(pwm->hwpwm));
+ pwmc0 = readl(priv->base + PIPGM_PWMC(pwm->hwpwm));
+ pwmc0_clk = pwmc0 & PIPGM_PWMC_CLK_MASK;
+
+ state->period = (period << pwmc0_clk) * NSEC_PER_USEC;
+ state->duty_cycle = (duty << pwmc0_clk) * NSEC_PER_USEC;
+ if (pwmc0 & PIPGM_PWMC_POLARITY_MASK)
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ state->enabled = true;
+}
+
+static const struct pwm_ops visconti_pwm_ops = {
+ .apply = visconti_pwm_apply,
+ .get_state = visconti_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int visconti_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pwm_chip *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->chip.dev = dev;
+ priv->chip.ops = &visconti_pwm_ops;
+ priv->chip.npwm = 4;
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n");
+
+ return 0;
+}
+
+static int visconti_pwm_remove(struct platform_device *pdev)
+{
+ struct visconti_pwm_chip *priv = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&priv->chip);
+
+ return 0;
+}
+
+static const struct of_device_id visconti_pwm_of_match[] = {
+ { .compatible = "toshiba,visconti-pwm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, visconti_pwm_of_match);
+
+static struct platform_driver visconti_pwm_driver = {
+ .driver = {
+ .name = "pwm-visconti",
+ .of_match_table = visconti_pwm_of_match,
+ },
+ .probe = visconti_pwm_probe,
+ .remove = visconti_pwm_remove,
+};
+module_platform_driver(visconti_pwm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>");
+MODULE_ALIAS("platform:pwm-visconti");
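Editor's note: a sketch of the divider selection used by visconti_pwm_apply() above, written as hosted C with an open-coded fls() (the kernel helper returns the 1-based index of the most significant set bit, with fls(0) == 0); fls32() and visconti_pick_divider() are made-up names:

    #include <stdint.h>

    static unsigned int fls32(uint32_t x)  /* stand-in for the kernel's fls() */
    {
            unsigned int r = 0;

            while (x) {
                    x >>= 1;
                    r++;
            }
            return r;
    }

    /* period_us is the period in microseconds (at most 0xffff << 3) */
    static unsigned int visconti_pick_divider(uint32_t period_us)
    {
            return fls32(period_us >> 16);
    }

    /*
     * Example: 300000 us >> 16 == 4, fls == 3, so the input clock is
     * divided by 8 and the counter is programmed with 300000 >> 3 ==
     * 37500, which fits the 16-bit PCSR register.
     */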
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 6e36851a22bb..52fe5d19473a 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -209,7 +209,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
chip->chip.ops = &vt8500_pwm_ops;
chip->chip.of_xlate = of_pwm_xlate_with_flags;
chip->chip.of_pwm_n_cells = 3;
- chip->chip.base = -1;
chip->chip.npwm = VT8500_NR_PWMS;
chip->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 15d1574d129b..e68fcedc999c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -24,11 +24,12 @@ config REMOTEPROC_CDEV
It's safe to say N if you don't want to use this interface.
config IMX_REMOTEPROC
- tristate "IMX6/7 remoteproc support"
+ tristate "i.MX remoteproc support"
depends on ARCH_MXC
+ select MAILBOX
help
- Say y here to support iMX's remote processors (Cortex M4
- on iMX7D) via the remote processor framework.
+	  Say y here to support i.MX's remote processors via the remote
+	  processor framework.
It's safe to say N here.
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 8957ed271d20..d6338872c6db 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -7,13 +7,18 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <linux/workqueue.h>
+
+#include "remoteproc_internal.h"
#define IMX7D_SRC_SCR 0x0C
#define IMX7D_ENABLE_M4 BIT(3)
@@ -43,7 +48,7 @@
| IMX6SX_SW_M4C_NON_SCLR_RST \
| IMX6SX_SW_M4C_RST)
-#define IMX7D_RPROC_MEM_MAX 8
+#define IMX_RPROC_MEM_MAX 32
/**
* struct imx_rproc_mem - slim internal memory structure
@@ -83,8 +88,42 @@ struct imx_rproc {
struct regmap *regmap;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
- struct imx_rproc_mem mem[IMX7D_RPROC_MEM_MAX];
+ struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
struct clk *clk;
+ struct mbox_client cl;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ struct work_struct rproc_work;
+ struct workqueue_struct *workqueue;
+ void __iomem *rsc_table;
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCML - alias */
+ { 0x00000000, 0x007e0000, 0x00020000, 0 },
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00008000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
+ /* TCML */
+ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN },
+ /* TCMU */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN },
+ /* OCRAM_S */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
@@ -137,6 +176,15 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx8mq,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
+};
+
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
@@ -208,7 +256,7 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
return -ENOENT;
}
-static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct imx_rproc *priv = rproc->priv;
void *va = NULL;
@@ -225,7 +273,7 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
if (imx_rproc_da_to_sys(priv, da, len, &sys))
return NULL;
- for (i = 0; i < IMX7D_RPROC_MEM_MAX; i++) {
+ for (i = 0; i < IMX_RPROC_MEM_MAX; i++) {
if (sys >= priv->mem[i].sys_addr && sys + len <
priv->mem[i].sys_addr + priv->mem[i].size) {
unsigned int offset = sys - priv->mem[i].sys_addr;
@@ -241,10 +289,143 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
return va;
}
+static int imx_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+	dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (IS_ERR_OR_NULL(va)) {
+		dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int imx_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int imx_rproc_prepare(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device_node *np = priv->dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ /*
+		 * Ignore the first memory region, which will be used as the
+		 * vdev buffer. No extra handling is needed;
+		 * rproc_add_virtio_dev will take care of it.
+ */
+ if (!strcmp(it.node->name, "vdev0buffer"))
+ continue;
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(priv->dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+		/* No need to translate pa to da, i.MX uses the same map */
+ da = rmem->base;
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+ imx_rproc_mem_alloc, imx_rproc_mem_release,
+ it.node->name);
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da, rmem->size);
+ else
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static void imx_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct imx_rproc *priv = rproc->priv;
+ int err;
+ __u32 mmsg;
+
+ if (!priv->tx_ch) {
+ dev_err(priv->dev, "No initialized mbox tx channel\n");
+ return;
+ }
+
+ /*
+	 * Send the index of the triggered virtqueue as the MU payload so
+	 * the remote processor knows which virtqueue is being kicked.
+ */
+ mmsg = vqid << 16;
+
+ err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (err < 0)
+ dev_err(priv->dev, "%s: failed (%d, err:%d)\n",
+ __func__, vqid, err);
+}
+
+static int imx_rproc_attach(struct rproc *rproc)
+{
+ return 0;
+}
+
+static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ /* The resource table has already been mapped in imx_rproc_addr_init */
+ if (!priv->rsc_table)
+ return NULL;
+
+ *table_sz = SZ_1K;
+ return (struct resource_table *)priv->rsc_table;
+}
+
static const struct rproc_ops imx_rproc_ops = {
+ .prepare = imx_rproc_prepare,
+ .attach = imx_rproc_attach,
.start = imx_rproc_start,
.stop = imx_rproc_stop,
+ .kick = imx_rproc_kick,
.da_to_va = imx_rproc_da_to_va,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
};
static int imx_rproc_addr_init(struct imx_rproc *priv,
@@ -262,13 +443,13 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
if (!(att->flags & ATT_OWN))
continue;
- if (b >= IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
att->sa, att->size);
if (!priv->mem[b].cpu_addr) {
- dev_err(dev, "devm_ioremap_resource failed\n");
+ dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa);
return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
@@ -287,29 +468,115 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
struct resource res;
node = of_parse_phandle(np, "memory-region", a);
+		/* Do not map the vdev region */
+ if (!strcmp(node->name, "vdev"))
+ continue;
err = of_address_to_resource(node, 0, &res);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- if (b >= IMX7D_RPROC_MEM_MAX)
+ of_node_put(node);
+
+ if (b >= IMX_RPROC_MEM_MAX)
break;
- priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(priv->mem[b].cpu_addr)) {
- dev_err(dev, "devm_ioremap_resource failed\n");
- err = PTR_ERR(priv->mem[b].cpu_addr);
- return err;
+		/* Don't use devm_ioremap_resource(): the region might be shared */
+ priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
+ if (!priv->mem[b].cpu_addr) {
+ dev_err(dev, "failed to remap %pr\n", &res);
+ return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
+ if (!strcmp(node->name, "rsc_table"))
+ priv->rsc_table = priv->mem[b].cpu_addr;
b++;
}
return 0;
}
+static void imx_rproc_vq_work(struct work_struct *work)
+{
+ struct imx_rproc *priv = container_of(work, struct imx_rproc,
+ rproc_work);
+
+ rproc_vq_interrupt(priv->rproc, 0);
+ rproc_vq_interrupt(priv->rproc, 1);
+}
+
+static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_rproc *priv = rproc->priv;
+
+ queue_work(priv->workqueue, &priv->rproc_work);
+}
+
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device *dev = priv->dev;
+ struct mbox_client *cl;
+ int ret;
+
+ if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ return 0;
+
+ cl = &priv->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+ cl->rx_callback = imx_rproc_rx_callback;
+
+ priv->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(priv->tx_ch)) {
+ ret = PTR_ERR(priv->tx_ch);
+ return dev_err_probe(cl->dev, ret,
+ "failed to request tx mailbox channel: %d\n", ret);
+ }
+
+ priv->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(priv->rx_ch)) {
+ mbox_free_channel(priv->tx_ch);
+ ret = PTR_ERR(priv->rx_ch);
+ return dev_err_probe(cl->dev, ret,
+ "failed to request rx mailbox channel: %d\n", ret);
+ }
+
+ return 0;
+}
+
+static void imx_rproc_free_mbox(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ mbox_free_channel(priv->tx_ch);
+ mbox_free_channel(priv->rx_ch);
+}
+
+static int imx_rproc_detect_mode(struct imx_rproc *priv)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->regmap, dcfg->src_reg, &val);
+ if (ret) {
+ dev_err(dev, "Failed to read src\n");
+ return ret;
+ }
+
+ if (!(val & dcfg->src_stop))
+ priv->rproc->state = RPROC_DETACHED;
+
+ return 0;
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -347,18 +614,32 @@ static int imx_rproc_probe(struct platform_device *pdev)
priv->dev = dev;
dev_set_drvdata(dev, rproc);
+ priv->workqueue = create_workqueue(dev_name(dev));
+ if (!priv->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto err_put_rproc;
+ }
+
+ ret = imx_rproc_xtr_mbox_init(rproc);
+ if (ret)
+ goto err_put_wkq;
ret = imx_rproc_addr_init(priv, pdev);
if (ret) {
dev_err(dev, "failed on imx_rproc_addr_init\n");
- goto err_put_rproc;
+ goto err_put_mbox;
}
+ ret = imx_rproc_detect_mode(priv);
+ if (ret)
+ goto err_put_mbox;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "Failed to get clock\n");
ret = PTR_ERR(priv->clk);
- goto err_put_rproc;
+ goto err_put_mbox;
}
/*
@@ -368,9 +649,11 @@ static int imx_rproc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&rproc->dev, "Failed to enable clock\n");
- goto err_put_rproc;
+ goto err_put_mbox;
}
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
@@ -381,6 +664,10 @@ static int imx_rproc_probe(struct platform_device *pdev)
err_put_clk:
clk_disable_unprepare(priv->clk);
+err_put_mbox:
+ imx_rproc_free_mbox(rproc);
+err_put_wkq:
+ destroy_workqueue(priv->workqueue);
err_put_rproc:
rproc_free(rproc);
@@ -394,6 +681,7 @@ static int imx_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
rproc_del(rproc);
+ imx_rproc_free_mbox(rproc);
rproc_free(rproc);
return 0;
@@ -402,6 +690,8 @@ static int imx_rproc_remove(struct platform_device *pdev)
static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
{ .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
+ { .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
@@ -418,5 +708,5 @@ static struct platform_driver imx_rproc_driver = {
module_platform_driver(imx_rproc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("IMX6SX/7D remote processor control driver");
+MODULE_DESCRIPTION("i.MX remote processor control driver");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
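Editor's note: the att[] tables added above describe fixed windows between remote-core device addresses and CPU system addresses; imx_rproc_da_to_va() walks them and applies the offset within the matching window. A self-contained sketch of that translation (struct att and da_to_sys are illustrative names, not the driver's):

    #include <stddef.h>
    #include <stdint.h>

    struct att {
            uint32_t da;    /* device address seen by the Cortex-M core */
            uint32_t sa;    /* system address seen by the Cortex-A side */
            uint32_t size;
    };

    static int da_to_sys(const struct att *att, size_t n,
                         uint64_t da, size_t len, uint64_t *sys)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (da >= att[i].da &&
                        da + len <= (uint64_t)att[i].da + att[i].size) {
                            *sys = da - att[i].da + att[i].sa;
                            return 0;
                    }
            }
            return -1;      /* no window covers [da, da + len) */
    }

    /* e.g. the i.MX8MQ TCML entry maps da 0x1ffe0000 to sys 0x007e0000 */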
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index e2618c36eaab..a356738160a4 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -121,7 +121,7 @@ static void ingenic_rproc_kick(struct rproc *rproc, int vqid)
writel(vqid, vpu->aux_base + REG_CORE_MSG);
}
-static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct vpu *vpu = rproc->priv;
void __iomem *va = NULL;
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index cd266163a65f..54781f553f4e 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct keystone_rproc *ksproc = rproc->priv;
void __iomem *va = NULL;
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index ce727598c41c..9679cc26895e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -272,7 +272,7 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* grab the kernel address for this device address */
- ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz);
+ ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
ret = -EINVAL;
@@ -509,7 +509,7 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
return NULL;
}
-static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
@@ -627,7 +627,7 @@ void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
void *ptr;
- ptr = scp_da_to_va(scp->rproc, mem_addr, 0);
+ ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
if (!ptr)
return ERR_PTR(-EINVAL);
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index d94b7391bf9d..43531caa1959 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -728,7 +728,7 @@ out:
* Return: translated virtual address in kernel memory space on success,
* or NULL on failure.
*/
-static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct omap_rproc *oproc = rproc->priv;
int i;
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index dcb380e868df..e5778e476245 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -244,8 +244,8 @@ static int pru_rproc_debug_ss_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
- pru_rproc_debug_ss_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
+ pru_rproc_debug_ss_set, "%llu\n");
/*
* Create PRU-specific debugfs entries
@@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
static void pru_dispose_irq_mapping(struct pru_rproc *pru)
{
- while (pru->evt_count--) {
+ if (!pru->mapped_irq)
+ return;
+
+ while (pru->evt_count) {
+ pru->evt_count--;
if (pru->mapped_irq[pru->evt_count] > 0)
irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
}
kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
}
/*
@@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
struct pru_rproc *pru = rproc->priv;
struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
struct irq_fwspec fwspec;
- struct device_node *irq_parent;
+ struct device_node *parent, *irq_parent;
int i, ret = 0;
/* not having pru_interrupt_map is not an error */
@@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
pru->evt_count = rsc->num_evts;
pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
GFP_KERNEL);
- if (!pru->mapped_irq)
+ if (!pru->mapped_irq) {
+ pru->evt_count = 0;
return -ENOMEM;
+ }
/*
* parse and fill in system event to interrupt channel and
- * channel-to-host mapping
+ * channel-to-host mapping. The interrupt controller to be used
+ * for these mappings for a given PRU remoteproc is always its
+ * corresponding sibling PRUSS INTC node.
*/
- irq_parent = of_irq_find_parent(pru->dev->of_node);
+ parent = of_get_parent(dev_of_node(pru->dev));
+ if (!parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ irq_parent = of_get_child_by_name(parent, "interrupt-controller");
+ of_node_put(parent);
if (!irq_parent) {
kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
return -ENODEV;
}
@@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
if (!pru->mapped_irq[i]) {
- dev_err(dev, "failed to get virq\n");
- ret = pru->mapped_irq[i];
+ dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
+ i, fwspec.param[0], fwspec.param[1],
+ fwspec.param[2]);
+ ret = -EINVAL;
goto map_fail;
}
}
+ of_node_put(irq_parent);
return ret;
map_fail:
pru_dispose_irq_mapping(pru);
+ of_node_put(irq_parent);
return ret;
}
@@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
/* dispose irq mapping - new firmware can provide new mapping */
- if (pru->mapped_irq)
- pru_dispose_irq_mapping(pru);
+ pru_dispose_irq_mapping(pru);
return 0;
}
@@ -483,7 +506,7 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
* core for any PRU client drivers. The PRU Instruction RAM access is restricted
* only to the PRU loader code.
*/
-static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct pru_rproc *pru = rproc->priv;
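Editor's note: the pru_dispose_irq_mapping() change above makes the cleanup idempotent: it bails out when no mapping exists, consumes evt_count as it walks, and NULLs the pointer, so pru_rproc_stop() can call it unconditionally. The shape of that pattern in isolation (names and the free() call are placeholders for the driver's kfree()/irq_dispose_mapping()):

    #include <stdlib.h>

    static void dispose_mappings(unsigned int **mapped, unsigned int *count)
    {
            if (!*mapped)
                    return;                 /* already disposed: no-op */

            while (*count) {
                    (*count)--;
                    /* driver: irq_dispose_mapping((*mapped)[*count]); */
            }

            free(*mapped);
            *mapped = NULL;                 /* make a second call safe */
    }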
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e02450225e4a..8b0d8bbacd2e 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -281,7 +281,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 66106ba25ba3..423b31dfa574 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(qproc->dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
if (!ptr) {
dev_err(qproc->dev,
@@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(qproc->dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ ret = -EINVAL;
+ release_firmware(seg_fw);
+ memunmap(ptr);
+ goto release_firmware;
+ }
+
release_firmware(seg_fw);
}
@@ -1661,8 +1679,10 @@ static int q6v5_probe(struct platform_device *pdev)
mba_image = desc->hexagon_mba_image;
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
0, &mba_image);
- if (ret < 0 && ret != -EINVAL)
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mba firmware-name\n");
return ret;
+ }
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
mba_image, sizeof(*qproc));
@@ -1680,8 +1700,10 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->hexagon_mdt_image = "modem.mdt";
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1, &qproc->hexagon_mdt_image);
- if (ret < 0 && ret != -EINVAL)
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
goto free_rproc;
+ }
platform_set_drvdata(pdev, qproc);
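Editor's note: the two q6v5_mpss_load() checks added above validate each ELF program header before data is copied into the reserved MPSS region: a segment whose file data exceeds its memory size, or whose per-segment firmware file has an unexpected length, is rejected. A self-contained sketch under assumed types (struct phdr here carries only the two fields the checks need):

    #include <stddef.h>
    #include <stdint.h>

    struct phdr {
            uint64_t p_filesz;      /* bytes present in the file */
            uint64_t p_memsz;       /* bytes occupied in memory */
    };

    /* Returns 0 if the segment is acceptable, -1 otherwise */
    static int segment_ok(const struct phdr *ph, size_t seg_fw_size)
    {
            if (ph->p_filesz > ph->p_memsz)
                    return -1;      /* file data would overflow the segment */

            if (seg_fw_size != ph->p_filesz)
                    return -1;      /* truncated (or padded) segment file */

            return 0;
    }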
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index e635454d6170..b921fc26cd04 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -242,7 +242,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -785,6 +785,22 @@ static const struct adsp_data wcss_resource_init = {
.ssctl_id = 0x12,
};
+static const struct adsp_data sdx55_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x22,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
@@ -797,6 +813,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+ { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 78ebe1168b33..20d50ec7eff1 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -4,13 +4,18 @@
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "qcom_common.h"
@@ -24,6 +29,9 @@
#define Q6SS_GFMUX_CTL_REG 0x020
#define Q6SS_PWR_CTL_REG 0x030
#define Q6SS_MEM_PWR_CTL 0x0B0
+#define Q6SS_STRAP_ACC 0x110
+#define Q6SS_CGC_OVERRIDE 0x034
+#define Q6SS_BCR_REG 0x6000
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -37,14 +45,19 @@
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
+/* Q6SS_BRC_RESET */
+#define Q6SS_BRC_BLK_ARES BIT(0)
+
/* Q6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
+#define Q6SS_SWITCH_CLK_SRC BIT(8)
/* Q6SS_PWR_CTL */
#define Q6SS_L2DATA_STBY_N BIT(18)
#define Q6SS_SLP_RET_N BIT(19)
#define Q6SS_CLAMP_IO BIT(20)
#define QDSS_BHS_ON BIT(21)
+#define QDSS_Q6_MEMORIES GENMASK(15, 0)
/* Q6SS parameters */
#define Q6SS_LDO_BYP BIT(25)
@@ -53,6 +66,7 @@
#define Q6SS_CLAMP_QMC_MEM BIT(22)
#define HALT_CHECK_MAX_LOOPS 200
#define Q6SS_XO_CBCR GENMASK(5, 3)
+#define Q6SS_SLEEP_CBCR GENMASK(5, 2)
/* Q6SS config/status registers */
#define TCSR_GLOBAL_CFG0 0x0
@@ -71,6 +85,25 @@
#define TCSR_WCSS_CLK_MASK 0x1F
#define TCSR_WCSS_CLK_ENABLE 0x14
+#define MAX_HALT_REG 3
+enum {
+ WCSS_IPQ8074,
+ WCSS_QCS404,
+};
+
+struct wcss_data {
+ const char *firmware_name;
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool aon_reset_required;
+ bool wcss_q6_reset_required;
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+ const struct rproc_ops *ops;
+ bool requires_force_stop;
+};
+
struct q6v5_wcss {
struct device *dev;
@@ -82,9 +115,26 @@ struct q6v5_wcss {
u32 halt_wcss;
u32 halt_nc;
+ struct clk *xo;
+ struct clk *ahbfabric_cbcr_clk;
+ struct clk *gcc_abhs_cbcr;
+ struct clk *gcc_axim_cbcr;
+ struct clk *lcc_csr_cbcr;
+ struct clk *ahbs_cbcr;
+ struct clk *tcm_slave_cbcr;
+ struct clk *qdsp6ss_abhm_cbcr;
+ struct clk *qdsp6ss_sleep_cbcr;
+ struct clk *qdsp6ss_axim_cbcr;
+ struct clk *qdsp6ss_xo_cbcr;
+ struct clk *qdsp6ss_core_gfmux;
+ struct clk *lcc_bcr_sleep;
+ struct regulator *cx_supply;
+ struct qcom_sysmon *sysmon;
+
struct reset_control *wcss_aon_reset;
struct reset_control *wcss_reset;
struct reset_control *wcss_q6_reset;
+ struct reset_control *wcss_q6_bcr_reset;
struct qcom_q6v5 q6v5;
@@ -93,6 +143,10 @@ struct q6v5_wcss {
void *mem_region;
size_t mem_size;
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool requires_force_stop;
+
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
};
@@ -237,6 +291,207 @@ wcss_reset:
return ret;
}
+static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret, idx;
+
+ /* Toggle the restart */
+ reset_control_assert(wcss->wcss_reset);
+ usleep_range(200, 300);
+ reset_control_deassert(wcss->wcss_reset);
+ usleep_range(200, 300);
+
+ /* Enable GCC_WDSP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->gcc_abhs_cbcr);
+ if (ret)
+ return ret;
+
+ /* Remove reset to the WCNSS QDSP6SS */
+ reset_control_deassert(wcss->wcss_q6_bcr_reset);
+
+ /* Enable Q6SSTOP_AHBFABRIC_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbfabric_cbcr_clk);
+ if (ret)
+ goto disable_gcc_abhs_cbcr_clk;
+
+ /* Enable the LCCCSR CBC clock, Q6SSTOP_Q6SSTOP_LCC_CSR_CBCR clock */
+ ret = clk_prepare_enable(wcss->lcc_csr_cbcr);
+ if (ret)
+ goto disable_ahbfabric_cbcr_clk;
+
+ /* Enable the Q6AHBS CBC, Q6SSTOP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbs_cbcr);
+ if (ret)
+ goto disable_csr_cbcr_clk;
+
+ /* Enable the TCM slave CBC, Q6SSTOP_Q6SS_TCM_SLAVE_CBCR clock */
+ ret = clk_prepare_enable(wcss->tcm_slave_cbcr);
+ if (ret)
+ goto disable_ahbs_cbcr_clk;
+
+ /* Enable the Q6SS AHB master CBC, Q6SSTOP_Q6SS_AHBM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_abhm_cbcr);
+ if (ret)
+ goto disable_tcm_slave_cbcr_clk;
+
+ /* Enable the Q6SS AXI master CBC, Q6SSTOP_Q6SS_AXIM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_axim_cbcr);
+ if (ret)
+ goto disable_abhm_cbcr_clk;
+
+ /* Enable the Q6SS XO CBC */
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ /* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+
+ writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
+
+ /* Enable the QDSP6 sleep clock */
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ /* Enable the Q6 AXI clock, GCC_WDSP_Q6SS_AXIM_CBCR */
+ ret = clk_prepare_enable(wcss->gcc_axim_cbcr);
+ if (ret)
+ goto disable_sleep_cbcr_clk;
+
+ /* Assert resets, stop core */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Program the QDSP6SS PWR_CTL register */
+ writel(0x01700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03300000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x033C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /*
+ * Enable memories by turning on the QDSP6 memory foot/head switch, one
+ * bank at a time to avoid in-rush current
+ */
+ for (idx = 28; idx >= 0; idx--) {
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) |
+ (1 << idx)), wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ }
+
+ writel(0x031C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ writel(0x030C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Enable the Q6 core clock at the GFM, Q6SSTOP_QDSP6SS_GFMUX_CTL */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* Enable sleep clock branch needed for BCR circuit */
+ ret = clk_prepare_enable(wcss->lcc_bcr_sleep);
+ if (ret)
+ goto disable_core_gfmux_clk;
+
+ return 0;
+
+disable_core_gfmux_clk:
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+disable_sleep_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+disable_abhm_cbcr_clk:
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+disable_tcm_slave_cbcr_clk:
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+disable_ahbs_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+disable_csr_cbcr_clk:
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+disable_ahbfabric_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+disable_gcc_abhs_cbcr_clk:
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ return ret;
+}
+
+static inline int q6v5_wcss_qcs404_reset(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+
+ writel(0x80800000, wcss->reg_base + Q6SS_STRAP_ACC);
+
+ /* Start core execution */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ return 0;
+}
+
+static int q6v5_qcs404_wcss_start(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(wcss->xo);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(wcss->cx_supply);
+ if (ret)
+ goto disable_xo_clk;
+
+ qcom_q6v5_prepare(&wcss->q6v5);
+
+ ret = q6v5_wcss_qcs404_power_on(wcss);
+ if (ret) {
+ dev_err(wcss->dev, "wcss clk_enable failed\n");
+ goto disable_cx_supply;
+ }
+
+ writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
+
+ q6v5_wcss_qcs404_reset(wcss);
+
+ ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "start timed out\n");
+ goto disable_cx_supply;
+ }
+
+ return 0;
+
+disable_cx_supply:
+ regulator_disable(wcss->cx_supply);
+disable_xo_clk:
+ clk_disable_unprepare(wcss->xo);
+
+ return ret;
+}
+
static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
struct regmap *halt_map,
u32 offset)
@@ -271,6 +526,70 @@ static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
+static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret;
+
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
+
+ /* assert clamps to avoid MX current inrush */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= (Q6SS_CLAMP_IO | Q6SS_CLAMP_WL | Q6SS_CLAMP_QMC_MEM);
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Disable memories by turning off memory foot/headswitch */
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) &
+ ~QDSS_Q6_MEMORIES),
+ wcss->reg_base + Q6SS_MEM_PWR_CTL);
+
+ /* Clear the BHS_ON bit */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+ clk_disable_unprepare(wcss->lcc_bcr_sleep);
+
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ ret = reset_control_assert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ ret = reset_control_deassert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+
+ return 0;
+}
+
static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss)
{
int ret;
@@ -390,27 +709,35 @@ static int q6v5_wcss_stop(struct rproc *rproc)
int ret;
/* WCSS powerdown */
- ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
- if (ret == -ETIMEDOUT) {
- dev_err(wcss->dev, "timed out on wait\n");
- return ret;
+ if (wcss->requires_force_stop) {
+ ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "timed out on wait\n");
+ return ret;
+ }
}
- ret = q6v5_wcss_powerdown(wcss);
- if (ret)
- return ret;
-
- /* Q6 Power down */
- ret = q6v5_q6_powerdown(wcss);
- if (ret)
- return ret;
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_qcs404_wcss_shutdown(wcss);
+ if (ret)
+ return ret;
+ } else {
+ ret = q6v5_wcss_powerdown(wcss);
+ if (ret)
+ return ret;
+
+ /* Q6 Power down */
+ ret = q6v5_q6_powerdown(wcss);
+ if (ret)
+ return ret;
+ }
qcom_q6v5_unprepare(&wcss->q6v5);
return 0;
}
-static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct q6v5_wcss *wcss = rproc->priv;
int offset;
@@ -438,7 +765,7 @@ static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
return ret;
}
-static const struct rproc_ops q6v5_wcss_ops = {
+static const struct rproc_ops q6v5_wcss_ipq8074_ops = {
.start = q6v5_wcss_start,
.stop = q6v5_wcss_stop,
.da_to_va = q6v5_wcss_da_to_va,
@@ -446,26 +773,46 @@ static const struct rproc_ops q6v5_wcss_ops = {
.get_boot_addr = rproc_elf_get_boot_addr,
};
-static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
+static const struct rproc_ops q6v5_wcss_qcs404_ops = {
+ .start = q6v5_qcs404_wcss_start,
+ .stop = q6v5_wcss_stop,
+ .da_to_va = q6v5_wcss_da_to_va,
+ .load = q6v5_wcss_load,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .parse_fw = qcom_register_dump_segments,
+};
+
+static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
+ const struct wcss_data *desc)
{
struct device *dev = wcss->dev;
- wcss->wcss_aon_reset = devm_reset_control_get(dev, "wcss_aon_reset");
- if (IS_ERR(wcss->wcss_aon_reset)) {
- dev_err(wcss->dev, "unable to acquire wcss_aon_reset\n");
- return PTR_ERR(wcss->wcss_aon_reset);
+ if (desc->aon_reset_required) {
+ wcss->wcss_aon_reset = devm_reset_control_get_exclusive(dev, "wcss_aon_reset");
+ if (IS_ERR(wcss->wcss_aon_reset)) {
+ dev_err(wcss->dev, "fail to acquire wcss_aon_reset\n");
+ return PTR_ERR(wcss->wcss_aon_reset);
+ }
}
- wcss->wcss_reset = devm_reset_control_get(dev, "wcss_reset");
+ wcss->wcss_reset = devm_reset_control_get_exclusive(dev, "wcss_reset");
if (IS_ERR(wcss->wcss_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_reset\n");
return PTR_ERR(wcss->wcss_reset);
}
- wcss->wcss_q6_reset = devm_reset_control_get(dev, "wcss_q6_reset");
- if (IS_ERR(wcss->wcss_q6_reset)) {
- dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
- return PTR_ERR(wcss->wcss_q6_reset);
+ if (desc->wcss_q6_reset_required) {
+ wcss->wcss_q6_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_reset");
+ if (IS_ERR(wcss->wcss_q6_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
+ return PTR_ERR(wcss->wcss_q6_reset);
+ }
+ }
+
+ wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
+ if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
+ return PTR_ERR(wcss->wcss_q6_bcr_reset);
}
return 0;
@@ -474,35 +821,48 @@ static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
struct platform_device *pdev)
{
- struct of_phandle_args args;
+ unsigned int halt_reg[MAX_HALT_REG] = {0};
+ struct device_node *syscon;
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
- wcss->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wcss->reg_base))
- return PTR_ERR(wcss->reg_base);
+ wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!wcss->reg_base)
+ return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
- wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wcss->rmb_base))
- return PTR_ERR(wcss->rmb_base);
+ if (wcss->version == WCSS_IPQ8074) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
+ wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcss->rmb_base))
+ return PTR_ERR(wcss->rmb_base);
+ }
- ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "qcom,halt-regs", 3, 0, &args);
- if (ret < 0) {
+ syscon = of_parse_phandle(pdev->dev.of_node,
+ "qcom,halt-regs", 0);
+ if (!syscon) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
- wcss->halt_map = syscon_node_to_regmap(args.np);
- of_node_put(args.np);
+ wcss->halt_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(wcss->halt_map))
return PTR_ERR(wcss->halt_map);
- wcss->halt_q6 = args.args[0];
- wcss->halt_wcss = args.args[1];
- wcss->halt_nc = args.args[2];
+ ret = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "qcom,halt-regs",
+ halt_reg, 0,
+ MAX_HALT_REG);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ wcss->halt_q6 = halt_reg[0];
+ wcss->halt_wcss = halt_reg[1];
+ wcss->halt_nc = halt_reg[2];
return 0;
}
@@ -536,14 +896,120 @@ static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
return 0;
}
+static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss)
+{
+ int ret;
+
+ wcss->xo = devm_clk_get(wcss->dev, "xo");
+ if (IS_ERR(wcss->xo)) {
+ ret = PTR_ERR(wcss->xo);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get xo clock");
+ return ret;
+ }
+
+ wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr");
+ if (IS_ERR(wcss->gcc_abhs_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_abhs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc abhs clock");
+ return ret;
+ }
+
+ wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr");
+ if (IS_ERR(wcss->gcc_axim_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc axim clock\n");
+ return ret;
+ }
+
+ wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev,
+ "lcc_ahbfabric_cbc");
+ if (IS_ERR(wcss->ahbfabric_cbcr_clk)) {
+ ret = PTR_ERR(wcss->ahbfabric_cbcr_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbfabric clock\n");
+ return ret;
+ }
+
+ wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc");
+ if (IS_ERR(wcss->lcc_csr_cbcr)) {
+ ret = PTR_ERR(wcss->lcc_csr_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get csr cbcr clk\n");
+ return ret;
+ }
+
+ wcss->ahbs_cbcr = devm_clk_get(wcss->dev,
+ "lcc_abhs_cbc");
+ if (IS_ERR(wcss->ahbs_cbcr)) {
+ ret = PTR_ERR(wcss->ahbs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n");
+ return ret;
+ }
+
+ wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev,
+ "lcc_tcm_slave_cbc");
+ if (IS_ERR(wcss->tcm_slave_cbcr)) {
+ ret = PTR_ERR(wcss->tcm_slave_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get tcm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc");
+ if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get abhm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc");
+ if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get axim cbcr clk\n");
+ return ret;
+ }
+
+ wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep");
+ if (IS_ERR(wcss->lcc_bcr_sleep)) {
+ ret = PTR_ERR(wcss->lcc_bcr_sleep);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get bcr cbcr clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss)
+{
+ wcss->cx_supply = devm_regulator_get(wcss->dev, "cx");
+ if (IS_ERR(wcss->cx_supply))
+ return PTR_ERR(wcss->cx_supply);
+
+ regulator_set_load(wcss->cx_supply, 100000);
+
+ return 0;
+}
+
static int q6v5_wcss_probe(struct platform_device *pdev)
{
+ const struct wcss_data *desc;
struct q6v5_wcss *wcss;
struct rproc *rproc;
int ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_wcss_ops,
- "IPQ8074/q6_fw.mdt", sizeof(*wcss));
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
+ desc->firmware_name, sizeof(*wcss));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -551,6 +1017,10 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
wcss = rproc->priv;
wcss->dev = &pdev->dev;
+ wcss->version = desc->version;
+ wcss->requires_force_stop = desc->requires_force_stop;
ret = q6v5_wcss_init_mmio(wcss, pdev);
if (ret)
@@ -560,17 +1030,33 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- ret = q6v5_wcss_init_reset(wcss);
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_wcss_init_clock(wcss);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_wcss_init_regulator(wcss);
+ if (ret)
+ goto free_rproc;
+ }
+
+ ret = q6v5_wcss_init_reset(wcss, desc);
if (ret)
goto free_rproc;
- ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, WCSS_CRASH_REASON, NULL);
+ ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem,
+ NULL);
if (ret)
goto free_rproc;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
+ if (desc->ssctl_id)
+ wcss->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+
ret = rproc_add(rproc);
if (ret)
goto free_rproc;
@@ -595,8 +1081,31 @@ static int q6v5_wcss_remove(struct platform_device *pdev)
return 0;
}
+static const struct wcss_data wcss_ipq8074_res_init = {
+ .firmware_name = "IPQ8074/q6_fw.mdt",
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .aon_reset_required = true,
+ .wcss_q6_reset_required = true,
+ .ops = &q6v5_wcss_ipq8074_ops,
+ .requires_force_stop = true,
+};
+
+static const struct wcss_data wcss_qcs404_res_init = {
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .firmware_name = "wcnss.mdt",
+ .version = WCSS_QCS404,
+ .aon_reset_required = false,
+ .wcss_q6_reset_required = false,
+ .ssr_name = "mpss",
+ .sysmon_name = "wcnss",
+ .ssctl_id = 0x12,
+ .ops = &q6v5_wcss_qcs404_ops,
+ .requires_force_stop = false,
+};
+
static const struct of_device_id q6v5_wcss_of_match[] = {
- { .compatible = "qcom,ipq8074-wcss-pil" },
+ { .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init },
+ { .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init },
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 2a6a23cb14ca..5f3455aa7e0e 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -320,7 +320,7 @@ static int wcnss_stop(struct rproc *rproc)
return ret;
}
-static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
int offset;
@@ -530,6 +530,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
static int wcnss_probe(struct platform_device *pdev)
{
+ const char *fw_name = WCNSS_FIRMWARE_NAME;
const struct wcnss_data *data;
struct qcom_wcnss *wcnss;
struct resource *res;
@@ -547,8 +548,13 @@ static int wcnss_probe(struct platform_device *pdev)
return -ENXIO;
}
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
- WCNSS_FIRMWARE_NAME, sizeof(*wcnss));
+ fw_name, sizeof(*wcnss));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
index b19ea3057bde..0b8a84c04f76 100644
--- a/drivers/remoteproc/remoteproc_cdev.c
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -32,15 +32,22 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
return -EFAULT;
if (!strncmp(cmd, "start", len)) {
- if (rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING ||
+ rproc->state == RPROC_ATTACHED)
return -EBUSY;
ret = rproc_boot(rproc);
} else if (!strncmp(cmd, "stop", len)) {
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
return -EINVAL;
rproc_shutdown(rproc);
+ } else if (!strncmp(cmd, "detach", len)) {
+ if (rproc->state != RPROC_ATTACHED)
+ return -EINVAL;
+
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognized option\n");
ret = -EINVAL;
@@ -79,11 +86,17 @@ static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned l
static int rproc_cdev_release(struct inode *inode, struct file *filp)
{
struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
- if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+ if (!rproc->cdev_put_on_release)
+ return 0;
+
+ if (rproc->state == RPROC_RUNNING)
rproc_shutdown(rproc);
+ else if (rproc->state == RPROC_ATTACHED)
+ ret = rproc_detach(rproc);
- return 0;
+ return ret;
}
static const struct file_operations rproc_fops = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index ab150765d124..626a6b90fba2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -189,13 +189,13 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* here the output of the DMA API for the carveouts, which should be more
* correct.
*/
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
if (rproc->ops->da_to_va) {
- ptr = rproc->ops->da_to_va(rproc, da, len);
+ ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
if (ptr)
goto out;
}
@@ -217,6 +217,9 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
ptr = carveout->va + offset;
+ if (is_iomem)
+ *is_iomem = carveout->is_iomem;
+
break;
}
@@ -482,7 +485,7 @@ static int copy_dma_range_map(struct device *to, struct device *from)
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
- * @rsc: the vring resource descriptor
+ * @ptr: the vring resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -507,9 +510,10 @@ static int copy_dma_range_map(struct device *to, struct device *from)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
@@ -627,7 +631,7 @@ void rproc_vdev_release(struct kref *ref)
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
- * @rsc: the trace resource descriptor
+ * @ptr: the trace resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -641,9 +645,10 @@ void rproc_vdev_release(struct kref *ref)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
+static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_trace *rsc = ptr;
struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
char name[15];
@@ -693,7 +698,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
/**
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
- * @rsc: the devmem resource entry
+ * @ptr: the devmem resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -716,9 +721,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
*/
-static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
+static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_devmem *rsc = ptr;
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
int ret;
@@ -896,7 +902,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/**
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
- * @rsc: the resource entry
+ * @ptr: the resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
@@ -913,9 +919,9 @@ static int rproc_release_carveout(struct rproc *rproc,
* pressure is important; it may have a substantial impact on performance.
*/
static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc,
- int offset, int avail)
+ void *ptr, int offset, int avail)
{
+ struct fw_rsc_carveout *rsc = ptr;
struct rproc_mem_entry *carveout;
struct device *dev = &rproc->dev;
@@ -1097,10 +1103,10 @@ EXPORT_SYMBOL(rproc_of_parse_firmware);
* enum fw_resource_type.
*/
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
- [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
- [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
- [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
- [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
+ [RSC_CARVEOUT] = rproc_handle_carveout,
+ [RSC_DEVMEM] = rproc_handle_devmem,
+ [RSC_TRACE] = rproc_handle_trace,
+ [RSC_VDEV] = rproc_handle_vdev,
};
/* handle firmware resource entries before booting the remote processor */
@@ -1416,7 +1422,7 @@ reset_table_ptr:
return ret;
}
-static int rproc_attach(struct rproc *rproc)
+static int __rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
@@ -1444,7 +1450,7 @@ static int rproc_attach(struct rproc *rproc)
goto stop_rproc;
}
- rproc->state = RPROC_RUNNING;
+ rproc->state = RPROC_ATTACHED;
dev_info(dev, "remote processor %s is now attached\n", rproc->name);
@@ -1537,11 +1543,149 @@ disable_iommu:
return ret;
}
+static int rproc_set_rsc_table(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+ struct device *dev = &rproc->dev;
+ size_t table_sz;
+ int ret;
+
+ table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
+ if (!table_ptr) {
+ /* Not having a resource table is acceptable */
+ return 0;
+ }
+
+ if (IS_ERR(table_ptr)) {
+ ret = PTR_ERR(table_ptr);
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * If it is possible to detach the remote processor, keep an untouched
+ * copy of the resource table. That way we can start fresh again when
+ * the remote processor is re-attached, that is:
+ *
+ * DETACHED -> ATTACHED -> DETACHED -> ATTACHED
+ *
+ * Free'd in rproc_reset_rsc_table_on_detach() and
+ * rproc_reset_rsc_table_on_stop().
+ */
+ if (rproc->ops->detach) {
+ rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
+ if (!rproc->clean_table)
+ return -ENOMEM;
+ } else {
+ rproc->clean_table = NULL;
+ }
+
+ rproc->cached_table = NULL;
+ rproc->table_ptr = table_ptr;
+ rproc->table_sz = table_sz;
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If we made it to this point a clean_table _must_ have been
+ * allocated in rproc_set_rsc_table(). If one isn't present
+ * something went really wrong and we must complain.
+ */
+ if (WARN_ON(!rproc->clean_table))
+ return -EINVAL;
+
+ /* Remember where the external entity installed the resource table */
+ table_ptr = rproc->table_ptr;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the detach process. The memory
+ * allocated here is free'd in rproc_detach().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * detach process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+
+ /*
+ * Reset the memory area where the firmware loaded the resource table
+ * to its original value. That way when we re-attach the remote
+ * processor the resource table is clean and ready to be used again.
+ */
+ memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
+
+ /*
+ * The clean resource table is no longer needed. Allocated in
+ * rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
+{
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If a cache table exists the remote processor was started by
+ * the remoteproc core. That cache table should be used for
+ * the rest of the shutdown process.
+ */
+ if (rproc->cached_table)
+ goto out;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is free'd in rproc_shutdown().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Since the remote processor is being switched off the clean table
+ * won't be needed. Allocated in rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+out:
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+ return 0;
+}
+
/*
* Attach to remote processor - similar to rproc_fw_boot() but without
* the steps that deal with the firmware image.
*/
-static int rproc_actuate(struct rproc *rproc)
+static int rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
@@ -1556,6 +1700,19 @@ static int rproc_actuate(struct rproc *rproc)
return ret;
}
+ /* Do anything that is needed to boot the remote processor */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
+ ret = rproc_set_rsc_table(rproc);
+ if (ret) {
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ goto unprepare_device;
+ }
+
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -1570,7 +1727,7 @@ static int rproc_actuate(struct rproc *rproc)
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto disable_iommu;
+ goto unprepare_device;
}
/* Allocate carveout resources associated to rproc */
@@ -1581,7 +1738,7 @@ static int rproc_actuate(struct rproc *rproc)
goto clean_up_resources;
}
- ret = rproc_attach(rproc);
+ ret = __rproc_attach(rproc);
if (ret)
goto clean_up_resources;
@@ -1589,6 +1746,9 @@ static int rproc_actuate(struct rproc *rproc)
clean_up_resources:
rproc_resource_cleanup(rproc);
+unprepare_device:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -1642,11 +1802,20 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
struct device *dev = &rproc->dev;
int ret;
+ /* No need to continue if a stop() operation has not been provided */
+ if (!rproc->ops->stop)
+ return -EINVAL;
+
/* Stop any subdevices for the remote processor */
rproc_stop_subdevices(rproc, crashed);
/* the installed resource table is no longer accessible */
- rproc->table_ptr = rproc->cached_table;
+ ret = rproc_reset_rsc_table_on_stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
/* power off the remote processor */
ret = rproc->ops->stop(rproc);
@@ -1659,19 +1828,48 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
rproc->state = RPROC_OFFLINE;
- /*
- * The remote processor has been stopped and is now offline, which means
- * that the next time it is brought back online the remoteproc core will
- * be responsible to load its firmware. As such it is no longer
- * autonomous.
- */
- rproc->autonomous = false;
-
dev_info(dev, "stopped remote processor %s\n", rproc->name);
return 0;
}
+/*
+ * __rproc_detach(): Does the opposite of __rproc_attach()
+ */
+static int __rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* No need to continue if a detach() operation has not been provided */
+ if (!rproc->ops->detach)
+ return -EINVAL;
+
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, false);
+
+ /* the installed resource table is no longer accessible */
+ ret = rproc_reset_rsc_table_on_detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell the remote processor the core isn't available anymore */
+ ret = rproc->ops->detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't detach from rproc: %d\n", ret);
+ return ret;
+ }
+
+ rproc_unprepare_subdevices(rproc);
+
+ rproc->state = RPROC_DETACHED;
+
+ dev_info(dev, "detached remote processor %s\n", rproc->name);
+
+ return 0;
+}
/**
* rproc_trigger_recovery() - recover a remoteproc
@@ -1802,7 +2000,7 @@ int rproc_boot(struct rproc *rproc)
if (rproc->state == RPROC_DETACHED) {
dev_info(dev, "attaching to %s\n", rproc->name);
- ret = rproc_actuate(rproc);
+ ret = rproc_attach(rproc);
} else {
dev_info(dev, "powering up %s\n", rproc->name);
@@ -1885,6 +2083,65 @@ out:
EXPORT_SYMBOL(rproc_shutdown);
/**
+ * rproc_detach() - Detach the remote processor from the
+ * remoteproc core
+ *
+ * @rproc: the remote processor
+ *
+ * Detach a remote processor (previously attached to with rproc_attach()).
+ *
+ * If @rproc is still being used by additional users, this function
+ * will just decrement the power refcount and exit, without
+ * disconnecting the device.
+ *
+ * Function rproc_detach() calls __rproc_detach() in order to let a remote
+ * processor know that services provided by the application processor are
+ * no longer available. From there it should be possible to remove the
+ * platform driver and even power cycle the application processor (if the HW
+ * supports it) without needing to switch off the remote processor.
+ */
+int rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __rproc_detach(rproc);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_detach);
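For reference, a minimal consumer sketch of the new lifecycle (hypothetical code, not part of this patch; the example_* name is illustrative): an rproc found in RPROC_DETACHED state is attached to via rproc_boot() and later handed back with rproc_detach(), leaving the remote processor running.

static int example_use_then_detach(struct rproc *rproc)
{
	int ret;

	/* For an rproc in RPROC_DETACHED state this attaches to it */
	ret = rproc_boot(rproc);
	if (ret)
		return ret;

	/* ... exchange messages with the remote processor ... */

	/* Hand control back; the remote processor keeps running */
	return rproc_detach(rproc);
}

From user space the same transition is exposed through the sysfs state attribute ("echo detach > state") and the character device, per the remoteproc_sysfs.c and remoteproc_cdev.c hunks in this series.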
+
+/**
* rproc_get_by_phandle() - find a remote processor by phandle
* @phandle: phandle to the rproc
*
@@ -2077,16 +2334,6 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
- /*
- * Remind ourselves the remote processor has been attached to rather
- * than booted by the remoteproc core. This is important because the
- * RPROC_DETACHED state will be lost as soon as the remote processor
- * has been attached to. Used in firmware_show() and reset in
- * rproc_stop().
- */
- if (rproc->state == RPROC_DETACHED)
- rproc->autonomous = true;
-
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
@@ -2347,10 +2594,8 @@ int rproc_del(struct rproc *rproc)
if (!rproc)
return -EINVAL;
- /* if rproc is marked always-on, rproc_add() booted it */
/* TODO: make sure this works with rproc->power > 1 */
- if (rproc->auto_boot)
- rproc_shutdown(rproc);
+ rproc_shutdown(rproc);
mutex_lock(&rproc->lock);
rproc->state = RPROC_DELETED;
@@ -2492,7 +2737,11 @@ static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
rcu_read_lock();
list_for_each_entry_rcu(rproc, &rproc_list, node) {
- if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
+ if (!rproc->ops->panic)
+ continue;
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
continue;
d = rproc->ops->panic(rproc);
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index 81ec154a6a5e..aee657cc08c6 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -153,18 +153,22 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest,
size_t offset, size_t size)
{
void *ptr;
+ bool is_iomem;
if (segment->dump) {
segment->dump(rproc, segment, dest, offset, size);
} else {
- ptr = rproc_da_to_va(rproc, segment->da + offset, size);
+ ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
if (!ptr) {
dev_err(&rproc->dev,
"invalid copy request for segment %pad with offset %zu and size %zu)\n",
&segment->da, offset, size);
memset(dest, 0xff, size);
} else {
- memcpy(dest, ptr, size);
+ if (is_iomem)
+ memcpy_fromio(dest, ptr, size);
+ else
+ memcpy(dest, ptr, size);
}
}
}
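On the provider side, the new out-parameter is optional for callers (several call sites below pass NULL), so implementations should test it before writing. A hedged sketch of a da_to_va callback flagging device memory; all example_* names and the priv layout are hypothetical:

static void *example_da_to_va(struct rproc *rproc, u64 da, size_t len,
			      bool *is_iomem)
{
	struct example_priv *priv = rproc->priv;	/* hypothetical layout */

	/* Only translate addresses that fall inside the mapped region */
	if (da < priv->mem_da || da + len > priv->mem_da + priv->mem_size)
		return NULL;

	/* Callers may pass NULL when they do not care about the type */
	if (is_iomem)
		*is_iomem = true;	/* region obtained via ioremap() */

	return (void __force *)priv->mem_va + (da - priv->mem_da);
}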
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 7e5845376e9f..b5a1e3b697d9 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -132,7 +132,7 @@ static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
char buf[100];
int len;
- va = rproc_da_to_va(data->rproc, trace->da, trace->len);
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL);
if (!va) {
len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index df68d87752e4..11423588965a 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -175,6 +175,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
u64 offset = elf_phdr_get_p_offset(class, phdr);
u32 type = elf_phdr_get_p_type(class, phdr);
void *ptr;
+ bool is_iomem;
if (type != PT_LOAD)
continue;
@@ -204,7 +205,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* grab the kernel address for this device address */
- ptr = rproc_da_to_va(rproc, da, memsz);
+ ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
memsz);
@@ -213,8 +214,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* put the segment where the remote processor expects it */
- if (filesz)
- memcpy(ptr, elf_data + offset, filesz);
+ if (filesz) {
+ if (is_iomem)
+ memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
+ else
+ memcpy(ptr, elf_data + offset, filesz);
+ }
/*
* Zero out remaining memory for this segment.
@@ -223,8 +228,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
* did this for us. albeit harmless, we may consider removing
* this.
*/
- if (memsz > filesz)
- memset(ptr + filesz, 0, memsz - filesz);
+ if (memsz > filesz) {
+ if (is_iomem)
+ memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz);
+ else
+ memset(ptr + filesz, 0, memsz - filesz);
+ }
}
return ret;
@@ -377,6 +386,6 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
- return rproc_da_to_va(rproc, sh_addr, sh_size);
+ return rproc_da_to_va(rproc, sh_addr, sh_size, NULL);
}
EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index c34002888d2c..a328e634b1de 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -84,7 +84,7 @@ static inline void rproc_char_device_remove(struct rproc *rproc)
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
@@ -178,6 +178,16 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
}
static inline
+struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ if (rproc->ops->get_loaded_rsc_table)
+ return rproc->ops->get_loaded_rsc_table(rproc, size);
+
+ return NULL;
+}
+
+static inline
bool rproc_u64_fit_in_size_t(u64 val)
{
if (sizeof(size_t) == sizeof(u64))
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 1dbef895e65e..ea8b89f97d7b 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -15,7 +15,7 @@ static ssize_t recovery_show(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
+ return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
}
/*
@@ -82,7 +82,7 @@ static ssize_t coredump_show(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
+ return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
}
/*
@@ -138,11 +138,8 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
* If the remote processor has been started by an external
* entity we have no idea of what image it is running. As such
* simply display a generic string rather than rproc->firmware.
- *
- * Here we rely on the autonomous flag because a remote processor
- * may have been attached to and currently in a running state.
*/
- if (rproc->autonomous)
+ if (rproc->state == RPROC_ATTACHED)
firmware = "unknown";
return sprintf(buf, "%s\n", firmware);
@@ -172,6 +169,7 @@ static const char * const rproc_state_string[] = {
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
+ [RPROC_ATTACHED] = "attached",
[RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
@@ -196,17 +194,24 @@ static ssize_t state_store(struct device *dev,
int ret = 0;
if (sysfs_streq(buf, "start")) {
- if (rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING ||
+ rproc->state == RPROC_ATTACHED)
return -EBUSY;
ret = rproc_boot(rproc);
if (ret)
dev_err(&rproc->dev, "Boot failed: %d\n", ret);
} else if (sysfs_streq(buf, "stop")) {
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
return -EINVAL;
rproc_shutdown(rproc);
+ } else if (sysfs_streq(buf, "detach")) {
+ if (rproc->state != RPROC_ATTACHED)
+ return -EINVAL;
+
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
ret = -EINVAL;
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 09bcb4d8b9e0..22096adc1ad3 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index ccb3c14a0023..7353f9e7e7af 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -28,7 +28,7 @@
#define RELEASE_BOOT 1
#define MBOX_NB_VQ 2
-#define MBOX_NB_MBX 3
+#define MBOX_NB_MBX 4
#define STM32_SMC_RCC 0x82001000
#define STM32_SMC_REG_WRITE 0x1
@@ -38,6 +38,7 @@
#define STM32_MBX_VQ1 "vq1"
#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
+#define STM32_MBX_DETACH "detach"
#define RSC_TBL_SIZE 1024
@@ -207,16 +208,7 @@ static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
return -EINVAL;
}
-static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
- const struct firmware *fw)
-{
- if (rproc_elf_load_rsc_table(rproc, fw))
- dev_warn(&rproc->dev, "no resource table found for this firmware\n");
-
- return 0;
-}
-
-static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
+static int stm32_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
@@ -274,12 +266,10 @@ static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
- int ret = stm32_rproc_parse_memory_regions(rproc);
-
- if (ret)
- return ret;
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
- return stm32_rproc_elf_load_rsc_table(rproc, fw);
+ return 0;
}
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
@@ -347,6 +337,15 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
.tx_done = NULL,
.tx_tout = 500, /* 500 ms time out */
},
+ },
+ {
+ .name = STM32_MBX_DETACH,
+ .vq_id = -1,
+ .client = {
+ .tx_block = true,
+ .tx_done = NULL,
+ .tx_tout = 200, /* a 200 ms timeout for detach should be fair enough */
+ },
}
};
@@ -472,6 +471,25 @@ static int stm32_rproc_attach(struct rproc *rproc)
return stm32_rproc_set_hold_boot(rproc, true);
}
+static int stm32_rproc_detach(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, dummy_data, idx;
+
+ /* Inform the remote processor of the detach */
+ idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH);
+ if (idx >= 0 && ddata->mb[idx].chan) {
+ /* Dummy data is sent to allow blocking on transmit */
+ err = mbox_send_message(ddata->mb[idx].chan,
+ &dummy_data);
+ if (err < 0)
+ dev_warn(&rproc->dev, "warning: remote FW detach without ack\n");
+ }
+
+ /* Allow remote processor to auto-reboot */
+ return stm32_rproc_set_hold_boot(rproc, false);
+}
+
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
@@ -546,14 +564,89 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
}
}
+static int stm32_rproc_da_to_pa(struct rproc *rproc,
+ u64 da, phys_addr_t *pa)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct stm32_rproc_mem *p_mem;
+ unsigned int i;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (da < p_mem->dev_addr ||
+ da >= p_mem->dev_addr + p_mem->size)
+ continue;
+
+ *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+ dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
+
+ return 0;
+ }
+
+ dev_err(dev, "can't translate da %llx\n", da);
+
+ return -EINVAL;
+}
+
+static struct resource_table *
+stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ phys_addr_t rsc_pa;
+ u32 rsc_da;
+ int err;
+
+ /* The resource table has already been mapped, nothing to do */
+ if (ddata->rsc_va)
+ goto done;
+
+ err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+ if (err) {
+ dev_err(dev, "failed to read rsc tbl addr\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!rsc_da)
+ /* no rsc table */
+ return ERR_PTR(-ENOENT);
+
+ err = stm32_rproc_da_to_pa(rproc, rsc_da, &rsc_pa);
+ if (err)
+ return ERR_PTR(err);
+
+ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &rsc_pa, RSC_TBL_SIZE);
+ ddata->rsc_va = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+done:
+ /*
+ * Assuming the resource table fits in 1 kB is fair.
+ * Note that for detach, this 1 kB memory area has to be reserved in
+ * the coprocessor firmware for the resource table. On detach, the
+ * remoteproc core re-initializes this entire area by overwriting it
+ * with the initial values stored in rproc->clean_table.
+ */
+ *table_sz = RSC_TBL_SIZE;
+ return (struct resource_table *)ddata->rsc_va;
+}
+
static const struct rproc_ops st_rproc_ops = {
+ .prepare = stm32_rproc_prepare,
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
.attach = stm32_rproc_attach,
+ .detach = stm32_rproc_detach,
.kick = stm32_rproc_kick,
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
@@ -695,75 +788,6 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
}
-static int stm32_rproc_da_to_pa(struct platform_device *pdev,
- struct stm32_rproc *ddata,
- u64 da, phys_addr_t *pa)
-{
- struct device *dev = &pdev->dev;
- struct stm32_rproc_mem *p_mem;
- unsigned int i;
-
- for (i = 0; i < ddata->nb_rmems; i++) {
- p_mem = &ddata->rmems[i];
-
- if (da < p_mem->dev_addr ||
- da >= p_mem->dev_addr + p_mem->size)
- continue;
-
- *pa = da - p_mem->dev_addr + p_mem->bus_addr;
- dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
-
- return 0;
- }
-
- dev_err(dev, "can't translate da %llx\n", da);
-
- return -EINVAL;
-}
-
-static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev,
- struct rproc *rproc,
- struct stm32_rproc *ddata)
-{
- struct device *dev = &pdev->dev;
- phys_addr_t rsc_pa;
- u32 rsc_da;
- int err;
-
- err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
- if (err) {
- dev_err(dev, "failed to read rsc tbl addr\n");
- return err;
- }
-
- if (!rsc_da)
- /* no rsc table */
- return 0;
-
- err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa);
- if (err)
- return err;
-
- ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
- if (IS_ERR_OR_NULL(ddata->rsc_va)) {
- dev_err(dev, "Unable to map memory region: %pa+%zx\n",
- &rsc_pa, RSC_TBL_SIZE);
- ddata->rsc_va = NULL;
- return -ENOMEM;
- }
-
- /*
- * The resource table is already loaded in device memory, no need
- * to work with a cached table.
- */
- rproc->cached_table = NULL;
- /* Assuming the resource table fits in 1kB is fair */
- rproc->table_sz = RSC_TBL_SIZE;
- rproc->table_ptr = (struct resource_table *)ddata->rsc_va;
-
- return 0;
-}
-
static int stm32_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -797,18 +821,9 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- if (state == M4_STATE_CRUN) {
+ if (state == M4_STATE_CRUN)
rproc->state = RPROC_DETACHED;
- ret = stm32_rproc_parse_memory_regions(rproc);
- if (ret)
- goto free_resources;
-
- ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata);
- if (ret)
- goto free_resources;
- }
-
rproc->has_iommu = false;
ddata->workqueue = create_workqueue(dev_name(dev));
if (!ddata->workqueue) {
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 863c0214e0a8..fd4eb67a6681 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -354,7 +354,7 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct k3_dsp_rproc *kproc = rproc->priv;
void __iomem *va = NULL;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 62b5a4c29456..5cf8d030a1f0 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -590,7 +590,7 @@ out:
* present in a DSP or IPU device). The translated addresses can be used
* either by the remoteproc core for loading, or by any rpmsg bus drivers.
*/
-static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_core *core = kproc->core;
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 92d387dfc03b..484f7605823e 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -89,7 +89,7 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
return error;
}
-static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
void *va = NULL;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 7043c7f6dcf0..3e7f55e44d84 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -197,6 +197,7 @@ config RESET_SIMPLE
- RCC reset controller in STM32 MCUs
- Allwinner SoCs
- ZTE's zx2967 family
+ - SiFive FU740 SoCs
config RESET_STM32MP157
bool "STM32MP157 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index dbf881b586d9..71c1c8264b2d 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -359,6 +359,30 @@ int reset_control_reset(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_reset);
/**
+ * reset_control_bulk_reset - reset the controlled devices in order
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Issue a reset on all provided reset controls, in order.
+ *
+ * See also: reset_control_reset()
+ */
+int reset_control_bulk_reset(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ ret = reset_control_reset(rstcs[i].rstc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_reset);
+
+/**
* reset_control_rearm - allow shared reset line to be re-triggered
* @rstc: reset controller
*
@@ -462,6 +486,36 @@ int reset_control_assert(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_assert);
/**
+ * reset_control_bulk_assert - asserts the reset lines in order
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Assert the reset lines for all provided reset controls, in order.
+ * If an assertion fails, already asserted resets are deasserted again.
+ *
+ * See also: reset_control_assert()
+ */
+int reset_control_bulk_assert(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ ret = reset_control_assert(rstcs[i].rstc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (i--)
+ reset_control_deassert(rstcs[i].rstc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_assert);
+
+/**
* reset_control_deassert - deasserts the reset line
* @rstc: reset controller
*
@@ -512,6 +566,36 @@ int reset_control_deassert(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_deassert);
/**
+ * reset_control_bulk_deassert - deasserts the reset lines in reverse order
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Deassert the reset lines for all provided reset controls, in reverse order.
+ * If a deassertion fails, already deasserted resets are asserted again.
+ *
+ * See also: reset_control_deassert()
+ */
+int reset_control_bulk_deassert(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = num_rstcs - 1; i >= 0; i--) {
+ ret = reset_control_deassert(rstcs[i].rstc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (i < num_rstcs)
+ reset_control_assert(rstcs[i++].rstc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);
+
+/**
* reset_control_status - returns a negative errno if not supported, a
* positive value if the reset line is asserted, or zero if the reset
* line is not asserted or if the desc is NULL (optional reset).
@@ -589,6 +673,36 @@ int reset_control_acquire(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_acquire);
/**
+ * reset_control_bulk_acquire - acquires reset controls for exclusive use
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * This is used to explicitly acquire reset controls requested with
+ * reset_control_bulk_get_exclusive_release() for temporary exclusive use.
+ *
+ * See also: reset_control_acquire(), reset_control_bulk_release()
+ */
+int reset_control_bulk_acquire(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ ret = reset_control_acquire(rstcs[i].rstc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (i--)
+ reset_control_release(rstcs[i].rstc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);
+
+/**
* reset_control_release() - releases exclusive access to a reset control
* @rstc: reset control
*
@@ -610,6 +724,26 @@ void reset_control_release(struct reset_control *rstc)
}
EXPORT_SYMBOL_GPL(reset_control_release);
+/**
+ * reset_control_bulk_release() - releases exclusive access to reset controls
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Releases the exclusive access right to reset controls previously obtained
+ * by a call to reset_control_bulk_acquire().
+ *
+ * See also: reset_control_release(), reset_control_bulk_acquire()
+ */
+void reset_control_bulk_release(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int i;
+
+ for (i = 0; i < num_rstcs; i++)
+ reset_control_release(rstcs[i].rstc);
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_release);
+
static struct reset_control *__reset_control_get_internal(
struct reset_controller_dev *rcdev,
unsigned int index, bool shared, bool acquired)
@@ -814,6 +948,32 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
}
EXPORT_SYMBOL_GPL(__reset_control_get);
+int __reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
+ shared, optional, acquired);
+ if (IS_ERR(rstcs[i].rstc)) {
+ ret = PTR_ERR(rstcs[i].rstc);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ mutex_lock(&reset_list_mutex);
+ while (i--)
+ __reset_control_put_internal(rstcs[i].rstc);
+ mutex_unlock(&reset_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__reset_control_bulk_get);
+
static void reset_control_array_put(struct reset_control_array *resets)
{
int i;
@@ -845,6 +1005,23 @@ void reset_control_put(struct reset_control *rstc)
}
EXPORT_SYMBOL_GPL(reset_control_put);
+/**
+ * reset_control_bulk_put - free the reset controllers
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ */
+void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ mutex_lock(&reset_list_mutex);
+ while (num_rstcs--) {
+ if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
+ continue;
+ __reset_control_put_internal(rstcs[num_rstcs].rstc);
+ }
+ mutex_unlock(&reset_list_mutex);
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_put);
+
static void devm_reset_control_release(struct device *dev, void *res)
{
reset_control_put(*(struct reset_control **)res);
@@ -874,6 +1051,44 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
+struct reset_control_bulk_devres {
+ int num_rstcs;
+ struct reset_control_bulk_data *rstcs;
+};
+
+static void devm_reset_control_bulk_release(struct device *dev, void *res)
+{
+ struct reset_control_bulk_devres *devres = res;
+
+ reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
+}
+
+int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ struct reset_control_bulk_devres *ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ ptr->num_rstcs = num_rstcs;
+ ptr->rstcs = rstcs;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
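
The double-underscore getter is not meant to be called directly: the header side of this series adds static inline wrappers such as devm_reset_control_bulk_get_exclusive() that fill in the shared/optional/acquired flags. A hedged probe-time sketch assuming those wrappers; note that the devres callback keeps a pointer to the caller's array, so it must live in driver data, not on the stack:

	struct mydev {
		struct reset_control_bulk_data resets[2];
	};

	static int mydev_probe(struct platform_device *pdev)
	{
		struct mydev *md;
		int err;

		md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
		if (!md)
			return -ENOMEM;

		md->resets[0].id = "apb";
		md->resets[1].id = "ahb";

		/* reset_control_bulk_put() runs automatically on detach */
		err = devm_reset_control_bulk_get_exclusive(&pdev->dev,
							    ARRAY_SIZE(md->resets),
							    md->resets);
		if (err)
			return err;

		return reset_control_bulk_deassert(ARRAY_SIZE(md->resets),
						   md->resets);
	}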
+
/**
* __device_reset - find reset controller associated with the device
* and perform reset
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 27a05167c18c..05533c71b10e 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
dev_err(glink->dev,
"no intent found for channel %s intent %d",
channel->name, liid);
+ ret = -ENOENT;
goto advance_rx;
}
}
@@ -1332,6 +1333,20 @@ static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_glink_send(channel, data, len, false);
}
+static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+
+ return __qcom_glink_send(channel, data, len, true);
+}
+
+static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+
+ return __qcom_glink_send(channel, data, len, false);
+}
+
/*
* Finds the device_node for the glink child interested in this channel.
*/
@@ -1364,7 +1379,9 @@ static const struct rpmsg_device_ops glink_device_ops = {
static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
.destroy_ept = qcom_glink_destroy_ept,
.send = qcom_glink_send,
+ .sendto = qcom_glink_sendto,
.trysend = qcom_glink_trysend,
+ .trysendto = qcom_glink_trysendto,
};
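
GLINK channels are point-to-point, so the new callbacks deliberately ignore @dst and forward to __qcom_glink_send(); they exist so that rpmsg_sendto()/rpmsg_trysendto() callers no longer fail on missing endpoint ops. A hedged caller sketch (names illustrative):

	static int send_ping(struct rpmsg_device *rpdev)
	{
		static const u8 ping[] = "PING";

		/* dst is required by the generic API, ignored by GLINK/SMD */
		return rpmsg_sendto(rpdev->ept, (void *)ping, sizeof(ping),
				    rpdev->dst);
	}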
static void qcom_glink_rpdev_release(struct device *dev)
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 19903de6268d..8da1b5cb31b3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -974,6 +974,20 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_smd_send(qsept->qsch, data, len, false);
}
+static int qcom_smd_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+
+ return __qcom_smd_send(qsept->qsch, data, len, true);
+}
+
+static int qcom_smd_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+
+ return __qcom_smd_send(qsept->qsch, data, len, false);
+}
+
static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
@@ -1038,7 +1052,9 @@ static const struct rpmsg_device_ops qcom_smd_device_ops = {
static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
.destroy_ept = qcom_smd_destroy_ept,
.send = qcom_smd_send,
+ .sendto = qcom_smd_sendto,
.trysend = qcom_smd_trysend,
+ .trysendto = qcom_smd_trysendto,
.poll = qcom_smd_poll,
};
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 4bbbacdbf3bb..2bebc9b2d163 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -127,6 +127,9 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
struct rpmsg_device *rpdev = eptdev->rpdev;
struct device *dev = &eptdev->dev;
+ if (eptdev->ept)
+ return -EBUSY;
+
get_device(dev);
ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
@@ -239,9 +242,9 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
}
if (filp->f_flags & O_NONBLOCK)
- ret = rpmsg_trysend(eptdev->ept, kbuf, len);
+ ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
else
- ret = rpmsg_send(eptdev->ept, kbuf, len);
+ ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
unlock_eptdev:
mutex_unlock(&eptdev->ept_lock);
@@ -543,7 +546,7 @@ static struct rpmsg_driver rpmsg_chrdev_driver = {
},
};
-static int rpmsg_char_init(void)
+static int rpmsg_chrdev_init(void)
{
int ret;
@@ -569,7 +572,7 @@ static int rpmsg_char_init(void)
return ret;
}
-postcore_initcall(rpmsg_char_init);
+postcore_initcall(rpmsg_chrdev_init);
static void rpmsg_chrdev_exit(void)
{
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index e87d4cf926eb..8e49a3bacfc7 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -813,14 +813,57 @@ static void rpmsg_xmit_done(struct virtqueue *svq)
wake_up_interruptible(&vrp->sendq);
}
+/*
+ * Called to expose to user space a /dev/rpmsg_ctrlX interface allowing
+ * endpoint-to-endpoint communication to be created without an associated
+ * RPMsg channel. The endpoints are attached to the ctrldev RPMsg device.
+ */
+static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev)
+{
+ struct virtproc_info *vrp = vdev->priv;
+ struct virtio_rpmsg_channel *vch;
+ struct rpmsg_device *rpdev_ctrl;
+ int err = 0;
+
+ vch = kzalloc(sizeof(*vch), GFP_KERNEL);
+ if (!vch)
+ return ERR_PTR(-ENOMEM);
+
+ /* Link the channel to the vrp */
+ vch->vrp = vrp;
+
+ /* Assign public information to the rpmsg_device */
+ rpdev_ctrl = &vch->rpdev;
+ rpdev_ctrl->ops = &virtio_rpmsg_ops;
+
+ rpdev_ctrl->dev.parent = &vrp->vdev->dev;
+ rpdev_ctrl->dev.release = virtio_rpmsg_release_device;
+ rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev);
+
+ err = rpmsg_chrdev_register_device(rpdev_ctrl);
+ if (err) {
+ kfree(vch);
+ return ERR_PTR(err);
+ }
+
+ return rpdev_ctrl;
+}
+
+static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl)
+{
+ if (!rpdev_ctrl)
+ return;
+ kfree(to_virtio_rpmsg_channel(rpdev_ctrl));
+}
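
The new device is picked up by rpmsg_char and surfaces as /dev/rpmsg_ctrlX, where user space can mint raw endpoints through the existing RPMSG_CREATE_EPT_IOCTL UAPI. A hedged user-space sketch (path and addresses illustrative):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rpmsg.h>

	int open_raw_ept(void)
	{
		struct rpmsg_endpoint_info info = { .src = 42, .dst = 42 };
		int ret, ctrl = open("/dev/rpmsg_ctrl0", O_RDWR);

		if (ctrl < 0)
			return -1;
		strncpy(info.name, "demo-ept", sizeof(info.name) - 1);

		/* creates /dev/rpmsgN backed by an endpoint, no channel */
		ret = ioctl(ctrl, RPMSG_CREATE_EPT_IOCTL, &info);
		close(ctrl);
		return ret;
	}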
+
static int rpmsg_probe(struct virtio_device *vdev)
{
vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
static const char * const names[] = { "input", "output" };
struct virtqueue *vqs[2];
struct virtproc_info *vrp;
- struct virtio_rpmsg_channel *vch;
- struct rpmsg_device *rpdev_ns;
+ struct virtio_rpmsg_channel *vch = NULL;
+ struct rpmsg_device *rpdev_ns, *rpdev_ctrl;
void *bufs_va;
int err = 0, i;
size_t total_buf_space;
@@ -894,12 +937,18 @@ static int rpmsg_probe(struct virtio_device *vdev)
vdev->priv = vrp;
+ rpdev_ctrl = rpmsg_virtio_add_ctrl_dev(vdev);
+ if (IS_ERR(rpdev_ctrl)) {
+ err = PTR_ERR(rpdev_ctrl);
+ goto free_coherent;
+ }
+
/* if supported by the remote processor, enable the name service */
if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
vch = kzalloc(sizeof(*vch), GFP_KERNEL);
if (!vch) {
err = -ENOMEM;
- goto free_coherent;
+ goto free_ctrldev;
}
/* Link the channel to our vrp */
@@ -915,7 +964,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
err = rpmsg_ns_register_device(rpdev_ns);
if (err)
- goto free_coherent;
+ goto free_vch;
}
/*
@@ -939,8 +988,11 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
-free_coherent:
+free_vch:
kfree(vch);
+free_ctrldev:
+ rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
+free_coherent:
dma_free_coherent(vdev->dev.parent, total_buf_space,
bufs_va, vrp->bufs_dma);
vqs_del:
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 773386b4aeb7..d8c13fded164 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1339,6 +1339,7 @@ config RTC_DRV_DIGICOLOR
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
depends on ARCH_MXC
+ depends on OF
help
Support for Freescale IMX DryIce RTC
@@ -1906,7 +1907,7 @@ config RTC_DRV_HID_SENSOR_TIME
config RTC_DRV_GOLDFISH
tristate "Goldfish Real Time Clock"
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
help
Say yes to enable RTC driver for the Goldfish based virtual platform.
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index dcb34c73319e..9a2bd4947007 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int rc = 0, err;
+ int err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
@@ -561,17 +561,21 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (rtc->uie_rtctimer.enabled == enabled)
goto out;
- if (rtc->uie_unsupported) {
- err = -EINVAL;
- goto out;
+ if (rtc->uie_unsupported || !test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ return rtc_dev_update_irq_enable_emul(rtc, enabled);
+#else
+ return -EINVAL;
+#endif
}
if (enabled) {
struct rtc_time tm;
ktime_t now, onesec;
- rc = __rtc_read_time(rtc, &tm);
- if (rc)
+ err = __rtc_read_time(rtc, &tm);
+ if (err)
goto out;
onesec = ktime_set(1, 0);
now = rtc_tm_to_ktime(tm);
@@ -585,24 +589,6 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
- /*
- * __rtc_read_time() failed, this probably means that the RTC time has
- * never been set or less probably there is a transient error on the
- * bus. In any case, avoid enabling emulation has this will fail when
- * reading the time too.
- */
- if (rc)
- return rc;
-
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- /*
- * Enable emulation if the driver returned -EINVAL to signal that it has
- * been configured without interrupts or they are not available at the
- * moment.
- */
- if (err == -EINVAL)
- err = rtc_dev_update_irq_enable_emul(rtc, enabled);
-#endif
return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index b20d8f26dcdb..a9b355510cd4 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -11,6 +11,7 @@
#include <linux/bcd.h>
#include <linux/of.h>
#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -57,6 +58,24 @@
#define ABEOZ9_SEC_LEN 7
+#define ABEOZ9_REG_ALARM_SEC 0x10
+#define ABEOZ9_BIT_ALARM_SEC GENMASK(6, 0)
+#define ABEOZ9_REG_ALARM_MIN 0x11
+#define ABEOZ9_BIT_ALARM_MIN GENMASK(6, 0)
+#define ABEOZ9_REG_ALARM_HOURS 0x12
+#define ABEOZ9_BIT_ALARM_HOURS_PM BIT(5)
+#define ABEOZ9_BIT_ALARM_HOURS GENMASK(4, 0)
+#define ABEOZ9_REG_ALARM_DAYS 0x13
+#define ABEOZ9_BIT_ALARM_DAYS GENMASK(5, 0)
+#define ABEOZ9_REG_ALARM_WEEKDAYS 0x14
+#define ABEOZ9_BIT_ALARM_WEEKDAYS GENMASK(2, 0)
+#define ABEOZ9_REG_ALARM_MONTHS 0x15
+#define ABEOZ9_BIT_ALARM_MONTHS GENMASK(4, 0)
+#define ABEOZ9_REG_ALARM_YEARS 0x16
+
+#define ABEOZ9_ALARM_LEN 7
+#define ABEOZ9_BIT_ALARM_AE BIT(7)
+
#define ABEOZ9_REG_REG_TEMP 0x20
#define ABEOZ953_TEMP_MAX 120
#define ABEOZ953_TEMP_MIN -60
@@ -186,6 +205,98 @@ static int abeoz9_rtc_set_time(struct device *dev, struct rtc_time *tm)
return abeoz9_reset_validity(regmap);
}
+static int abeoz9_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u8 regs[ABEOZ9_ALARM_LEN];
+ u8 val[2];
+ int ret;
+
+ ret = abeoz9_check_validity(dev);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(regmap, ABEOZ9_REG_CTRL_INT, val, sizeof(val));
+ if (ret)
+ return ret;
+
+ alarm->enabled = val[0] & ABEOZ9_REG_CTRL_INT_AIE;
+ alarm->pending = val[1] & ABEOZ9_REG_CTRL_INT_FLAG_AF;
+
+ ret = regmap_bulk_read(regmap, ABEOZ9_REG_ALARM_SEC, regs, sizeof(regs));
+ if (ret)
+ return ret;
+
+ alarm->time.tm_sec = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_SEC, regs[0]));
+ alarm->time.tm_min = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_MIN, regs[1]));
+ alarm->time.tm_hour = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_HOURS, regs[2]));
+ if (FIELD_GET(ABEOZ9_BIT_ALARM_HOURS_PM, regs[2]))
+ alarm->time.tm_hour += 12;
+
+ alarm->time.tm_mday = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_DAYS, regs[3]));
+
+ return 0;
+}
+
+static int abeoz9_rtc_alarm_irq_enable(struct device *dev, u32 enable)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+
+ return regmap_update_bits(data->regmap, ABEOZ9_REG_CTRL_INT,
+ ABEOZ9_REG_CTRL_INT_AIE,
+ FIELD_PREP(ABEOZ9_REG_CTRL_INT_AIE, enable));
+}
+
+static int abeoz9_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ABEOZ9_ALARM_LEN] = {0};
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ABEOZ9_REG_CTRL_INT_FLAG,
+ ABEOZ9_REG_CTRL_INT_FLAG_AF, 0);
+ if (ret)
+ return ret;
+
+ regs[0] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_SEC,
+ bin2bcd(alarm->time.tm_sec));
+ regs[1] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_MIN,
+ bin2bcd(alarm->time.tm_min));
+ regs[2] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_HOURS,
+ bin2bcd(alarm->time.tm_hour));
+ regs[3] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_DAYS,
+ bin2bcd(alarm->time.tm_mday));
+
+ ret = regmap_bulk_write(data->regmap, ABEOZ9_REG_ALARM_SEC, regs,
+ sizeof(regs));
+ if (ret)
+ return ret;
+
+ return abeoz9_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static irqreturn_t abeoz9_rtc_irq(int irq, void *dev)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, ABEOZ9_REG_CTRL_INT_FLAG, &val);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!FIELD_GET(ABEOZ9_REG_CTRL_INT_FLAG_AF, val))
+ return IRQ_NONE;
+
+ regmap_update_bits(data->regmap, ABEOZ9_REG_CTRL_INT_FLAG,
+ ABEOZ9_REG_CTRL_INT_FLAG_AF, 0);
+
+ rtc_update_irq(data->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
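
The alarm code packs and unpacks BCD fields with FIELD_PREP()/FIELD_GET() against GENMASK() masks, which also strips the per-register AE (alarm enable) bit on read. A self-contained round trip for the hours field (the value 5 is chosen arbitrarily):

	/* ABEOZ9_BIT_ALARM_HOURS is GENMASK(4, 0), AE is BIT(7) */
	static int hours_roundtrip_example(void)
	{
		u8 reg = ABEOZ9_BIT_ALARM_AE |
			 FIELD_PREP(ABEOZ9_BIT_ALARM_HOURS, bin2bcd(5));

		/* FIELD_GET() masks off AE and shifts the field down */
		return bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_HOURS, reg)); /* 5 */
	}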
static int abeoz9_trickle_parse_dt(struct device_node *node)
{
u32 ohms = 0;
@@ -258,12 +369,16 @@ static int abeoz9_rtc_setup(struct device *dev, struct device_node *node)
static const struct rtc_class_ops rtc_ops = {
.read_time = abeoz9_rtc_get_time,
- .set_time = abeoz9_rtc_set_time,
+ .set_time = abeoz9_rtc_set_time,
+ .read_alarm = abeoz9_rtc_read_alarm,
+ .set_alarm = abeoz9_rtc_set_alarm,
+ .alarm_irq_enable = abeoz9_rtc_alarm_irq_enable,
};
static const struct regmap_config abeoz9_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x3f,
};
#if IS_REACHABLE(CONFIG_HWMON)
@@ -419,6 +534,24 @@ static int abeoz9_probe(struct i2c_client *client,
data->rtc->ops = &rtc_ops;
data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
data->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ data->rtc->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_ALARM, data->rtc->features);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ abeoz9_rtc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "failed to request alarm irq\n");
+ return ret;
+ }
+ }
+
+ if (client->irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
+ device_init_wakeup(dev, true);
+ set_bit(RTC_FEATURE_ALARM, data->rtc->features);
+ }
ret = devm_rtc_register_device(data->rtc);
if (ret)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cd8e438bc9c4..336cb9aa5e33 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -169,9 +169,6 @@ enum ds_type {
struct ds1307 {
enum ds_type type;
- unsigned long flags;
-#define HAS_NVRAM 0 /* bit 0 == sysfs file active */
-#define HAS_ALARM 1 /* bit 1 == irq claimed */
struct device *dev;
struct regmap *regmap;
const char *name;
@@ -296,7 +293,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
tmp = regs[DS1307_REG_HOUR] & 0x3f;
t->tm_hour = bcd2bin(tmp);
- t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
+ /* rx8130 stores the weekday as a bit position, not BCD */
+ if (ds1307->type == rx_8130)
+ t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f) - 1;
+ else
+ t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
tmp = regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
@@ -343,7 +344,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
- regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+ /* rx8130 stores the weekday as a bit position, not BCD */
+ if (ds1307->type == rx_8130)
+ regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
+ else
+ regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
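
The rx8130 weekday round trip: the set path stores one bit per day (1 << tm_wday, Sunday = bit 0), so the read path must recover the bit index; fls() is 1-based, hence the - 1. Worked example for Wednesday (tm_wday == 3):

	u8 reg = 1 << 3;		/* 0x08, one set bit per weekday */
	int wday = fls(reg & 0x7f) - 1;	/* fls(0x08) == 4, so wday == 3 */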
@@ -411,9 +416,6 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
int ret;
u8 regs[9];
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
/* read all ALARM1, ALARM2, and status registers at once */
ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS,
regs, sizeof(regs));
@@ -454,9 +456,6 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 control, status;
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, enabled=%d, pending=%d\n",
"alarm set", t->time.tm_sec, t->time.tm_min,
@@ -512,9 +511,6 @@ static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -ENOTTY;
-
return regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
DS1337_BIT_A1IE,
enabled ? DS1337_BIT_A1IE : 0);
@@ -592,9 +588,6 @@ static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 ald[3], ctl[3];
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
/* Read alarm registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald,
sizeof(ald));
@@ -634,9 +627,6 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 ald[3], ctl[3];
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -681,9 +671,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int ret, reg;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
ret = regmap_read(ds1307->regmap, RX8130_REG_CONTROL0, &reg);
if (ret < 0)
return ret;
@@ -735,9 +722,6 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 regs[10];
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
/* Read control and alarm 0 registers. */
ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs,
sizeof(regs));
@@ -793,9 +777,6 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
unsigned char regs[10];
int wday, ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
wday = mcp794xx_alm_weekday(dev, &t->time);
if (wday < 0)
return wday;
@@ -842,9 +823,6 @@ static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL,
MCP794XX_BIT_ALM0_EN,
enabled ? MCP794XX_BIT_ALM0_EN : 0);
@@ -1641,7 +1619,7 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
* Interrupt signal due to alarm conditions and square-wave
* output share same pin, so don't initialize both.
*/
- if (i == DS3231_CLK_SQW && test_bit(HAS_ALARM, &ds1307->flags))
+ if (i == DS3231_CLK_SQW && test_bit(RTC_FEATURE_ALARM, ds1307->rtc->features))
continue;
init.name = ds3231_clks_names[i];
@@ -1964,15 +1942,15 @@ static int ds1307_probe(struct i2c_client *client,
bin2bcd(tmp));
}
- if (want_irq || ds1307_can_wakeup_device) {
- device_set_wakeup_capable(ds1307->dev, true);
- set_bit(HAS_ALARM, &ds1307->flags);
- }
-
ds1307->rtc = devm_rtc_allocate_device(ds1307->dev);
if (IS_ERR(ds1307->rtc))
return PTR_ERR(ds1307->rtc);
+ if (want_irq || ds1307_can_wakeup_device)
+ device_set_wakeup_capable(ds1307->dev, true);
+ else
+ clear_bit(RTC_FEATURE_ALARM, ds1307->rtc->features);
+
if (ds1307_can_wakeup_device && !want_irq) {
dev_info(ds1307->dev,
"'wakeup-source' is set, request for an IRQ is disabled!\n");
@@ -1988,7 +1966,7 @@ static int ds1307_probe(struct i2c_client *client,
if (err) {
client->irq = 0;
device_set_wakeup_capable(ds1307->dev, false);
- clear_bit(HAS_ALARM, &ds1307->flags);
+ clear_bit(RTC_FEATURE_ALARM, ds1307->rtc->features);
dev_err(ds1307->dev, "unable to request IRQ!\n");
} else {
dev_dbg(ds1307->dev, "got IRQ %d\n", client->irq);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index bda884333082..1109cad83838 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -104,12 +104,6 @@ rtc_write(uint8_t val, uint32_t reg)
writeb(val, ds1511_base + (reg * reg_spacing));
}
-static inline void
-rtc_write_alarm(uint8_t val, enum ds1511reg reg)
-{
- rtc_write((val | 0x80), reg);
-}
-
static noinline uint8_t
rtc_read(enum ds1511reg reg)
{
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 57cc09d0a806..c0df49fb978c 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
{ .compatible = "fsl,lx2160a-ftm-alarm", },
{ },
};
+MODULE_DEVICE_TABLE(of, ftm_rtc_match);
static const struct acpi_device_id ftm_imx_acpi_ids[] = {
{"NXP0014",},
diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c
index cc9fbab49999..814d516645e2 100644
--- a/drivers/rtc/rtc-imx-sc.c
+++ b/drivers/rtc/rtc-imx-sc.c
@@ -80,16 +80,6 @@ static int imx_sc_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
return imx_scu_irq_group_enable(SC_IRQ_GROUP_RTC, SC_IRQ_RTC, enable);
}
-static int imx_sc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- /*
- * SCU firmware does NOT provide read alarm API, but .read_alarm
- * callback is required by RTC framework to support alarm function,
- * so just return here.
- */
- return 0;
-}
-
static int imx_sc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct imx_sc_msg_timer_rtc_set_alarm msg;
@@ -127,7 +117,6 @@ static int imx_sc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static const struct rtc_class_ops imx_sc_rtc_ops = {
.read_time = imx_sc_rtc_read_time,
.set_time = imx_sc_rtc_set_time,
- .read_alarm = imx_sc_rtc_read_alarm,
.set_alarm = imx_sc_rtc_set_alarm,
.alarm_irq_enable = imx_sc_rtc_alarm_irq_enable,
};
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index c2692da74e09..c1806f4d68e7 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -840,19 +840,17 @@ static int __exit dryice_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id dryice_dt_ids[] = {
{ .compatible = "fsl,imx25-rtc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dryice_dt_ids);
-#endif
static struct platform_driver dryice_rtc_driver = {
.driver = {
.name = "imxdi_rtc",
- .of_match_table = of_match_ptr(dryice_dt_ids),
+ .of_match_table = dryice_dt_ids,
},
.remove = __exit_p(dryice_rtc_remove),
};
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 1d2e99a70fce..f0f6b9b6daec 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -421,7 +421,7 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
/* Try to get irq number. We also can work in
* the mode without IRQ.
*/
- m48t59->irq = platform_get_irq(pdev, 0);
+ m48t59->irq = platform_get_irq_optional(pdev, 0);
if (m48t59->irq <= 0)
m48t59->irq = NO_IRQ;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index db57dda7ab97..0f08f22df869 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -415,7 +415,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
- .of_match_table = of_match_ptr(imx_rtc_dt_ids),
+ .of_match_table = imx_rtc_dt_ids,
},
.probe = mxc_rtc_probe,
};
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index dc7db2477f88..d46e0f0cc502 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -786,8 +786,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
/* enable RTC functional clock */
if (rtc->type->has_32kclk_en) {
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
- rtc_writel(rtc, OMAP_RTC_OSC_REG,
- reg | OMAP_RTC_OSC_32KCLK_EN);
+ rtc_write(rtc, OMAP_RTC_OSC_REG, reg | OMAP_RTC_OSC_32KCLK_EN);
}
/* clear old status */
@@ -845,7 +844,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
- rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
+ rtc_write(rtc, OMAP_RTC_OSC_REG, reg);
}
rtc->type->lock(rtc);
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index aef6c1ee8bb0..82becae14229 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -478,6 +478,7 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
{
struct clk *clk;
struct clk_init_data init;
+ struct device_node *node = pcf85063->rtc->dev.parent->of_node;
init.name = "pcf85063-clkout";
init.ops = &pcf85063_clkout_ops;
@@ -487,15 +488,13 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
pcf85063->clkout_hw.init = &init;
/* optional override of the clockname */
- of_property_read_string(pcf85063->rtc->dev.of_node,
- "clock-output-names", &init.name);
+ of_property_read_string(node, "clock-output-names", &init.name);
/* register the clock */
clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
if (!IS_ERR(clk))
- of_clk_add_provider(pcf85063->rtc->dev.of_node,
- of_clk_src_simple_get, clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 5e1e7b2a8c9a..740e2136ca98 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -8,12 +8,15 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
-
-#define DRIVER_NAME "rtc-pcf8523"
+#include <linux/pm_wakeirq.h>
#define REG_CONTROL1 0x00
#define REG_CONTROL1_CAP_SEL BIT(7)
#define REG_CONTROL1_STOP BIT(5)
+#define REG_CONTROL1_AIE BIT(1)
+
+#define REG_CONTROL2 0x01
+#define REG_CONTROL2_AF BIT(3)
#define REG_CONTROL3 0x02
#define REG_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
@@ -32,9 +35,22 @@
#define REG_MONTHS 0x08
#define REG_YEARS 0x09
+#define REG_MINUTE_ALARM 0x0a
+#define REG_HOUR_ALARM 0x0b
+#define REG_DAY_ALARM 0x0c
+#define REG_WEEKDAY_ALARM 0x0d
+#define ALARM_DIS BIT(7)
+
#define REG_OFFSET 0x0e
#define REG_OFFSET_MODE BIT(7)
+#define REG_TMR_CLKOUT_CTRL 0x0f
+
+struct pcf8523 {
+ struct rtc_device *rtc;
+ struct i2c_client *client;
+};
+
static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
{
struct i2c_msg msgs[2];
@@ -140,6 +156,27 @@ static int pcf8523_set_pm(struct i2c_client *client, u8 pm)
return 0;
}
+static irqreturn_t pcf8523_irq(int irq, void *dev_id)
+{
+ struct pcf8523 *pcf8523 = i2c_get_clientdata(dev_id);
+ u8 value;
+ int err;
+
+ err = pcf8523_read(pcf8523->client, REG_CONTROL2, &value);
+ if (err < 0)
+ return IRQ_HANDLED;
+
+ if (value & REG_CONTROL2_AF) {
+ value &= ~REG_CONTROL2_AF;
+ pcf8523_write(pcf8523->client, REG_CONTROL2, value);
+ rtc_update_irq(pcf8523->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int pcf8523_stop_rtc(struct i2c_client *client)
{
u8 value;
@@ -259,11 +296,118 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
return pcf8523_start_rtc(client);
}
+static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 start = REG_MINUTE_ALARM, regs[4];
+ struct i2c_msg msgs[2];
+ u8 value;
+ int err;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &start;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(regs);
+ msgs[1].buf = regs;
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err < 0)
+ return err;
+
+ tm->time.tm_sec = 0;
+ tm->time.tm_min = bcd2bin(regs[0] & 0x7F);
+ tm->time.tm_hour = bcd2bin(regs[1] & 0x3F);
+ tm->time.tm_mday = bcd2bin(regs[2] & 0x3F);
+ tm->time.tm_wday = bcd2bin(regs[3] & 0x7);
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+ tm->enabled = !!(value & REG_CONTROL1_AIE);
+
+ err = pcf8523_read(client, REG_CONTROL2, &value);
+ if (err < 0)
+ return err;
+ tm->pending = !!(value & REG_CONTROL2_AF);
+
+ return 0;
+}
+
+static int pcf8523_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~REG_CONTROL1_AIE;
+
+ if (enabled)
+ value |= REG_CONTROL1_AIE;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msg;
+ u8 regs[5];
+ int err;
+
+ err = pcf8523_irq_enable(dev, 0);
+ if (err)
+ return err;
+
+ err = pcf8523_write(client, REG_CONTROL2, 0);
+ if (err < 0)
+ return err;
+
+ /* The alarm has no seconds field; round up to the next full minute */
+ if (tm->time.tm_sec) {
+ time64_t alarm_time = rtc_tm_to_time64(&tm->time);
+
+ alarm_time += 60 - tm->time.tm_sec;
+ rtc_time64_to_tm(alarm_time, &tm->time);
+ }
+
+ regs[0] = REG_MINUTE_ALARM;
+ regs[1] = bin2bcd(tm->time.tm_min);
+ regs[2] = bin2bcd(tm->time.tm_hour);
+ regs[3] = bin2bcd(tm->time.tm_mday);
+ regs[4] = ALARM_DIS;
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = sizeof(regs);
+ msg.buf = regs;
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0)
+ return err;
+
+ if (tm->enabled)
+ return pcf8523_irq_enable(dev, tm->enabled);
+
+ return 0;
+}
+
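
A concrete trace of the minute rounding above, for a requested alarm of 14:37:41:

	alarm_time = rtc_tm_to_time64(&tm->time);	/* ends in :41 */
	alarm_time += 60 - 41;				/* add 19 seconds */
	rtc_time64_to_tm(alarm_time, &tm->time);	/* now 14:38:00 */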
#ifdef CONFIG_RTC_INTF_DEV
static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
struct i2c_client *client = to_i2c_client(dev);
+ unsigned int flags = 0;
+ u8 value;
int ret;
switch (cmd) {
@@ -272,9 +416,16 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
if (ret < 0)
return ret;
if (ret)
- ret = RTC_VL_BACKUP_LOW;
+ flags |= RTC_VL_BACKUP_LOW;
- return put_user(ret, (unsigned int __user *)arg);
+ ret = pcf8523_read(client, REG_SECONDS, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & REG_SECONDS_OS)
+ flags |= RTC_VL_DATA_INVALID;
+
+ return put_user(flags, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
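
With both conditions OR-ed into a single flags word, user space sees battery state and oscillator stop in one RTC_VL_READ call. A hedged user-space sketch (device path illustrative):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int rtc_time_trustworthy(const char *dev)	/* e.g. "/dev/rtc0" */
	{
		unsigned int flags = 0;
		int ret, fd = open(dev, O_RDONLY);

		if (fd < 0)
			return -1;
		ret = ioctl(fd, RTC_VL_READ, &flags);
		close(fd);
		if (ret < 0)
			return -1;
		/* RTC_VL_DATA_INVALID: the oscillator stopped at some point */
		return !(flags & (RTC_VL_BACKUP_LOW | RTC_VL_DATA_INVALID));
	}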
@@ -322,6 +473,9 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
+ .read_alarm = pcf8523_rtc_read_alarm,
+ .set_alarm = pcf8523_rtc_set_alarm,
+ .alarm_irq_enable = pcf8523_irq_enable,
.ioctl = pcf8523_rtc_ioctl,
.read_offset = pcf8523_rtc_read_offset,
.set_offset = pcf8523_rtc_set_offset,
@@ -330,12 +484,21 @@ static const struct rtc_class_ops pcf8523_rtc_ops = {
static int pcf8523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct pcf8523 *pcf8523;
struct rtc_device *rtc;
+ bool wakeup_source = false;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
+ pcf8523 = devm_kzalloc(&client->dev, sizeof(struct pcf8523), GFP_KERNEL);
+ if (!pcf8523)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pcf8523);
+ pcf8523->client = client;
+
err = pcf8523_load_capacitance(client);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
@@ -349,9 +512,32 @@ static int pcf8523_probe(struct i2c_client *client,
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ pcf8523->rtc = rtc;
rtc->ops = &pcf8523_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->uie_unsupported = 1;
+
+ if (client->irq > 0) {
+ err = pcf8523_write(client, REG_TMR_CLKOUT_CTRL, 0x38);
+ if (err < 0)
+ return err;
+
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf8523_irq,
+ IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(&rtc->dev), client);
+ if (err)
+ return err;
+
+ dev_pm_set_wake_irq(&client->dev, client->irq);
+ }
+
+#ifdef CONFIG_OF
+ wakeup_source = of_property_read_bool(client->dev.of_node, "wakeup-source");
+#endif
+ if (client->irq > 0 || wakeup_source)
+ device_init_wakeup(&client->dev, true);
return devm_rtc_register_device(rtc);
}
@@ -373,7 +559,7 @@ MODULE_DEVICE_TABLE(of, pcf8523_of_match);
static struct i2c_driver pcf8523_driver = {
.driver = {
- .name = DRIVER_NAME,
+ .name = "rtc-pcf8523",
.of_match_table = of_match_ptr(pcf8523_of_match),
},
.probe = pcf8523_probe,
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index eb206597a8fa..29a1c65661e9 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -445,6 +445,16 @@ static const struct pm8xxx_rtc_regs pm8941_regs = {
.alarm_en = BIT(7),
};
+static const struct pm8xxx_rtc_regs pmk8350_regs = {
+ .ctrl = 0x6146,
+ .write = 0x6140,
+ .read = 0x6148,
+ .alarm_rw = 0x6240,
+ .alarm_ctrl = 0x6246,
+ .alarm_ctrl2 = 0x6248,
+ .alarm_en = BIT(7),
+};
+
/*
* Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
*/
@@ -453,6 +463,7 @@ static const struct of_device_id pm8xxx_id_table[] = {
{ .compatible = "qcom,pm8018-rtc", .data = &pm8921_regs },
{ .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
{ .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
+ { .compatible = "qcom,pmk8350-rtc", .data = &pmk8350_regs },
{ },
};
MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 0c48d980d06a..12c807306893 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -320,7 +320,7 @@ static int rv3028_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec = bcd2bin(date[RV3028_SEC] & 0x7f);
tm->tm_min = bcd2bin(date[RV3028_MIN] & 0x7f);
tm->tm_hour = bcd2bin(date[RV3028_HOUR] & 0x3f);
- tm->tm_wday = ilog2(date[RV3028_WDAY] & 0x7f);
+ tm->tm_wday = date[RV3028_WDAY] & 0x7f;
tm->tm_mday = bcd2bin(date[RV3028_DAY] & 0x3f);
tm->tm_mon = bcd2bin(date[RV3028_MONTH] & 0x1f) - 1;
tm->tm_year = bcd2bin(date[RV3028_YEAR]) + 100;
@@ -337,7 +337,7 @@ static int rv3028_set_time(struct device *dev, struct rtc_time *tm)
date[RV3028_SEC] = bin2bcd(tm->tm_sec);
date[RV3028_MIN] = bin2bcd(tm->tm_min);
date[RV3028_HOUR] = bin2bcd(tm->tm_hour);
- date[RV3028_WDAY] = 1 << (tm->tm_wday);
+ date[RV3028_WDAY] = tm->tm_wday;
date[RV3028_DAY] = bin2bcd(tm->tm_mday);
date[RV3028_MONTH] = bin2bcd(tm->tm_mon + 1);
date[RV3028_YEAR] = bin2bcd(tm->tm_year - 100);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 79161d4c6ce4..f4d425002f7f 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -447,6 +447,12 @@ static int rx6110_i2c_probe(struct i2c_client *client,
return rx6110_probe(rx6110, &client->dev);
}
+static const struct acpi_device_id rx6110_i2c_acpi_match[] = {
+ { "SECC6110" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rx6110_i2c_acpi_match);
+
static const struct i2c_device_id rx6110_i2c_id[] = {
{ "rx6110", 0 },
{ }
@@ -456,6 +462,7 @@ MODULE_DEVICE_TABLE(i2c, rx6110_i2c_id);
static struct i2c_driver rx6110_i2c_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
+ .acpi_match_table = rx6110_i2c_acpi_match,
},
.probe = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 80b66f16db89..038269a6b08c 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -713,16 +713,10 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
static int s5m_rtc_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
- struct sec_platform_data *pdata = s5m87xx->pdata;
struct s5m_rtc_info *info;
const struct regmap_config *regmap_cfg;
int ret, alarm_irq;
- if (!pdata) {
- dev_err(pdev->dev.parent, "Platform data not supplied\n");
- return -ENODEV;
- }
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 833daeb7b60e..ee721e53c155 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -153,12 +153,12 @@ static void rtc_wait_not_busy(struct spear_rtc_config *config)
static irqreturn_t spear_rtc_irq(int irq, void *dev_id)
{
struct spear_rtc_config *config = dev_id;
- unsigned long flags, events = 0;
+ unsigned long events = 0;
unsigned int irq_data;
- spin_lock_irqsave(&config->lock, flags);
+ spin_lock(&config->lock);
irq_data = readl(config->ioaddr + STATUS_REG);
- spin_unlock_irqrestore(&config->lock, flags);
+ spin_unlock(&config->lock);
if ((irq_data & RTC_INT_MASK)) {
spear_rtc_clear_interrupt(config);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 288abb1abdb8..bc89c62ccb9b 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/math64.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index 8a957d31a1a4..74026f67fdfb 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -273,7 +273,7 @@ static bool rtc_does_wakealarm(struct rtc_device *rtc)
if (!device_can_wakeup(rtc->dev.parent))
return false;
- return rtc->ops->set_alarm != NULL;
+ return !!test_bit(RTC_FEATURE_ALARM, rtc->features);
}
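
Alarm capability is now a per-device runtime property in the rtc->features bitmap rather than "the driver has a set_alarm callback", which lets drivers keep their ops table constant and opt out at probe time. A sketch of the driver-side convention used by the ds1307 and ab-eoz9 hunks above (my_rtc_ops is a placeholder):

	rtc = devm_rtc_allocate_device(dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;		/* set_alarm may exist yet stay unused */
	if (client->irq <= 0)
		clear_bit(RTC_FEATURE_ALARM, rtc->features);

	return devm_rtc_register_device(rtc);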
static umode_t rtc_attr_is_visible(struct kobject *kobj,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 3f026021e95e..84f659cafe76 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1532,8 +1532,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
switch (action) {
case IO_SCH_ORPH_UNREG:
case IO_SCH_UNREG:
- if (!cdev)
- css_sch_device_unregister(sch);
+ css_sch_device_unregister(sch);
break;
case IO_SCH_ORPH_ATTACH:
case IO_SCH_UNREG_ATTACH:
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 175b82b98f36..a1f08e9aa064 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1429,7 +1429,7 @@ static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
struct qeth_qdio_out_q *queue,
- bool drain)
+ bool drain, int budget)
{
struct qeth_qdio_out_buffer *buf, *tmp;
@@ -1441,7 +1441,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
if (drain)
qeth_notify_skbs(queue, buf,
TX_NOTIFY_GENERALERROR);
- qeth_tx_complete_buf(buf, drain, 0);
+ qeth_tx_complete_buf(buf, drain, budget);
list_del(&buf->list_entry);
qeth_free_out_buf(buf);
@@ -1453,7 +1453,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
- qeth_tx_complete_pending_bufs(q->card, q, true);
+ qeth_tx_complete_pending_bufs(q->card, q, true, 0);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
@@ -2566,11 +2566,12 @@ static int qeth_ulp_setup(struct qeth_card *card)
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}
-static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
+ gfp_t gfp)
{
struct qeth_qdio_out_buffer *newbuf;
- newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
+ newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
if (!newbuf)
return -ENOMEM;
@@ -2605,7 +2606,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
goto err_qdio_bufs;
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- if (qeth_init_qdio_out_buf(q, i))
+ if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
goto err_out_bufs;
}
@@ -6080,7 +6081,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
/* Prepare the queue slot for immediate re-use: */
qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
- if (qeth_init_qdio_out_buf(queue, bidx)) {
+ if (qeth_alloc_out_buf(queue, bidx,
+ GFP_ATOMIC)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
}
@@ -6144,7 +6146,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int bytes = 0;
int completed;
- qeth_tx_complete_pending_bufs(card, queue, false);
+ qeth_tx_complete_pending_bufs(card, queue, false, budget);
if (qeth_out_queue_is_empty(queue)) {
napi_complete(napi);
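
Plumbing the NAPI budget down matters because the TX completion path ends in napi_consume_skb(), whose second argument selects the freeing strategy; the old hard-coded 0 forced the slow path even when called from qeth_tx_poll(). The convention, as a sketch:

	static void tx_complete(struct sk_buff *skb, int budget)
	{
		/*
		 * budget != 0: NAPI poll context, the skb may be recycled
		 * via the cheap per-CPU NAPI cache.
		 * budget == 0: not in NAPI context (e.g. queue drain),
		 * falls back to dev_consume_skb_any().
		 */
		napi_consume_skb(skb, budget);
	}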
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index dd441eaec66e..d308ff744a29 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1098,8 +1098,9 @@ walk_ipv6:
tmp.disp_flag = QETH_DISP_ADDR_ADD;
tmp.is_multicast = 1;
- read_lock_bh(&in6_dev->lock);
- for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
+ for (im6 = rtnl_dereference(in6_dev->mc_list);
+ im6;
+ im6 = rtnl_dereference(im6->next)) {
tmp.u.a6.addr = im6->mca_addr;
ipm = qeth_l3_find_addr_by_ip(card, &tmp);
@@ -1117,30 +1118,11 @@ walk_ipv6:
qeth_l3_ipaddr_hash(ipm));
}
- read_unlock_bh(&in6_dev->lock);
out:
return 0;
}
-static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
- return 0;
-}
-
-static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- return 0;
-}
-
static void qeth_l3_set_promisc_mode(struct qeth_card *card)
{
bool enable = card->dev->flags & IFF_PROMISC;
@@ -1861,8 +1843,6 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
- .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
};
@@ -1878,8 +1858,6 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
- .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
@@ -1933,8 +1911,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->needed_headroom = headroom;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX;
netif_keep_dst(card->dev);
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index ab42feab233f..77ccb96e5ed4 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR (or Symbios) 53c700 and 53c700-66 Driver
*
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index c9f8c497babb..2df347ca91af 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* Driver for 53c700 and 53c700-66 chips from NCR and Symbios
*
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
deleted file mode 100644
index 78dc863eff4f..000000000000
--- a/drivers/scsi/aacraid/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-o Testing
-o More testing
-o I/O size increase
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index cb74ab1ae5a4..9b89c26ccfdb 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -1058,9 +1058,3 @@ static void __exit exit_ch_module(void)
module_init(init_ch_module);
module_exit(exit_ch_module);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index dc36531d589e..222593bc2afe 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1649,8 +1649,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
}
/* Get the read only section offset */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
- PCI_VPD_LRDT_RO_DATA);
+ ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (unlikely(ro_start < 0)) {
dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
rc = -ENODEV;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 5d9eeac6717a..45ec9f16c085 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -616,6 +616,7 @@ static const struct file_operations esas2r_proc_fops = {
};
static const struct proc_ops esas2r_proc_ops = {
+ .proc_lseek = default_llseek,
.proc_ioctl = esas2r_proc_ioctl,
#ifdef CONFIG_COMPAT
.proc_compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 1a3c534826ba..bc33d54a4011 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -7099,23 +7099,3 @@ ips_init_phase2(int index)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
MODULE_VERSION(IPS_VER_STRING);
-
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 2
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -2
- * c-argdecl-indent: 2
- * c-label-offset: -2
- * c-continued-statement-offset: 2
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 6c0678fb9a67..65edf000e447 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -1211,23 +1211,3 @@ typedef struct {
IPS_COMPAT_TAMPA, \
IPS_COMPAT_KEYWEST \
}
-
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 2
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -2
- * c-argdecl-indent: 2
- * c-label-offset: -2
- * c-continued-statement-offset: 2
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index de71d240a56f..6d14a7a94d0b 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* PARISC LASI driver for the 53c700 chip
*
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 01a1bfb8ea2a..f0ef8f7f82c1 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -781,5 +781,3 @@ typedef struct {
} __attribute__ ((packed)) mbox_sgl32;
#endif // _MRAID_MBOX_DEFS_H_
-
-/* vim: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 3a7596e47a88..2ad0aa2f837d 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -282,5 +282,3 @@ struct mraid_pci_blk {
};
#endif // _MEGA_COMMON_H_
-
-// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index b1a2d3536add..145fde302d7d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -4068,5 +4068,3 @@ megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, cha
*/
module_init(megaraid_init);
module_exit(megaraid_exit);
-
-/* vim: set ts=8 sw=8 tw=78 ai si: */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 3e4347c6dab1..d2fe7f69cd5d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -230,5 +230,3 @@ typedef struct {
#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C)
#endif // _MEGARAID_H_
-
-// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8f35174a1f9a..928da90b79be 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4403,15 +4403,3 @@ MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index f1553a453616..0ffdb8f2995f 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -17,6 +17,7 @@
#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
+#include <linux/pagemap.h>
#include <linux/msdos_partition.h>
#include <asm/unaligned.h>
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 97c6f81b1d2a..678651b9b4dd 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/* -*- mode: c; c-basic-offset: 8 -*- */
/* SNI RM driver
*
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 4274bd1b0f99..96f74a1dc603 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -46,9 +46,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
struct device *dev = pcfg->dev;
- int window_count = 1;
- struct iommu_domain_geometry geom_attr;
- struct pamu_stash_attribute stash_attr;
int ret;
pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
@@ -56,38 +53,9 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
goto no_iommu;
}
- geom_attr.aperture_start = 0;
- geom_attr.aperture_end =
- ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
- geom_attr.force_aperture = true;
- ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
- &geom_attr);
+ ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
- ret);
- goto out_domain_free;
- }
- ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
- &window_count);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
- ret);
- goto out_domain_free;
- }
- stash_attr.cpu = cpu;
- stash_attr.cache = PAMU_ATTR_CACHE_L1;
- ret = iommu_domain_set_attr(pcfg->iommu_domain,
- DOMAIN_ATTR_FSL_PAMU_STASH,
- &stash_attr);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
- __func__, ret);
- goto out_domain_free;
- }
- ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
- IOMMU_READ | IOMMU_WRITE);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
__func__, ret);
goto out_domain_free;
}
@@ -97,14 +65,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
ret);
goto out_domain_free;
}
- ret = iommu_domain_set_attr(pcfg->iommu_domain,
- DOMAIN_ATTR_FSL_PAMU_ENABLE,
- &window_count);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
- ret);
- goto out_detach_device;
- }
no_iommu:
#endif
@@ -113,8 +73,6 @@ no_iommu:
return;
#ifdef CONFIG_FSL_PAMU
-out_detach_device:
- iommu_detach_device(pcfg->iommu_domain, NULL);
out_domain_free:
iommu_domain_free(pcfg->iommu_domain);
pcfg->iommu_domain = NULL;
@@ -169,15 +127,8 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
- struct pamu_stash_attribute stash_attr;
- int ret;
-
if (pcfg->iommu_domain) {
- stash_attr.cpu = cpu;
- stash_attr.cache = PAMU_ATTR_CACHE_L1;
- ret = iommu_domain_set_attr(pcfg->iommu_domain,
- DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
- if (ret < 0) {
+ if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
dev_err(pcfg->dev,
"Failed to update pamu stash setting\n");
return;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 6bd22359d411..8e3b78bb2ac2 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1904,7 +1904,7 @@ static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
arg = ret;
break;
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
ret = tegra_io_pad_is_powered(pmc, pad->id);
if (ret < 0)
return ret;
@@ -1941,7 +1941,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
arg = pinconf_to_config_argument(configs[i]);
switch (param) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (arg)
err = tegra_io_pad_power_disable(pad->id);
else
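
The two pmc.c hunks above follow the generic pinconf rename from
PIN_CONFIG_LOW_POWER_MODE to PIN_CONFIG_MODE_LOW_POWER. A minimal sketch of
dispatching on the renamed parameter in a pinconf .pin_config_set() path; the
foo_pad_power_*() helpers are hypothetical stand-ins for the Tegra ones:

	enum pin_config_param param = pinconf_to_config_param(configs[i]);
	u32 arg = pinconf_to_config_argument(configs[i]);
	int err;

	switch (param) {
	case PIN_CONFIG_MODE_LOW_POWER:
		/* a non-zero argument requests low-power mode */
		if (arg)
			err = foo_pad_power_disable(pad_id);
		else
			err = foo_pad_power_enable(pad_id);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}
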
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 64e8f0828e85..39dc02e366f4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1553,13 +1553,12 @@ static int spi_imx_slave_abort(struct spi_master *master)
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(spi_imx_dt_ids, &pdev->dev);
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
int ret, irq, spi_drctl;
- const struct spi_imx_devtype_data *devtype_data = of_id->data;
+ const struct spi_imx_devtype_data *devtype_data =
+ of_device_get_match_data(&pdev->dev);
bool slave_mode;
u32 val;
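
The spi-imx.c hunk above drops the explicit of_match_device() lookup in favour
of of_device_get_match_data(), which returns the matched entry's .data pointer
directly (or NULL when there is no match). A minimal sketch of the pattern;
foo_probe() and struct foo_devtype_data are hypothetical:

	#include <linux/of_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_devtype_data *devtype_data =
			of_device_get_match_data(&pdev->dev);

		if (!devtype_data)
			return -ENODEV;

		/* ... configure the controller from devtype_data ... */
		return 0;
	}
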
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 808e78d6cd98..b7ae5bdc4eb5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -76,8 +76,6 @@ source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
-source "drivers/staging/fsl-dpaa2/Kconfig"
-
source "drivers/staging/most/Kconfig"
source "drivers/staging/ks7010/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5a871f0ff2f4..075c979bfe7c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
-obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
deleted file mode 100644
index 244237bb068a..000000000000
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
-#
-
-config FSL_DPAA2
- bool "Freescale DPAA2 devices"
- depends on FSL_MC_BUS
- help
- Build drivers for Freescale DataPath Acceleration
- Architecture (DPAA2) family of SoCs.
-
-config FSL_DPAA2_ETHSW
- tristate "Freescale DPAA2 Ethernet Switch"
- depends on FSL_DPAA2
- depends on NET_SWITCHDEV
- help
- Driver for Freescale DPAA2 Ethernet Switch. Select
- BRIDGE to have support for bridge tools.
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
deleted file mode 100644
index 9645db7689c9..000000000000
--- a/drivers/staging/fsl-dpaa2/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
-#
-
-obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
deleted file mode 100644
index f6f2cf798faf..000000000000
--- a/drivers/staging/fsl-dpaa2/ethsw/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Freescale DPAA2 Ethernet Switch
-#
-# Copyright 2014-2017 Freescale Semiconductor Inc.
-# Copyright 2017-2018 NXP
-
-obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
-
-dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
diff --git a/drivers/staging/fsl-dpaa2/ethsw/README b/drivers/staging/fsl-dpaa2/ethsw/README
deleted file mode 100644
index b48dcbf7c5fb..000000000000
--- a/drivers/staging/fsl-dpaa2/ethsw/README
+++ /dev/null
@@ -1,106 +0,0 @@
-DPAA2 Ethernet Switch driver
-============================
-
-This file provides documentation for the DPAA2 Ethernet Switch driver
-
-
-Contents
-========
- Supported Platforms
- Architecture Overview
- Creating an Ethernet Switch
- Features
-
-
- Supported Platforms
-===================
-This driver provides networking support for Freescale LS2085A, LS2088A
-DPAA2 SoCs.
-
-
-Architecture Overview
-=====================
-The Ethernet Switch in the DPAA2 architecture consists of several hardware
-resources that provide the functionality. These are allocated and
-configured via the Management Complex (MC) portals. MC abstracts most of
-these resources as DPAA2 objects and exposes ABIs through which they can
-be configured and controlled.
-
-For a more detailed description of the DPAA2 architecture and its object
-abstractions see:
- drivers/staging/fsl-mc/README.txt
-
-The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
-
-Configuration interface:
-
- ---------------------
- | DPAA2 Switch driver |
- ---------------------
- .
- .
- ----------
- | DPSW API |
- ----------
- . software
- ================= . ==============
- . hardware
- ---------------------
- | MC hardware portals |
- ---------------------
- .
- .
- ------
- | DPSW |
- ------
-
-The driver uses the switch device driver model and exposes each switch port
-as a network interface, which can be included in a bridge. Traffic switched
-between ports is offloaded into the hardware. The exposed network interfaces
-are not used for I/O; they are used only for configuration. This limitation
-will be addressed in the future.
-
-The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
-
-
- [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
- : : : : : :
- : : : : : :
-[eth drv] [eth drv] [ ethsw drv ]
- : : : : : : kernel
-========================================================================
- : : : : : : hardware
- [DPNI] [DPNI] [============= DPSW =================]
- | | | | | |
- | ---------- | [DPMAC] [DPMAC]
- ------------------------------- | |
- | |
- [PHY] [PHY]
-
-For a more detailed description of the Ethernet switch device driver model
-see:
- Documentation/networking/switchdev.rst
-
-Creating an Ethernet Switch
-===========================
-A device is created for the switch objects probed on the MC bus. Each DPSW
-has a number of properties which determine the configuration options and
-associated hardware resources.
-
-A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
-be added to a container on the MC bus in one of two ways: statically,
-through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
-time; or created dynamically at runtime, via the DPAA2 objects APIs.
-
-Features
-========
-Driver configures DPSW to perform hardware switching offload of
-unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its
-ports.
-
-It allows configuration of hardware learning, flooding, multicast groups,
-port VLAN configuration and STP state.
-
-Static entries can be added/removed from the FDB.
-
-Hardware statistics for each port are provided through ethtool -S option.
diff --git a/drivers/staging/fsl-dpaa2/ethsw/TODO b/drivers/staging/fsl-dpaa2/ethsw/TODO
deleted file mode 100644
index 4d46857b0b2b..000000000000
--- a/drivers/staging/fsl-dpaa2/ethsw/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-* Add I/O capabilities on switch port netdevices. This will allow control
-traffic to reach the CPU.
-* Add ACL to redirect control traffic to CPU.
-* Add support for multiple FDBs and switch port partitioning
-* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
-need to be kept in sync with binary interface changes in MC
-* refine README file
-* cleanup
-
-NOTE: At least the first three items above are required before the DPAA2
-Ethernet Switch driver can move out of staging. Another requirement is that
-the dpio driver be moved to drivers/soc (this is required for I/O).
-
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
deleted file mode 100644
index 9cfd8a8e0197..000000000000
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+++ /dev/null
@@ -1,594 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
- *
- */
-
-#ifndef __FSL_DPSW_H
-#define __FSL_DPSW_H
-
-/* Data Path L2-Switch API
- * Contains API for handling DPSW topology and functionality
- */
-
-struct fsl_mc_io;
-
-/**
- * DPSW general definitions
- */
-
-/**
- * Maximum number of traffic class priorities
- */
-#define DPSW_MAX_PRIORITIES 8
-/**
- * Maximum number of interfaces
- */
-#define DPSW_MAX_IF 64
-
-int dpsw_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dpsw_id,
- u16 *token);
-
-int dpsw_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/**
- * DPSW options
- */
-
-/**
- * Disable flooding
- */
-#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
-/**
- * Disable Multicast
- */
-#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
-/**
- * Support control interface
- */
-#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
-/**
- * Disable flooding metering
- */
-#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
-/**
- * Enable metering
- */
-#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
-
-/**
- * enum dpsw_component_type - component type of a bridge
- * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
- * enterprise VLAN bridge or of a Provider Bridge used
- * to process C-tagged frames
- * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
- * Provider Bridge
- *
- */
-enum dpsw_component_type {
- DPSW_COMPONENT_TYPE_C_VLAN = 0,
- DPSW_COMPONENT_TYPE_S_VLAN
-};
-
-int dpsw_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dpsw_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dpsw_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/**
- * DPSW IRQ Index and Events
- */
-
-#define DPSW_IRQ_INDEX_IF 0x0000
-#define DPSW_IRQ_INDEX_L2SW 0x0001
-
-/**
- * IRQ event - Indicates that the link state changed
- */
-#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
-
-/**
- * struct dpsw_irq_cfg - IRQ configuration
- * @addr: Address that must be written to signal a message-based interrupt
- * @val: Value to write into irq_addr address
- * @irq_num: A user defined number associated with this IRQ
- */
-struct dpsw_irq_cfg {
- u64 addr;
- u32 val;
- int irq_num;
-};
-
-int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
-
-/**
- * struct dpsw_attr - Structure representing DPSW attributes
- * @id: DPSW object ID
- * @options: Enable/Disable DPSW features
- * @max_vlans: Maximum Number of VLANs
- * @max_meters_per_if: Number of meters per interface
- * @max_fdbs: Maximum Number of FDBs
- * @max_fdb_entries: Number of FDB entries for default FDB table;
- * 0 - indicates default 1024 entries.
- * @fdb_aging_time: Default FDB aging time for default FDB table;
- * 0 - indicates default 300 seconds
- * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
- * 0 - indicates default 32
- * @mem_size: DPSW frame storage memory size
- * @num_ifs: Number of interfaces
- * @num_vlans: Current number of VLANs
- * @num_fdbs: Current number of FDBs
- * @component_type: Component type of this bridge
- */
-struct dpsw_attr {
- int id;
- u64 options;
- u16 max_vlans;
- u8 max_meters_per_if;
- u8 max_fdbs;
- u16 max_fdb_entries;
- u16 fdb_aging_time;
- u16 max_fdb_mc_groups;
- u16 num_ifs;
- u16 mem_size;
- u16 num_vlans;
- u8 num_fdbs;
- enum dpsw_component_type component_type;
-};
-
-int dpsw_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpsw_attr *attr);
-
-/**
- * enum dpsw_action - Action selection for special/control frames
- * @DPSW_ACTION_DROP: Drop frame
- * @DPSW_ACTION_REDIRECT: Redirect frame to control port
- */
-enum dpsw_action {
- DPSW_ACTION_DROP = 0,
- DPSW_ACTION_REDIRECT = 1
-};
-
-/**
- * Enable auto-negotiation
- */
-#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
-/**
- * Enable half-duplex mode
- */
-#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-/**
- * Enable pause frames
- */
-#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
-/**
- * Enable asymmetric pause frames
- */
-#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-
-/**
- * struct dpsw_link_cfg - Structure representing DPSW link configuration
- * @rate: Rate
- * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
- */
-struct dpsw_link_cfg {
- u32 rate;
- u64 options;
-};
-
-int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- struct dpsw_link_cfg *cfg);
-/**
- * struct dpsw_link_state - Structure representing DPSW link state
- * @rate: Rate
- * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
- * @up: 0 - covers two cases: down and disconnected, 1 - up
- */
-struct dpsw_link_state {
- u32 rate;
- u64 options;
- u8 up;
-};
-
-int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- struct dpsw_link_state *state);
-
-int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- u8 en);
-
-int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- u8 en);
-
-/**
- * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
- * @pcp: Priority Code Point (PCP): a 3-bit field which refers
- * to the IEEE 802.1p priority
- * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
- * separately or in conjunction with PCP to indicate frames
- * eligible to be dropped in the presence of congestion
- * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
- * to which the frame belongs. The hexadecimal values
- * of 0x000 and 0xFFF are reserved;
- * all other values may be used as VLAN identifiers,
- * allowing up to 4,094 VLANs
- */
-struct dpsw_tci_cfg {
- u8 pcp;
- u8 dei;
- u16 vlan_id;
-};
-
-int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- const struct dpsw_tci_cfg *cfg);
-
-int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- struct dpsw_tci_cfg *cfg);
-
-/**
- * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
- * @DPSW_STP_STATE_BLOCKING: Blocking state
- * @DPSW_STP_STATE_LISTENING: Listening state
- * @DPSW_STP_STATE_LEARNING: Learning state
- * @DPSW_STP_STATE_FORWARDING: Forwarding state
- *
- */
-enum dpsw_stp_state {
- DPSW_STP_STATE_DISABLED = 0,
- DPSW_STP_STATE_LISTENING = 1,
- DPSW_STP_STATE_LEARNING = 2,
- DPSW_STP_STATE_FORWARDING = 3,
- DPSW_STP_STATE_BLOCKING = 0
-};
-
-/**
- * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
- * @vlan_id: VLAN ID STP state
- * @state: STP state
- */
-struct dpsw_stp_cfg {
- u16 vlan_id;
- enum dpsw_stp_state state;
-};
-
-int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- const struct dpsw_stp_cfg *cfg);
-
-/**
- * enum dpsw_accepted_frames - Types of frames to accept
- * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
- * priority tagged frames
- * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
- * Priority-Tagged frames received on this interface.
- *
- */
-enum dpsw_accepted_frames {
- DPSW_ADMIT_ALL = 1,
- DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
-};
-
-/**
- * enum dpsw_counter - Counters types
- * @DPSW_CNT_ING_FRAME: Counts ingress frames
- * @DPSW_CNT_ING_BYTE: Counts ingress bytes
- * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
- * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
- * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
- * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
- * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
- * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
- * @DPSW_CNT_EGR_FRAME: Counts egress frames
- * @DPSW_CNT_EGR_BYTE: Counts egress bytes
- * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
- * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
- * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames
- */
-enum dpsw_counter {
- DPSW_CNT_ING_FRAME = 0x0,
- DPSW_CNT_ING_BYTE = 0x1,
- DPSW_CNT_ING_FLTR_FRAME = 0x2,
- DPSW_CNT_ING_FRAME_DISCARD = 0x3,
- DPSW_CNT_ING_MCAST_FRAME = 0x4,
- DPSW_CNT_ING_MCAST_BYTE = 0x5,
- DPSW_CNT_ING_BCAST_FRAME = 0x6,
- DPSW_CNT_ING_BCAST_BYTES = 0x7,
- DPSW_CNT_EGR_FRAME = 0x8,
- DPSW_CNT_EGR_BYTE = 0x9,
- DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
- DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb,
- DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc,
-};
-
-int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- enum dpsw_counter type,
- u64 *counter);
-
-int dpsw_if_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id);
-
-int dpsw_if_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id);
-
-int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 if_id,
- u16 frame_length);
-
-/**
- * struct dpsw_vlan_cfg - VLAN Configuration
- * @fdb_id: Forwarding Data Base
- */
-struct dpsw_vlan_cfg {
- u16 fdb_id;
-};
-
-int dpsw_vlan_add(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_cfg *cfg);
-
-/**
- * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
- * @num_ifs: The number of interfaces that are assigned to the egress
- * list for this VLAN
- * @if_id: The set of interfaces that are
- * assigned to the egress list for this VLAN
- */
-struct dpsw_vlan_if_cfg {
- u16 num_ifs;
- u16 if_id[DPSW_MAX_IF];
-};
-
-int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg);
-
-int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg);
-
-int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg);
-
-int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id,
- const struct dpsw_vlan_if_cfg *cfg);
-
-int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 vlan_id);
-
-/**
- * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
- * @DPSW_FDB_ENTRY_STATIC: Static entry
- * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
- */
-enum dpsw_fdb_entry_type {
- DPSW_FDB_ENTRY_STATIC = 0,
- DPSW_FDB_ENTRY_DINAMIC = 1
-};
-
-/**
- * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
- * @type: Select static or dynamic entry
- * @mac_addr: MAC address
- * @if_egress: Egress interface ID
- */
-struct dpsw_fdb_unicast_cfg {
- enum dpsw_fdb_entry_type type;
- u8 mac_addr[6];
- u16 if_egress;
-};
-
-int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_unicast_cfg *cfg);
-
-int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_unicast_cfg *cfg);
-
-#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0)
-#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1)
-
-/**
- * struct fdb_dump_entry - fdb snapshot entry
- * @mac_addr: MAC address
- * @type: bit0 - DINAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0)
- * @if_info: unicast - egress interface, multicast - number of egress interfaces
- * @if_mask: multicast - egress interface mask
- */
-struct fdb_dump_entry {
- u8 mac_addr[6];
- u8 type;
- u8 if_info;
- u8 if_mask[8];
-};
-
-int dpsw_fdb_dump(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- u64 iova_addr,
- u32 iova_size,
- u16 *num_entries);
-
-/**
- * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
- * @type: Select static or dynamic entry
- * @mac_addr: MAC address
- * @num_ifs: Number of external and internal interfaces
- * @if_id: Egress interface IDs
- */
-struct dpsw_fdb_multicast_cfg {
- enum dpsw_fdb_entry_type type;
- u8 mac_addr[6];
- u16 num_ifs;
- u16 if_id[DPSW_MAX_IF];
-};
-
-int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_multicast_cfg *cfg);
-
-int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- const struct dpsw_fdb_multicast_cfg *cfg);
-
-/**
- * enum dpsw_fdb_learning_mode - Auto-learning modes
- * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
- * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
- * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
- * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
- *
- * NON-SECURE LEARNING
- * SMAC found DMAC found CTLU Action
- * v v Forward frame to
- * 1. DMAC destination
- * - v Forward frame to
- * 1. DMAC destination
- * 2. Control interface
- * v - Forward frame to
- * 1. Flooding list of interfaces
- * - - Forward frame to
- * 1. Flooding list of interfaces
- * 2. Control interface
- * SECURE LEARNING
- * SMAC found DMAC found CTLU Action
- * v v Forward frame to
- * 1. DMAC destination
- * - v Forward frame to
- * 1. Control interface
- * v - Forward frame to
- * 1. Flooding list of interfaces
- * - - Forward frame to
- * 1. Control interface
- */
-enum dpsw_fdb_learning_mode {
- DPSW_FDB_LEARNING_MODE_DIS = 0,
- DPSW_FDB_LEARNING_MODE_HW = 1,
- DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
- DPSW_FDB_LEARNING_MODE_SECURE = 3
-};
-
-int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u16 fdb_id,
- enum dpsw_fdb_learning_mode mode);
-
-/**
- * struct dpsw_fdb_attr - FDB Attributes
- * @max_fdb_entries: Number of FDB entries
- * @fdb_aging_time: Aging time in seconds
- * @learning_mode: Learning mode
- * @num_fdb_mc_groups: Current number of multicast groups
- * @max_fdb_mc_groups: Maximum number of multicast groups
- */
-struct dpsw_fdb_attr {
- u16 max_fdb_entries;
- u16 fdb_aging_time;
- enum dpsw_fdb_learning_mode learning_mode;
- u16 num_fdb_mc_groups;
- u16 max_fdb_mc_groups;
-};
-
-int dpsw_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver);
-
-int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- u16 if_id, u8 mac_addr[6]);
-
-int dpsw_if_get_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u16 if_id, u8 mac_addr[6]);
-
-int dpsw_if_set_primary_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u16 if_id, u8 mac_addr[6]);
-
-#endif /* __FSL_DPSW_H */
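
A minimal usage sketch of the command API declared above, assuming an MC portal
(struct fsl_mc_io) and a DPSW object id obtained from the fsl-mc bus;
dpsw_bringup() is a hypothetical helper, and a real driver would keep the token
open for follow-up commands rather than closing it immediately:

	static int dpsw_bringup(struct fsl_mc_io *mc_io, int dpsw_id)
	{
		struct dpsw_attr attr;
		u16 token;
		int err;

		err = dpsw_open(mc_io, 0, dpsw_id, &token);
		if (err)
			return err;

		err = dpsw_get_attributes(mc_io, 0, token, &attr);
		if (err)
			goto out_close;

		err = dpsw_enable(mc_io, 0, token);

	out_close:
		dpsw_close(mc_io, 0, token);
		return err;
	}
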
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
deleted file mode 100644
index 703055e063ff..000000000000
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ /dev/null
@@ -1,1839 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DPAA2 Ethernet Switch driver
- *
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/interrupt.h>
-#include <linux/msi.h>
-#include <linux/kthread.h>
-#include <linux/workqueue.h>
-
-#include <linux/fsl/mc.h>
-
-#include "ethsw.h"
-
-/* Minimal supported DPSW version */
-#define DPSW_MIN_VER_MAJOR 8
-#define DPSW_MIN_VER_MINOR 1
-
-#define DEFAULT_VLAN_ID 1
-
-static int dpaa2_switch_add_vlan(struct ethsw_core *ethsw, u16 vid)
-{
- int err;
-
- struct dpsw_vlan_cfg vcfg = {
- .fdb_id = 0,
- };
-
- err = dpsw_vlan_add(ethsw->mc_io, 0,
- ethsw->dpsw_handle, vid, &vcfg);
- if (err) {
- dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
- return err;
- }
- ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
-
- return 0;
-}
-
-static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
-{
- struct net_device *netdev = port_priv->netdev;
- struct dpsw_link_state state;
- int err;
-
- err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx, &state);
- if (err) {
- netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
- return true;
- }
-
- WARN_ONCE(state.up > 1, "Garbage read into link_state");
-
- return state.up ? true : false;
-}
-
-static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
-{
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct net_device *netdev = port_priv->netdev;
- struct dpsw_tci_cfg tci_cfg = { 0 };
- bool up;
- int err, ret;
-
- err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
- port_priv->idx, &tci_cfg);
- if (err) {
- netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
- return err;
- }
-
- tci_cfg.vlan_id = pvid;
-
- /* Interface needs to be down to change PVID */
- up = dpaa2_switch_port_is_up(port_priv);
- if (up) {
- err = dpsw_if_disable(ethsw->mc_io, 0,
- ethsw->dpsw_handle,
- port_priv->idx);
- if (err) {
- netdev_err(netdev, "dpsw_if_disable err %d\n", err);
- return err;
- }
- }
-
- err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
- port_priv->idx, &tci_cfg);
- if (err) {
- netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
- goto set_tci_error;
- }
-
- /* Delete previous PVID info and mark the new one */
- port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
- port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
- port_priv->pvid = pvid;
-
-set_tci_error:
- if (up) {
- ret = dpsw_if_enable(ethsw->mc_io, 0,
- ethsw->dpsw_handle,
- port_priv->idx);
- if (ret) {
- netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
- return ret;
- }
- }
-
- return err;
-}
-
-static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
- u16 vid, u16 flags)
-{
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct net_device *netdev = port_priv->netdev;
- struct dpsw_vlan_if_cfg vcfg;
- int err;
-
- if (port_priv->vlans[vid]) {
- netdev_warn(netdev, "VLAN %d already configured\n", vid);
- return -EEXIST;
- }
-
- vcfg.num_ifs = 1;
- vcfg.if_id[0] = port_priv->idx;
- err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
- if (err) {
- netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
- return err;
- }
-
- port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
-
- if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
- err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
- ethsw->dpsw_handle,
- vid, &vcfg);
- if (err) {
- netdev_err(netdev,
- "dpsw_vlan_add_if_untagged err %d\n", err);
- return err;
- }
- port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
- }
-
- if (flags & BRIDGE_VLAN_INFO_PVID) {
- err = dpaa2_switch_port_set_pvid(port_priv, vid);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int dpaa2_switch_set_learning(struct ethsw_core *ethsw, bool enable)
-{
- enum dpsw_fdb_learning_mode learn_mode;
- int err;
-
- if (enable)
- learn_mode = DPSW_FDB_LEARNING_MODE_HW;
- else
- learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
-
- err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
- learn_mode);
- if (err) {
- dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
- return err;
- }
- ethsw->learning = enable;
-
- return 0;
-}
-
-static int dpaa2_switch_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
-{
- int err;
-
- err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx, enable);
- if (err) {
- netdev_err(port_priv->netdev,
- "dpsw_if_set_flooding err %d\n", err);
- return err;
- }
- port_priv->flood = enable;
-
- return 0;
-}
-
-static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
-{
- struct dpsw_stp_cfg stp_cfg = {
- .state = state,
- };
- int err;
- u16 vid;
-
- if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
- return 0; /* Nothing to do */
-
- for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
- if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
- stp_cfg.vlan_id = vid;
- err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx, &stp_cfg);
- if (err) {
- netdev_err(port_priv->netdev,
- "dpsw_if_set_stp err %d\n", err);
- return err;
- }
- }
- }
-
- port_priv->stp_state = state;
-
- return 0;
-}
-
-static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
-{
- struct ethsw_port_priv *ppriv_local = NULL;
- int i, err;
-
- if (!ethsw->vlans[vid])
- return -ENOENT;
-
- err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
- if (err) {
- dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
- return err;
- }
- ethsw->vlans[vid] = 0;
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- ppriv_local = ethsw->ports[i];
- ppriv_local->vlans[vid] = 0;
- }
-
- return 0;
-}
-
-static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
-{
- struct dpsw_fdb_unicast_cfg entry = {0};
- int err;
-
- entry.if_egress = port_priv->idx;
- entry.type = DPSW_FDB_ENTRY_STATIC;
- ether_addr_copy(entry.mac_addr, addr);
-
- err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- 0, &entry);
- if (err)
- netdev_err(port_priv->netdev,
- "dpsw_fdb_add_unicast err %d\n", err);
- return err;
-}
-
-static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
-{
- struct dpsw_fdb_unicast_cfg entry = {0};
- int err;
-
- entry.if_egress = port_priv->idx;
- entry.type = DPSW_FDB_ENTRY_STATIC;
- ether_addr_copy(entry.mac_addr, addr);
-
- err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- 0, &entry);
-	/* Silently discard the error when the del command is issued multiple times */
- if (err && err != -ENXIO)
- netdev_err(port_priv->netdev,
- "dpsw_fdb_remove_unicast err %d\n", err);
- return err;
-}
-
-static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
-{
- struct dpsw_fdb_multicast_cfg entry = {0};
- int err;
-
- ether_addr_copy(entry.mac_addr, addr);
- entry.type = DPSW_FDB_ENTRY_STATIC;
- entry.num_ifs = 1;
- entry.if_id[0] = port_priv->idx;
-
- err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- 0, &entry);
-	/* Silently discard the error when the add command is issued multiple times */
- if (err && err != -ENXIO)
- netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
- err);
- return err;
-}
-
-static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
-{
- struct dpsw_fdb_multicast_cfg entry = {0};
- int err;
-
- ether_addr_copy(entry.mac_addr, addr);
- entry.type = DPSW_FDB_ENTRY_STATIC;
- entry.num_ifs = 1;
- entry.if_id[0] = port_priv->idx;
-
- err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- 0, &entry);
-	/* Silently discard the error when the del command is issued multiple times */
- if (err && err != -ENAVAIL)
- netdev_err(port_priv->netdev,
- "dpsw_fdb_remove_multicast err %d\n", err);
- return err;
-}
-
-static int dpaa2_switch_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
-{
- if (is_unicast_ether_addr(addr))
- return dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
- addr);
- else
- return dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
- addr);
-}
-
-static int dpaa2_switch_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- if (is_unicast_ether_addr(addr))
- return dpaa2_switch_port_fdb_del_uc(netdev_priv(dev),
- addr);
- else
- return dpaa2_switch_port_fdb_del_mc(netdev_priv(dev),
- addr);
-}
-
-static void dpaa2_switch_port_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- u64 tmp;
- int err;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_ING_FRAME, &stats->rx_packets);
- if (err)
- goto error;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_EGR_FRAME, &stats->tx_packets);
- if (err)
- goto error;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_ING_BYTE, &stats->rx_bytes);
- if (err)
- goto error;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
- if (err)
- goto error;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_ING_FRAME_DISCARD,
- &stats->rx_dropped);
- if (err)
- goto error;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_ING_FLTR_FRAME,
- &tmp);
- if (err)
- goto error;
- stats->rx_dropped += tmp;
-
- err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- DPSW_CNT_EGR_FRAME_DISCARD,
- &stats->tx_dropped);
- if (err)
- goto error;
-
- return;
-
-error:
- netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
-}
-
-static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
- int attr_id)
-{
- return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
-}
-
-static int dpaa2_switch_port_get_offload_stats(int attr_id,
- const struct net_device *netdev,
- void *sp)
-{
- switch (attr_id) {
- case IFLA_OFFLOAD_XSTATS_CPU_HIT:
- dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
- 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx,
- (u16)ETHSW_L2_MAX_FRM(mtu));
- if (err) {
- netdev_err(netdev,
- "dpsw_if_set_max_frame_length() err %d\n", err);
- return err;
- }
-
- netdev->mtu = mtu;
- return 0;
-}
-
-static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- struct dpsw_link_state state;
- int err;
-
- /* Interrupts are received even though no one issued an 'ifconfig up'
- * on the switch interface. Ignore these link state update interrupts
- */
- if (!netif_running(netdev))
- return 0;
-
- err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx, &state);
- if (err) {
- netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
- return err;
- }
-
- WARN_ONCE(state.up > 1, "Garbage read into link_state");
-
- if (state.up != port_priv->link_state) {
- if (state.up)
- netif_carrier_on(netdev);
- else
- netif_carrier_off(netdev);
- port_priv->link_state = state.up;
- }
-
- return 0;
-}
-
-static int dpaa2_switch_port_open(struct net_device *netdev)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- /* No need to allow Tx as control interface is disabled */
- netif_tx_stop_all_queues(netdev);
-
- /* Explicitly set carrier off, otherwise
- * netif_carrier_ok() will return true and cause 'ip link show'
- * to report the LOWER_UP flag, even though the link
- * notification wasn't even received.
- */
- netif_carrier_off(netdev);
-
- err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx);
- if (err) {
- netdev_err(netdev, "dpsw_if_enable err %d\n", err);
- return err;
- }
-
- /* sync carrier state */
- err = dpaa2_switch_port_carrier_state_sync(netdev);
- if (err) {
- netdev_err(netdev,
- "dpaa2_switch_port_carrier_state_sync err %d\n", err);
- goto err_carrier_sync;
- }
-
- return 0;
-
-err_carrier_sync:
- dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx);
- return err;
-}
-
-static int dpaa2_switch_port_stop(struct net_device *netdev)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx);
- if (err) {
- netdev_err(netdev, "dpsw_if_disable err %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static netdev_tx_t dpaa2_switch_port_dropframe(struct sk_buff *skb,
- struct net_device *netdev)
-{
- /* we don't support I/O for now, drop the frame */
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
-static int dpaa2_switch_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(dev);
-
- ppid->id_len = 1;
- ppid->id[0] = port_priv->ethsw_data->dev_id;
-
- return 0;
-}
-
-static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
- size_t len)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- err = snprintf(name, len, "p%d", port_priv->idx);
- if (err >= len)
- return -EINVAL;
-
- return 0;
-}
-
-struct ethsw_dump_ctx {
- struct net_device *dev;
- struct sk_buff *skb;
- struct netlink_callback *cb;
- int idx;
-};
-
-static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
- struct ethsw_dump_ctx *dump)
-{
- int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
- u32 portid = NETLINK_CB(dump->cb->skb).portid;
- u32 seq = dump->cb->nlh->nlmsg_seq;
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
-
- if (dump->idx < dump->cb->args[2])
- goto skip;
-
- nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
- sizeof(*ndm), NLM_F_MULTI);
- if (!nlh)
- return -EMSGSIZE;
-
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
-
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
- goto nla_put_failure;
-
- nlmsg_end(dump->skb, nlh);
-
-skip:
- dump->idx++;
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(dump->skb, nlh);
- return -EMSGSIZE;
-}
-
-static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
- struct ethsw_port_priv *port_priv)
-{
- int idx = port_priv->idx;
- int valid;
-
- if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
- valid = entry->if_info == port_priv->idx;
- else
- valid = entry->if_mask[idx / 8] & BIT(idx % 8);
-
- return valid;
-}
-
-static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *net_dev,
- struct net_device *filter_dev, int *idx)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct device *dev = net_dev->dev.parent;
- struct fdb_dump_entry *fdb_entries;
- struct fdb_dump_entry fdb_entry;
- struct ethsw_dump_ctx dump = {
- .dev = net_dev,
- .skb = skb,
- .cb = cb,
- .idx = *idx,
- };
- dma_addr_t fdb_dump_iova;
- u16 num_fdb_entries;
- u32 fdb_dump_size;
- int err = 0, i;
- u8 *dma_mem;
-
- fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
- dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
- if (!dma_mem)
- return -ENOMEM;
-
- fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, fdb_dump_iova)) {
- netdev_err(net_dev, "dma_map_single() failed\n");
- err = -ENOMEM;
- goto err_map;
- }
-
- err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
- fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
- if (err) {
- netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
- goto err_dump;
- }
-
- dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
-
- fdb_entries = (struct fdb_dump_entry *)dma_mem;
- for (i = 0; i < num_fdb_entries; i++) {
- fdb_entry = fdb_entries[i];
-
- if (!dpaa2_switch_port_fdb_valid_entry(&fdb_entry, port_priv))
- continue;
-
- err = dpaa2_switch_fdb_dump_nl(&fdb_entry, &dump);
- if (err)
- goto end;
- }
-
-end:
- *idx = dump.idx;
-
- kfree(dma_mem);
-
- return 0;
-
-err_dump:
-	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
-err_map:
- kfree(dma_mem);
- return err;
-}
-
-static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
-{
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct net_device *net_dev = port_priv->netdev;
- struct device *dev = net_dev->dev.parent;
- u8 mac_addr[ETH_ALEN];
- int err;
-
- if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
- return 0;
-
- /* Get firmware address, if any */
- err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
- port_priv->idx, mac_addr);
- if (err) {
- dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
- return err;
- }
-
- /* First check if firmware has any address configured by bootloader */
- if (!is_zero_ether_addr(mac_addr)) {
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
- } else {
- /* No MAC address configured, fill in net_dev->dev_addr
- * with a random one
- */
- eth_hw_addr_random(net_dev);
- dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
-
- /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
- * practical purposes, this will be our "permanent" mac address,
- * at least until the next reboot. This move will also permit
- * register_netdevice() to properly fill up net_dev->perm_addr.
- */
- net_dev->addr_assign_type = NET_ADDR_PERM;
- }
-
- return 0;
-}
-
-static const struct net_device_ops dpaa2_switch_port_ops = {
- .ndo_open = dpaa2_switch_port_open,
- .ndo_stop = dpaa2_switch_port_stop,
-
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = dpaa2_switch_port_get_stats,
- .ndo_change_mtu = dpaa2_switch_port_change_mtu,
- .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
- .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
- .ndo_fdb_add = dpaa2_switch_port_fdb_add,
- .ndo_fdb_del = dpaa2_switch_port_fdb_del,
- .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
-
- .ndo_start_xmit = dpaa2_switch_port_dropframe,
- .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
- .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
-};
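
For reference, a condensed sketch of how a port netdev is bound to the ops
table above during port probing (field names taken from the driver; the helper
itself is hypothetical):

	static int dpaa2_switch_port_sketch(struct ethsw_core *ethsw, u16 idx)
	{
		struct ethsw_port_priv *port_priv;
		struct net_device *netdev;
		int err;

		netdev = alloc_etherdev(sizeof(*port_priv));
		if (!netdev)
			return -ENOMEM;

		port_priv = netdev_priv(netdev);
		port_priv->netdev = netdev;
		port_priv->ethsw_data = ethsw;
		port_priv->idx = idx;
		netdev->netdev_ops = &dpaa2_switch_port_ops;

		err = register_netdev(netdev);
		if (err)
			free_netdev(netdev);
		return err;
	}
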
-
-static bool dpaa2_switch_port_dev_check(const struct net_device *netdev,
- struct notifier_block *nb)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-
- if (netdev->netdev_ops == &dpaa2_switch_port_ops &&
- (!nb || &port_priv->ethsw_data->port_nb == nb ||
- &port_priv->ethsw_data->port_switchdev_nb == nb ||
- &port_priv->ethsw_data->port_switchdevb_nb == nb))
- return true;
-
- return false;
-}
-
-static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
-{
- int i;
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
- dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
- }
-}
-
-static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
-{
- struct device *dev = (struct device *)arg;
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
-
- /* Mask the events and the if_id reserved bits to be cleared on read */
- u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
- int err;
-
- err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, &status);
- if (err) {
- dev_err(dev, "Can't get irq status (err %d)\n", err);
-
- err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
- if (err)
- dev_err(dev, "Can't clear irq status (err %d)\n", err);
- goto out;
- }
-
- if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
- dpaa2_switch_links_state_update(ethsw);
-
-out:
- return IRQ_HANDLED;
-}
-
-static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
-{
- struct device *dev = &sw_dev->dev;
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
- u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
- struct fsl_mc_device_irq *irq;
- int err;
-
- err = fsl_mc_allocate_irqs(sw_dev);
- if (err) {
- dev_err(dev, "MC irqs allocation failed\n");
- return err;
- }
-
- if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
- err = -EINVAL;
- goto free_irq;
- }
-
- err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, 0);
- if (err) {
- dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
- goto free_irq;
- }
-
- irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
-
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
- dpaa2_switch_irq0_handler_thread,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(dev), dev);
- if (err) {
- dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
- goto free_irq;
- }
-
- err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, mask);
- if (err) {
- dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
- goto free_devm_irq;
- }
-
- err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, 1);
- if (err) {
- dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
- goto free_devm_irq;
- }
-
- return 0;
-
-free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
-free_irq:
- fsl_mc_free_irqs(sw_dev);
- return err;
-}
-
-static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
-{
- struct device *dev = &sw_dev->dev;
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
- int err;
-
- err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, 0);
- if (err)
- dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
-
- fsl_mc_free_irqs(sw_dev);
-}
-
-static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
- u8 state)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-
- return dpaa2_switch_port_set_stp_state(port_priv, state);
-}
-
-static int
-dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_brport_flags flags)
-{
- if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
- return -EINVAL;
-
- return 0;
-}
-
-static int
-dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
- struct switchdev_brport_flags flags)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err = 0;
-
- if (flags.mask & BR_LEARNING) {
- /* Learning is enabled per switch */
- err = dpaa2_switch_set_learning(port_priv->ethsw_data,
- !!(flags.val & BR_LEARNING));
- if (err)
- return err;
- }
-
- if (flags.mask & BR_FLOOD) {
- err = dpaa2_switch_port_set_flood(port_priv,
- !!(flags.val & BR_FLOOD));
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int dpaa2_switch_port_attr_set(struct net_device *netdev,
- const struct switchdev_attr *attr)
-{
- int err = 0;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = dpaa2_switch_port_attr_stp_state_set(netdev,
- attr->u.stp_state);
- break;
- case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_pre_set(netdev,
- attr->u.brport_flags);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_set(netdev,
- attr->u.brport_flags);
- break;
- case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- /* VLANs are supported by default */
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int dpaa2_switch_port_vlans_add(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpsw_attr *attr = &ethsw->sw_attr;
- int err = 0;
-
- /* Make sure that the VLAN is not already configured
- * on the switch port
- */
- if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
- return -EEXIST;
-
- /* Check if there is space for a new VLAN */
- err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
- &ethsw->sw_attr);
- if (err) {
- netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
- return err;
- }
- if (attr->max_vlans - attr->num_vlans < 1)
- return -ENOSPC;
-
- if (!port_priv->ethsw_data->vlans[vlan->vid]) {
- /* this is a new VLAN */
- err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vlan->vid);
- if (err)
- return err;
-
- port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
- }
-
- return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
-}
-
-static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
- const unsigned char *addr)
-{
- struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
- struct netdev_hw_addr *ha;
-
- netif_addr_lock_bh(netdev);
- list_for_each_entry(ha, &list->list, list) {
- if (ether_addr_equal(ha->addr, addr)) {
- netif_addr_unlock_bh(netdev);
- return 1;
- }
- }
- netif_addr_unlock_bh(netdev);
- return 0;
-}
-
-static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- /* Check if address is already set on this port */
- if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
- return -EEXIST;
-
- err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
- if (err)
- return err;
-
- err = dev_mc_add(netdev, mdb->addr);
- if (err) {
- netdev_err(netdev, "dev_mc_add err %d\n", err);
- dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
- }
-
- return err;
-}
-
-static int dpaa2_switch_port_obj_add(struct net_device *netdev,
- const struct switchdev_obj *obj)
-{
- int err;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dpaa2_switch_port_vlans_add(netdev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
- break;
- case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dpaa2_switch_port_mdb_add(netdev,
- SWITCHDEV_OBJ_PORT_MDB(obj));
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
-{
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct net_device *netdev = port_priv->netdev;
- struct dpsw_vlan_if_cfg vcfg;
- int i, err;
-
- if (!port_priv->vlans[vid])
- return -ENOENT;
-
- if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
- err = dpaa2_switch_port_set_pvid(port_priv, 0);
- if (err)
- return err;
- }
-
- vcfg.num_ifs = 1;
- vcfg.if_id[0] = port_priv->idx;
- if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
- err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
- ethsw->dpsw_handle,
- vid, &vcfg);
- if (err) {
- netdev_err(netdev,
- "dpsw_vlan_remove_if_untagged err %d\n",
- err);
- }
- port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
- }
-
- if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
- err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
- vid, &vcfg);
- if (err) {
- netdev_err(netdev,
- "dpsw_vlan_remove_if err %d\n", err);
- return err;
- }
- port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
-
- /* Delete VLAN from switch if it is no longer configured on
- * any port
- */
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
- return 0; /* Found a port member in VID */
-
- ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
-
- err = dpaa2_switch_dellink(ethsw, vid);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int dpaa2_switch_port_vlans_del(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-
- if (netif_is_bridge_master(vlan->obj.orig_dev))
- return -EOPNOTSUPP;
-
- return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
-}
-
-static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
- return -ENOENT;
-
- err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
- if (err)
- return err;
-
- err = dev_mc_del(netdev, mdb->addr);
- if (err) {
- netdev_err(netdev, "dev_mc_del err %d\n", err);
- return err;
- }
-
- return err;
-}
-
-static int dpaa2_switch_port_obj_del(struct net_device *netdev,
- const struct switchdev_obj *obj)
-{
- int err;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
- break;
- case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
- return err;
-}
-
-static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
- struct switchdev_notifier_port_attr_info
- *port_attr_info)
-{
- int err;
-
- err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr);
-
- port_attr_info->handled = true;
- return notifier_from_errno(err);
-}
-
-/* For the moment, only flood setting needs to be updated */
-static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
- struct net_device *upper_dev)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct ethsw_port_priv *other_port_priv;
- struct net_device *other_dev;
- struct list_head *iter;
- int i, err;
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (ethsw->ports[i]->bridge_dev &&
- (ethsw->ports[i]->bridge_dev != upper_dev)) {
- netdev_err(netdev,
- "Only one bridge supported per DPSW object!\n");
- return -EINVAL;
- }
-
- netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
- if (!dpaa2_switch_port_dev_check(other_dev, NULL))
- continue;
-
- other_port_priv = netdev_priv(other_dev);
- if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
- netdev_err(netdev,
- "Interface from a different DPSW is in the bridge already!\n");
- return -EINVAL;
- }
- }
-
- /* Enable flooding */
- err = dpaa2_switch_port_set_flood(port_priv, 1);
- if (!err)
- port_priv->bridge_dev = upper_dev;
-
- return err;
-}
-
-static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int err;
-
- /* Disable flooding */
- err = dpaa2_switch_port_set_flood(port_priv, 0);
- if (!err)
- port_priv->bridge_dev = NULL;
-
- return err;
-}
-
-static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info = ptr;
- struct net_device *upper_dev;
- int err = 0;
-
- if (!dpaa2_switch_port_dev_check(netdev, nb))
- return NOTIFY_DONE;
-
- /* Handle just upper dev link/unlink for the moment */
- if (event == NETDEV_CHANGEUPPER) {
- upper_dev = info->upper_dev;
- if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
- err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
- else
- err = dpaa2_switch_port_bridge_leave(netdev);
- }
- }
-
- return notifier_from_errno(err);
-}
-
-struct ethsw_switchdev_event_work {
- struct work_struct work;
- struct switchdev_notifier_fdb_info fdb_info;
- struct net_device *dev;
- unsigned long event;
-};
-
-static void dpaa2_switch_event_work(struct work_struct *work)
-{
- struct ethsw_switchdev_event_work *switchdev_work =
- container_of(work, struct ethsw_switchdev_event_work, work);
- struct net_device *dev = switchdev_work->dev;
- struct switchdev_notifier_fdb_info *fdb_info;
- int err;
-
- rtnl_lock();
- fdb_info = &switchdev_work->fdb_info;
-
- switch (switchdev_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- if (is_unicast_ether_addr(fdb_info->addr))
- err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
- fdb_info->addr);
- else
- err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
- fdb_info->addr);
- if (err)
- break;
- fdb_info->offloaded = true;
- call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
- &fdb_info->info, NULL);
- break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- if (is_unicast_ether_addr(fdb_info->addr))
- dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
- else
- dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
- break;
- }
-
- rtnl_unlock();
- kfree(switchdev_work->fdb_info.addr);
- kfree(switchdev_work);
- dev_put(dev);
-}
-
-/* Called under rcu_read_lock() */
-static int dpaa2_switch_port_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
- struct ethsw_port_priv *port_priv = netdev_priv(dev);
- struct ethsw_switchdev_event_work *switchdev_work;
- struct switchdev_notifier_fdb_info *fdb_info = ptr;
- struct ethsw_core *ethsw = port_priv->ethsw_data;
-
- if (!dpaa2_switch_port_dev_check(dev, nb))
- return NOTIFY_DONE;
-
- if (event == SWITCHDEV_PORT_ATTR_SET)
- return dpaa2_switch_port_attr_set_event(dev, ptr);
-
- switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
- if (!switchdev_work)
- return NOTIFY_BAD;
-
- INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
- switchdev_work->dev = dev;
- switchdev_work->event = event;
-
- switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- memcpy(&switchdev_work->fdb_info, ptr,
- sizeof(switchdev_work->fdb_info));
- switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
- if (!switchdev_work->fdb_info.addr)
- goto err_addr_alloc;
-
- ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
- fdb_info->addr);
-
- /* Take a reference on the device to avoid being freed. */
- dev_hold(dev);
- break;
- default:
- kfree(switchdev_work);
- return NOTIFY_DONE;
- }
-
- queue_work(ethsw->workqueue, &switchdev_work->work);
-
- return NOTIFY_DONE;
-
-err_addr_alloc:
- kfree(switchdev_work);
- return NOTIFY_BAD;
-}
-
-static int dpaa2_switch_port_obj_event(unsigned long event,
- struct net_device *netdev,
- struct switchdev_notifier_port_obj_info *port_obj_info)
-{
- int err = -EOPNOTSUPP;
-
- switch (event) {
- case SWITCHDEV_PORT_OBJ_ADD:
- err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
- break;
- case SWITCHDEV_PORT_OBJ_DEL:
- err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
- break;
- }
-
- port_obj_info->handled = true;
- return notifier_from_errno(err);
-}
-
-static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
-
- if (!dpaa2_switch_port_dev_check(dev, nb))
- return NOTIFY_DONE;
-
- switch (event) {
- case SWITCHDEV_PORT_OBJ_ADD:
- case SWITCHDEV_PORT_OBJ_DEL:
- return dpaa2_switch_port_obj_event(event, dev, ptr);
- case SWITCHDEV_PORT_ATTR_SET:
- return dpaa2_switch_port_attr_set_event(dev, ptr);
- }
-
- return NOTIFY_DONE;
-}
-
-static int dpaa2_switch_register_notifier(struct device *dev)
-{
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
- int err;
-
- ethsw->port_nb.notifier_call = dpaa2_switch_port_netdevice_event;
- err = register_netdevice_notifier(&ethsw->port_nb);
- if (err) {
- dev_err(dev, "Failed to register netdev notifier\n");
- return err;
- }
-
- ethsw->port_switchdev_nb.notifier_call = dpaa2_switch_port_event;
- err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
- if (err) {
- dev_err(dev, "Failed to register switchdev notifier\n");
- goto err_switchdev_nb;
- }
-
- ethsw->port_switchdevb_nb.notifier_call = dpaa2_switch_port_blocking_event;
- err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
- if (err) {
- dev_err(dev, "Failed to register switchdev blocking notifier\n");
- goto err_switchdev_blocking_nb;
- }
-
- return 0;
-
-err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
-err_switchdev_nb:
- unregister_netdevice_notifier(&ethsw->port_nb);
- return err;
-}
-
-static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
-{
- ethsw->features = 0;
-
- if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
- ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
-}
-
-static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
-{
- struct device *dev = &sw_dev->dev;
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
- struct dpsw_stp_cfg stp_cfg;
- int err;
- u16 i;
-
- ethsw->dev_id = sw_dev->obj_desc.id;
-
- err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
- if (err) {
- dev_err(dev, "dpsw_open err %d\n", err);
- return err;
- }
-
- err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
- &ethsw->sw_attr);
- if (err) {
- dev_err(dev, "dpsw_get_attributes err %d\n", err);
- goto err_close;
- }
-
- err = dpsw_get_api_version(ethsw->mc_io, 0,
- &ethsw->major,
- &ethsw->minor);
- if (err) {
- dev_err(dev, "dpsw_get_api_version err %d\n", err);
- goto err_close;
- }
-
- /* Minimum supported DPSW version check */
- if (ethsw->major < DPSW_MIN_VER_MAJOR ||
- (ethsw->major == DPSW_MIN_VER_MAJOR &&
- ethsw->minor < DPSW_MIN_VER_MINOR)) {
- dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
- ethsw->major,
- ethsw->minor,
- DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
- err = -ENOTSUPP;
- goto err_close;
- }
-
- dpaa2_switch_detect_features(ethsw);
-
- err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
- if (err) {
- dev_err(dev, "dpsw_reset err %d\n", err);
- goto err_close;
- }
-
- err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
- DPSW_FDB_LEARNING_MODE_HW);
- if (err) {
- dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
- goto err_close;
- }
-
- stp_cfg.vlan_id = DEFAULT_VLAN_ID;
- stp_cfg.state = DPSW_STP_STATE_FORWARDING;
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
- &stp_cfg);
- if (err) {
- dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
- err, i);
- goto err_close;
- }
-
- err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
- ethsw->dpsw_handle, i, 1);
- if (err) {
- dev_err(dev,
- "dpsw_if_set_broadcast err %d for port %d\n",
- err, i);
- goto err_close;
- }
- }
-
- ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
- WQ_MEM_RECLAIM, "ethsw",
- ethsw->sw_attr.id);
- if (!ethsw->workqueue) {
- err = -ENOMEM;
- goto err_close;
- }
-
- err = dpaa2_switch_register_notifier(dev);
- if (err)
- goto err_destroy_ordered_workqueue;
-
- return 0;
-
-err_destroy_ordered_workqueue:
- destroy_workqueue(ethsw->workqueue);
-
-err_close:
- dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
- return err;
-}
-
-static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
-{
- struct net_device *netdev = port_priv->netdev;
- struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpsw_vlan_if_cfg vcfg;
- int err;
-
- /* Switch starts with all ports configured to VLAN 1. Need to
- * remove this setting to allow configuration at bridge join
- */
- vcfg.num_ifs = 1;
- vcfg.if_id[0] = port_priv->idx;
-
- err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DEFAULT_VLAN_ID, &vcfg);
- if (err) {
- netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
- err);
- return err;
- }
-
- err = dpaa2_switch_port_set_pvid(port_priv, 0);
- if (err)
- return err;
-
- err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DEFAULT_VLAN_ID, &vcfg);
- if (err)
- netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
-
- return err;
-}
-
-static void dpaa2_switch_unregister_notifier(struct device *dev)
-{
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
- struct notifier_block *nb;
- int err;
-
- nb = &ethsw->port_switchdevb_nb;
- err = unregister_switchdev_blocking_notifier(nb);
- if (err)
- dev_err(dev,
- "Failed to unregister switchdev blocking notifier (%d)\n",
- err);
-
- err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
- if (err)
- dev_err(dev,
- "Failed to unregister switchdev notifier (%d)\n", err);
-
- err = unregister_netdevice_notifier(&ethsw->port_nb);
- if (err)
- dev_err(dev,
- "Failed to unregister netdev notifier (%d)\n", err);
-}
-
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
-{
- struct device *dev = &sw_dev->dev;
- struct ethsw_core *ethsw = dev_get_drvdata(dev);
- int err;
-
- dpaa2_switch_unregister_notifier(dev);
-
- err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
- if (err)
- dev_warn(dev, "dpsw_close err %d\n", err);
-}
-
-static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
-{
- struct ethsw_port_priv *port_priv;
- struct ethsw_core *ethsw;
- struct device *dev;
- int i;
-
- dev = &sw_dev->dev;
- ethsw = dev_get_drvdata(dev);
-
- dpaa2_switch_teardown_irqs(sw_dev);
-
- dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- port_priv = ethsw->ports[i];
- unregister_netdev(port_priv->netdev);
- free_netdev(port_priv->netdev);
- }
- kfree(ethsw->ports);
-
- dpaa2_switch_takedown(sw_dev);
-
- destroy_workqueue(ethsw->workqueue);
-
- fsl_mc_portal_free(ethsw->mc_io);
-
- kfree(ethsw);
-
- dev_set_drvdata(dev, NULL);
-
- return 0;
-}
-
-static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
- u16 port_idx)
-{
- struct ethsw_port_priv *port_priv;
- struct device *dev = ethsw->dev;
- struct net_device *port_netdev;
- int err;
-
- port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
- if (!port_netdev) {
- dev_err(dev, "alloc_etherdev error\n");
- return -ENOMEM;
- }
-
- port_priv = netdev_priv(port_netdev);
- port_priv->netdev = port_netdev;
- port_priv->ethsw_data = ethsw;
-
- port_priv->idx = port_idx;
- port_priv->stp_state = BR_STATE_FORWARDING;
-
- /* Flooding is implicitly enabled */
- port_priv->flood = true;
-
- SET_NETDEV_DEV(port_netdev, dev);
- port_netdev->netdev_ops = &dpaa2_switch_port_ops;
- port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
-
- /* Set MTU limits */
- port_netdev->min_mtu = ETH_MIN_MTU;
- port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
-
- err = dpaa2_switch_port_init(port_priv, port_idx);
- if (err)
- goto err_port_probe;
-
- err = dpaa2_switch_port_set_mac_addr(port_priv);
- if (err)
- goto err_port_probe;
-
- err = register_netdev(port_netdev);
- if (err < 0) {
- dev_err(dev, "register_netdev error %d\n", err);
- goto err_port_probe;
- }
-
- ethsw->ports[port_idx] = port_priv;
-
- return 0;
-
-err_port_probe:
- free_netdev(port_netdev);
-
- return err;
-}
-
-static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
-{
- struct device *dev = &sw_dev->dev;
- struct ethsw_core *ethsw;
- int i, err;
-
- /* Allocate switch core*/
- ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
-
- if (!ethsw)
- return -ENOMEM;
-
- ethsw->dev = dev;
- dev_set_drvdata(dev, ethsw);
-
- err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
- &ethsw->mc_io);
- if (err) {
- if (err == -ENXIO)
- err = -EPROBE_DEFER;
- else
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
- goto err_free_drvdata;
- }
-
- err = dpaa2_switch_init(sw_dev);
- if (err)
- goto err_free_cmdport;
-
- /* DEFAULT_VLAN_ID is implicitly configured on the switch */
- ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
-
- /* Learning is implicitly enabled */
- ethsw->learning = true;
-
- ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
- GFP_KERNEL);
- if (!(ethsw->ports)) {
- err = -ENOMEM;
- goto err_takedown;
- }
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- err = dpaa2_switch_probe_port(ethsw, i);
- if (err)
- goto err_free_ports;
- }
-
- err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- if (err) {
- dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
- goto err_free_ports;
- }
-
- /* Make sure the switch ports are disabled at probe time */
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
-
- /* Setup IRQs */
- err = dpaa2_switch_setup_irqs(sw_dev);
- if (err)
- goto err_stop;
-
- dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
- return 0;
-
-err_stop:
- dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-
-err_free_ports:
- /* Cleanup registered ports only */
- for (i--; i >= 0; i--) {
- unregister_netdev(ethsw->ports[i]->netdev);
- free_netdev(ethsw->ports[i]->netdev);
- }
- kfree(ethsw->ports);
-
-err_takedown:
- dpaa2_switch_takedown(sw_dev);
-
-err_free_cmdport:
- fsl_mc_portal_free(ethsw->mc_io);
-
-err_free_drvdata:
- kfree(ethsw);
- dev_set_drvdata(dev, NULL);
-
- return err;
-}
-
-static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
- {
- .vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dpsw",
- },
- { .vendor = 0x0 }
-};
-MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
-
-static struct fsl_mc_driver dpaa2_switch_drv = {
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- },
- .probe = dpaa2_switch_probe,
- .remove = dpaa2_switch_remove,
- .match_id_table = dpaa2_switch_match_id_table
-};
-
-module_fsl_mc_driver(dpaa2_switch_drv);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
deleted file mode 100644
index 5f9211ccb1ef..000000000000
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DPAA2 Ethernet Switch declarations
- *
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
- *
- */
-
-#ifndef __ETHSW_H
-#define __ETHSW_H
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-#include <uapi/linux/if_bridge.h>
-#include <net/switchdev.h>
-#include <linux/if_bridge.h>
-
-#include "dpsw.h"
-
-/* Number of IRQs supported */
-#define DPSW_IRQ_NUM 2
-
-/* Port is member of VLAN */
-#define ETHSW_VLAN_MEMBER 1
-/* VLAN to be treated as untagged on egress */
-#define ETHSW_VLAN_UNTAGGED 2
-/* Untagged frames will be assigned to this VLAN */
-#define ETHSW_VLAN_PVID 4
-/* VLAN configured on the switch */
-#define ETHSW_VLAN_GLOBAL 8
-
-/* Maximum Frame Length supported by HW (currently 10k) */
-#define DPAA2_MFL (10 * 1024)
-#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
-#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
-
-#define ETHSW_FEATURE_MAC_ADDR BIT(0)
-
-extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
-
-struct ethsw_core;
-
-/* Per port private data */
-struct ethsw_port_priv {
- struct net_device *netdev;
- u16 idx;
- struct ethsw_core *ethsw_data;
- u8 link_state;
- u8 stp_state;
- bool flood;
-
- u8 vlans[VLAN_VID_MASK + 1];
- u16 pvid;
- struct net_device *bridge_dev;
-};
-
-/* Switch data */
-struct ethsw_core {
- struct device *dev;
- struct fsl_mc_io *mc_io;
- u16 dpsw_handle;
- struct dpsw_attr sw_attr;
- u16 major, minor;
- unsigned long features;
- int dev_id;
- struct ethsw_port_priv **ports;
-
- u8 vlans[VLAN_VID_MASK + 1];
- bool learning;
-
- struct notifier_block port_nb;
- struct notifier_block port_switchdev_nb;
- struct notifier_block port_switchdevb_nb;
- struct workqueue_struct *workqueue;
-};
-
-#endif /* __ETHSW_H */
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 7a09061cda57..df5ca3596470 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -1131,8 +1131,8 @@ static void tegra_channel_host1x_syncpts_free(struct tegra_vi_channel *chan)
int i;
for (i = 0; i < chan->numgangports; i++) {
- host1x_syncpt_free(chan->mw_ack_sp[i]);
- host1x_syncpt_free(chan->frame_start_sp[i]);
+ host1x_syncpt_put(chan->mw_ack_sp[i]);
+ host1x_syncpt_put(chan->frame_start_sp[i]);
}
}
@@ -1177,7 +1177,7 @@ static int tegra_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
mw_sp = host1x_syncpt_request(&vi->client, flags);
if (!mw_sp) {
dev_err(vi->dev, "failed to request memory ack syncpoint\n");
- host1x_syncpt_free(fs_sp);
+ host1x_syncpt_put(fs_sp);
ret = -ENOMEM;
goto free_syncpts;
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 7c60b0cd8bf7..dcbba9621b21 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -407,14 +407,10 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
int cvm_oct_common_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- const u8 *mac = NULL;
+ int ret;
- if (priv->of_node)
- mac = of_get_mac_address(priv->of_node);
-
- if (!IS_ERR_OR_NULL(mac))
- ether_addr_copy(dev->dev_addr, mac);
- else
+ ret = of_get_mac_address(priv->of_node, dev->dev_addr);
+ if (ret)
eth_hw_addr_random(dev);
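/*
 * API note, not part of the patch: in this kernel series
 * of_get_mac_address() takes the destination buffer as a second
 * argument and returns an int (0 on success), so callers like this
 * one replace the old "pointer plus IS_ERR_OR_NULL()" pattern with a
 * plain error check.
 */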
/*
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index e7bc1988124a..4b9fdf99981b 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -334,7 +334,6 @@ int wfx_probe(struct wfx_dev *wdev)
{
int i;
int err;
- const void *macaddr;
struct gpio_desc *gpio_saved;
// During the first part of boot, gpio_wakeup cannot yet be used. So
@@ -423,9 +422,9 @@ int wfx_probe(struct wfx_dev *wdev)
for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
eth_zero_addr(wdev->addresses[i].addr);
- macaddr = of_get_mac_address(wdev->dev->of_node);
- if (!IS_ERR_OR_NULL(macaddr)) {
- ether_addr_copy(wdev->addresses[i].addr, macaddr);
+ err = of_get_mac_address(wdev->dev->of_node,
+ wdev->addresses[i].addr);
+ if (!err) {
wdev->addresses[i].addr[ETH_ALEN - 1] += i;
} else {
ether_addr_copy(wdev->addresses[i].addr,
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index dffe3ba8c7c4..e61b91d14ad1 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -254,10 +254,8 @@ static int amlogic_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base)) {
- dev_err(dev, "failed to get io address\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
pdata->regmap = devm_regmap_init_mmio(dev, base,
pdata->data->regmap_config);
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 3199977f1e73..c8e4344d5a3d 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -184,7 +184,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
data->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
- dev_err(&pdev->dev, "Could not get registers: %d\n", err);
return err;
}
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 10af3341e5ea..eeb4e4b76c0b 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -13,10 +13,10 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
+#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -50,8 +50,6 @@ struct time_in_idle {
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
- * @id: unique integer value corresponding to each cpufreq_cooling_device
- * registered.
* @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
@@ -61,7 +59,6 @@ struct time_in_idle {
* @cdev: thermal_cooling_device pointer to keep track of the
* registered cooling device.
* @policy: cpufreq policy.
- * @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
* @qos_req: PM QoS contraint to apply
*
@@ -69,23 +66,17 @@ struct time_in_idle {
* cpufreq_cooling_device.
*/
struct cpufreq_cooling_device {
- int id;
u32 last_load;
unsigned int cpufreq_state;
unsigned int max_level;
struct em_perf_domain *em;
struct cpufreq_policy *policy;
- struct list_head node;
#ifndef CONFIG_SMP
struct time_in_idle *idle_time;
#endif
struct freq_qos_request qos_req;
};
-static DEFINE_IDA(cpufreq_ida);
-static DEFINE_MUTEX(cooling_list_lock);
-static LIST_HEAD(cpufreq_cdev_list);
-
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
* get_level: Find the level for a particular frequency
@@ -125,7 +116,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
{
int i;
- for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+ for (i = cpufreq_cdev->max_level; i > 0; i--) {
if (power >= cpufreq_cdev->em->table[i].power)
break;
}
@@ -528,11 +519,11 @@ __cpufreq_cooling_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
- char dev_name[THERMAL_NAME_LENGTH];
unsigned int i;
struct device *dev;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
+ char *name;
dev = get_cpu_device(policy->cpu);
if (unlikely(!dev)) {
@@ -567,16 +558,6 @@ __cpufreq_cooling_register(struct device_node *np,
/* max_level is an index, not a counter */
cpufreq_cdev->max_level = i - 1;
- ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
- if (ret < 0) {
- cdev = ERR_PTR(ret);
- goto free_idle_time;
- }
- cpufreq_cdev->id = ret;
-
- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
- cpufreq_cdev->id);
-
cooling_ops = &cpufreq_cooling_ops;
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
@@ -591,7 +572,7 @@ __cpufreq_cooling_register(struct device_node *np,
pr_err("%s: unsorted frequency tables are not supported\n",
__func__);
cdev = ERR_PTR(-EINVAL);
- goto remove_ida;
+ goto free_idle_time;
}
ret = freq_qos_add_request(&policy->constraints,
@@ -601,24 +582,25 @@ __cpufreq_cooling_register(struct device_node *np,
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
cdev = ERR_PTR(ret);
- goto remove_ida;
+ goto free_idle_time;
}
- cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
+ cdev = ERR_PTR(-ENOMEM);
+ name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev));
+ if (!name)
+ goto remove_qos_req;
+
+ cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev,
cooling_ops);
+ kfree(name);
+
if (IS_ERR(cdev))
goto remove_qos_req;
- mutex_lock(&cooling_list_lock);
- list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
- mutex_unlock(&cooling_list_lock);
-
return cdev;
remove_qos_req:
freq_qos_remove_request(&cpufreq_cdev->qos_req);
-remove_ida:
- ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time:
free_idle_time(cpufreq_cdev);
free_cdev:
@@ -706,13 +688,8 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_cdev = cdev->devdata;
- mutex_lock(&cooling_list_lock);
- list_del(&cpufreq_cdev->node);
- mutex_unlock(&cooling_list_lock);
-
thermal_cooling_device_unregister(cdev);
freq_qos_remove_request(&cpufreq_cdev->qos_req);
- ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time(cpufreq_cdev);
kfree(cpufreq_cdev);
}
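The same rename pattern recurs in the cpuidle and devfreq cooling diffs below; a sketch of the visible effect, assuming the standard "cpuN" naming of CPU devices:

	/*
	 * Illustration only: for policy->cpu == 0,
	 * kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev)) yields the
	 * stable, topology-derived name "cpufreq-cpu0", where the removed
	 * IDA scheme produced "thermal-cpufreq-N" in registration order.
	 */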
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
index 7ecab4b16b29..4f41102e8b16 100644
--- a/drivers/thermal/cpuidle_cooling.c
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -9,9 +9,9 @@
#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/idle_inject.h>
-#include <linux/idr.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>
@@ -26,8 +26,6 @@ struct cpuidle_cooling_device {
unsigned long state;
};
-static DEFINE_IDA(cpuidle_ida);
-
/**
* cpuidle_cooling_runtime - Running time computation
* @idle_duration_us: CPU idle time to inject in microseconds
@@ -174,10 +172,11 @@ static int __cpuidle_cooling_register(struct device_node *np,
struct idle_inject_device *ii_dev;
struct cpuidle_cooling_device *idle_cdev;
struct thermal_cooling_device *cdev;
+ struct device *dev;
unsigned int idle_duration_us = TICK_USEC;
unsigned int latency_us = UINT_MAX;
- char dev_name[THERMAL_NAME_LENGTH];
- int id, ret;
+ char *name;
+ int ret;
idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL);
if (!idle_cdev) {
@@ -185,16 +184,10 @@ static int __cpuidle_cooling_register(struct device_node *np,
goto out;
}
- id = ida_simple_get(&cpuidle_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto out_kfree;
- }
-
ii_dev = idle_inject_register(drv->cpumask);
if (!ii_dev) {
ret = -EINVAL;
- goto out_id;
+ goto out_kfree;
}
of_property_read_u32(np, "duration-us", &idle_duration_us);
@@ -205,24 +198,32 @@ static int __cpuidle_cooling_register(struct device_node *np,
idle_cdev->ii_dev = ii_dev;
- snprintf(dev_name, sizeof(dev_name), "thermal-idle-%d", id);
+ dev = get_cpu_device(cpumask_first(drv->cpumask));
- cdev = thermal_of_cooling_device_register(np, dev_name, idle_cdev,
+ name = kasprintf(GFP_KERNEL, "idle-%s", dev_name(dev));
+ if (!name) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ cdev = thermal_of_cooling_device_register(np, name, idle_cdev,
&cpuidle_cooling_ops);
if (IS_ERR(cdev)) {
ret = PTR_ERR(cdev);
- goto out_unregister;
+ goto out_kfree_name;
}
pr_debug("%s: Idle injection set with idle duration=%u, latency=%u\n",
- dev_name, idle_duration_us, latency_us);
+ name, idle_duration_us, latency_us);
+
+ kfree(name);
return 0;
+out_kfree_name:
+ kfree(name);
out_unregister:
idle_inject_unregister(ii_dev);
-out_id:
- ida_simple_remove(&cpuidle_ida, id);
out_kfree:
kfree(idle_cdev);
out:
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index fed3121ff2a1..3a788ac4f525 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -14,7 +14,6 @@
#include <linux/devfreq_cooling.h>
#include <linux/energy_model.h>
#include <linux/export.h>
-#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
@@ -25,11 +24,8 @@
#define HZ_PER_KHZ 1000
#define SCALE_ERROR_MITIGATION 100
-static DEFINE_IDA(devfreq_ida);
-
/**
* struct devfreq_cooling_device - Devfreq cooling device
- * @id: unique integer value corresponding to each
* devfreq_cooling_device registered.
* @cdev: Pointer to associated thermal cooling device.
* @devfreq: Pointer to associated devfreq device.
@@ -51,7 +47,6 @@ static DEFINE_IDA(devfreq_ida);
* @em_pd: Energy Model for the associated Devfreq device
*/
struct devfreq_cooling_device {
- int id;
struct thermal_cooling_device *cdev;
struct devfreq *devfreq;
unsigned long cooling_state;
@@ -363,7 +358,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct thermal_cooling_device *cdev;
struct device *dev = df->dev.parent;
struct devfreq_cooling_device *dfc;
- char dev_name[THERMAL_NAME_LENGTH];
+ char *name;
int err, num_opps;
dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
@@ -407,30 +402,27 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
if (err < 0)
goto free_table;
- err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL);
- if (err < 0)
+ err = -ENOMEM;
+ name = kasprintf(GFP_KERNEL, "devfreq-%s", dev_name(dev));
+ if (!name)
goto remove_qos_req;
- dfc->id = err;
-
- snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
-
- cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
+ cdev = thermal_of_cooling_device_register(np, name, dfc,
&devfreq_cooling_ops);
+ kfree(name);
+
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev,
"Failed to register devfreq cooling device (%d)\n",
err);
- goto release_ida;
+ goto remove_qos_req;
}
dfc->cdev = cdev;
return cdev;
-release_ida:
- ida_simple_remove(&devfreq_ida, dfc->id);
remove_qos_req:
dev_pm_qos_remove_request(&dfc->req_max_freq);
free_table:
@@ -527,7 +519,6 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
dev = dfc->devfreq->dev.parent;
thermal_cooling_device_unregister(dfc->cdev);
- ida_simple_remove(&devfreq_ida, dfc->id);
dev_pm_qos_remove_request(&dfc->req_max_freq);
em_dev_unregister_perf_domain(dev);
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index aaa07180ab48..1e5abf4822be 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
+ mutex_lock(&tz->lock);
+
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -105,11 +107,12 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
instance->target = get_target_state(tz, cdev, percentage,
cur_trip_level);
- mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false;
- mutex_unlock(&instance->cdev->lock);
- thermal_cdev_update(cdev);
+ mutex_lock(&cdev->lock);
+ __thermal_cdev_update(cdev);
+ mutex_unlock(&cdev->lock);
}
+
+ mutex_unlock(&tz->lock);
return 0;
}
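A reading of the locking rework above, not additional patch content:

	/*
	 * tz->thermal_instances is now walked with tz->lock held for the
	 * whole throttle pass, and each cooling device is refreshed through
	 * __thermal_cdev_update() under cdev->lock, replacing the old
	 * "updated = false, then thermal_cdev_update()" two-step.
	 */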
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 92acae53df49..13e375751d22 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -301,9 +301,8 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
instance->target = clamp_val(state, instance->lower, instance->upper);
mutex_lock(&cdev->lock);
- cdev->updated = false;
+ __thermal_cdev_update(cdev);
mutex_unlock(&cdev->lock);
- thermal_cdev_update(cdev);
return 0;
}
@@ -374,9 +373,11 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
*/
extra_power = min(extra_power, capped_extra_power);
if (capped_extra_power > 0)
- for (i = 0; i < num_actors; i++)
- granted_power[i] += (extra_actor_power[i] *
- extra_power) / capped_extra_power;
+ for (i = 0; i < num_actors; i++) {
+ u64 extra_range = (u64)extra_actor_power[i] * extra_power;
+ granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range,
+ capped_extra_power);
+ }
}
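/*
 * Worked example for the u64 widening above (illustration, not part
 * of the patch): extra_actor_power[i] and extra_power are u32
 * milliwatt values, so 100000 * 100000 = 1e10 exceeds U32_MAX
 * (~4.29e9) and would wrap silently before the division in 32-bit
 * arithmetic. DIV_ROUND_CLOSEST_ULL() also rounds to nearest rather
 * than truncating, keeping the granted total closer to extra_power.
 */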
static int allocate_power(struct thermal_zone_device *tz,
@@ -569,22 +570,33 @@ static void reset_pid_controller(struct power_allocator_params *params)
params->prev_err = 0;
}
-static void allow_maximum_power(struct thermal_zone_device *tz)
+static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
{
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ u32 req_power;
mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+
if ((instance->trip != params->trip_max_desired_temperature) ||
(!cdev_is_power_actor(instance->cdev)))
continue;
instance->target = 0;
mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false;
+ /*
+ * Update the cooling device's local stats, to avoid periods of
+ * dozens of seconds during which they have not been maintained.
+ */
+ cdev->ops->get_requested_power(cdev, &req_power);
+
+ if (update)
+ __thermal_cdev_update(instance->cdev);
+
mutex_unlock(&instance->cdev->lock);
- thermal_cdev_update(instance->cdev);
}
mutex_unlock(&tz->lock);
}
@@ -698,6 +710,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
int ret;
int switch_on_temp, control_temp;
struct power_allocator_params *params = tz->governor_data;
+ bool update;
/*
* We get called for every trip point but we only need to do
@@ -709,9 +722,10 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
&switch_on_temp);
if (!ret && (tz->temperature < switch_on_temp)) {
+ update = (tz->last_temperature >= switch_on_temp);
tz->passive = 0;
reset_pid_controller(params);
- allow_maximum_power(tz);
+ allow_maximum_power(tz, update);
return 0;
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index ee05950afd2f..9a21ac0ceb11 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -1,7 +1,7 @@
/*
- * Hisilicon thermal sensor driver
+ * HiSilicon thermal sensor driver
*
- * Copyright (c) 2014-2015 Hisilicon Limited.
+ * Copyright (c) 2014-2015 HiSilicon Limited.
* Copyright (c) 2014-2015 Linaro Limited.
*
* Xinwei Kong <kong.kongxinwei@hisilicon.com>
@@ -572,10 +572,8 @@ static int hisi_thermal_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
+ if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
- }
ret = data->ops->probe(data);
if (ret)
@@ -672,5 +670,5 @@ module_platform_driver(hisi_thermal_driver);
MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
-MODULE_DESCRIPTION("Hisilicon thermal driver");
+MODULE_DESCRIPTION("HiSilicon thermal driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index ce4f59213c7a..e4299ca3423c 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -79,3 +79,14 @@ config INTEL_PCH_THERMAL
Enable this to support thermal reporting on certain Intel PCHs.
Thermal reporting device will provide temperature reading,
programmable trip points and other information.
+
+config INTEL_TCC_COOLING
+ tristate "Intel TCC offset cooling Driver"
+ depends on X86
+ help
+ Enable this to support system cooling by adjusting the effective TCC
+ activation temperature via the TCC Offset register, which is widely
+ supported on modern Intel platforms.
+ Note that how quickly the setting takes effect, and how much the
+ CPU frequency is reduced, can differ from platform to platform.
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index ff2ad30ef397..5ff2afa388f7 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
+obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
new file mode 100644
index 000000000000..8ec10d55d421
--- /dev/null
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cooling device driver that activates processor throttling by
+ * programming the TCC Offset register.
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <asm/cpu_device_id.h>
+
+#define TCC_SHIFT 24
+#define TCC_MASK (0x3fULL << TCC_SHIFT)
+#define TCC_PROGRAMMABLE BIT(30)
+
+static struct thermal_cooling_device *tcc_cdev;
+
+static int tcc_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = TCC_MASK >> TCC_SHIFT;
+ return 0;
+}
+
+static int tcc_offset_update(int tcc)
+{
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ val &= ~TCC_MASK;
+ val |= tcc << TCC_SHIFT;
+
+ err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tcc_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ *state = (val & TCC_MASK) >> TCC_SHIFT;
+ return 0;
+}
+
+static int tcc_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ return tcc_offset_update(state);
+}
+
+static const struct thermal_cooling_device_ops tcc_cooling_ops = {
+ .get_max_state = tcc_get_max_state,
+ .get_cur_state = tcc_get_cur_state,
+ .set_cur_state = tcc_set_cur_state,
+};
+
+static const struct x86_cpu_id tcc_ids[] __initconst = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, tcc_ids);
+
+static int __init tcc_cooling_init(void)
+{
+ const struct x86_cpu_id *id;
+ u64 val;
+ int err;
+
+ id = x86_match_cpu(tcc_ids);
+ if (!id)
+ return -ENODEV;
+
+ err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ if (err)
+ return err;
+
+ if (!(val & TCC_PROGRAMMABLE))
+ return -ENODEV;
+
+ pr_info("Programmable TCC Offset detected\n");
+
+ tcc_cdev = thermal_cooling_device_register("TCC Offset", NULL,
+ &tcc_cooling_ops);
+ if (IS_ERR(tcc_cdev))
+ return PTR_ERR(tcc_cdev);
+
+ return 0;
+}
+
+module_init(tcc_cooling_init)
+
+static void __exit tcc_cooling_exit(void)
+{
+ thermal_cooling_device_unregister(tcc_cdev);
+}
+
+module_exit(tcc_cooling_exit)
+
+MODULE_DESCRIPTION("TCC offset cooling device Driver");
+MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 149c6d7fd5a0..97e8678ccf0e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -573,12 +573,12 @@ static int raw_to_mcelsius_v1(struct mtk_thermal *mt, int sensno, s32 raw)
static int raw_to_mcelsius_v2(struct mtk_thermal *mt, int sensno, s32 raw)
{
- s32 format_1 = 0;
- s32 format_2 = 0;
- s32 g_oe = 1;
- s32 g_gain = 1;
- s32 g_x_roomt = 0;
- s32 tmp = 0;
+ s32 format_1;
+ s32 format_2;
+ s32 g_oe;
+ s32 g_gain;
+ s32 g_x_roomt;
+ s32 tmp;
if (raw == 0)
return 0;
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 6dc879fea9c8..7419e196dbb0 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -17,6 +17,7 @@
#include "../thermal_core.h"
+#define QPNP_TM_REG_DIG_MAJOR 0x01
#define QPNP_TM_REG_TYPE 0x04
#define QPNP_TM_REG_SUBTYPE 0x05
#define QPNP_TM_REG_STATUS 0x08
@@ -38,26 +39,30 @@
#define ALARM_CTRL_FORCE_ENABLE BIT(7)
-/*
- * Trip point values based on threshold control
- * 0 = {105 C, 125 C, 145 C}
- * 1 = {110 C, 130 C, 150 C}
- * 2 = {115 C, 135 C, 155 C}
- * 3 = {120 C, 140 C, 160 C}
-*/
-#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
-#define TEMP_STAGE_HYSTERESIS 2000
+#define THRESH_COUNT 4
+#define STAGE_COUNT 3
+
+/* Over-temperature trip point values in mC */
+static const long temp_map_gen1[THRESH_COUNT][STAGE_COUNT] = {
+ { 105000, 125000, 145000 },
+ { 110000, 130000, 150000 },
+ { 115000, 135000, 155000 },
+ { 120000, 140000, 160000 },
+};
+
+static const long temp_map_gen2_v1[THRESH_COUNT][STAGE_COUNT] = {
+ { 90000, 110000, 140000 },
+ { 95000, 115000, 145000 },
+ { 100000, 120000, 150000 },
+ { 105000, 125000, 155000 },
+};
-#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
-#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
+#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
#define THRESH_MIN 0
#define THRESH_MAX 3
-/* Stage 2 Threshold Min: 125 C */
-#define STAGE2_THRESHOLD_MIN 125000
-/* Stage 2 Threshold Max: 140 C */
-#define STAGE2_THRESHOLD_MAX 140000
+#define TEMP_STAGE_HYSTERESIS 2000
/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP 37000
@@ -77,6 +82,7 @@ struct qpnp_tm_chip {
bool initialized;
struct iio_channel *adc;
+ const long (*temp_map)[THRESH_COUNT][STAGE_COUNT];
};
/* This array maps from GEN2 alarm state to GEN1 alarm stage */
@@ -101,6 +107,23 @@ static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
}
/**
+ * qpnp_tm_decode_temp() - return temperature in mC corresponding to the
+ * specified over-temperature stage
+ * @chip: Pointer to the qpnp_tm chip
+ * @stage: Over-temperature stage
+ *
+ * Return: temperature in mC
+ */
+static long qpnp_tm_decode_temp(struct qpnp_tm_chip *chip, unsigned int stage)
+{
+ if (!chip->temp_map || chip->thresh >= THRESH_COUNT || stage == 0 ||
+ stage > STAGE_COUNT)
+ return 0;
+
+ return (*chip->temp_map)[chip->thresh][stage - 1];
+}
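/*
 * Worked example (illustration, not part of the patch): with
 * temp_map_gen1 selected and chip->thresh == 1, the row is
 * { 110000, 130000, 150000 }, so qpnp_tm_decode_temp(chip, 2)
 * returns 130000 mC; stage 0, stage > 3 and thresh > 3 all fall
 * back to 0.
 */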
+
+/**
* qpnp_tm_get_temp_stage() - return over-temperature stage
* @chip: Pointer to the qpnp_tm chip
*
@@ -149,14 +172,12 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
if (stage_new > stage_old) {
/* increasing stage, use lower bound */
- chip->temp = (stage_new - 1) * TEMP_STAGE_STEP +
- chip->thresh * TEMP_THRESH_STEP +
- TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ chip->temp = qpnp_tm_decode_temp(chip, stage_new)
+ + TEMP_STAGE_HYSTERESIS;
} else if (stage_new < stage_old) {
/* decreasing stage, use upper bound */
- chip->temp = stage_new * TEMP_STAGE_STEP +
- chip->thresh * TEMP_THRESH_STEP -
- TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ chip->temp = qpnp_tm_decode_temp(chip, stage_new + 1)
+ - TEMP_STAGE_HYSTERESIS;
}
chip->stage = stage;
@@ -199,26 +220,28 @@ static int qpnp_tm_get_temp(void *data, int *temp)
static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
int temp)
{
- u8 reg;
+ long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1];
+ long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1];
bool disable_s2_shutdown = false;
+ u8 reg;
WARN_ON(!mutex_is_locked(&chip->lock));
/*
* Default: S2 and S3 shutdown enabled, thresholds at
- * 105C/125C/145C, monitoring at 25Hz
+ * lowest threshold set, monitoring at 25Hz
*/
reg = SHUTDOWN_CTRL1_RATE_25HZ;
if (temp == THERMAL_TEMP_INVALID ||
- temp < STAGE2_THRESHOLD_MIN) {
+ temp < stage2_threshold_min) {
chip->thresh = THRESH_MIN;
goto skip;
}
- if (temp <= STAGE2_THRESHOLD_MAX) {
+ if (temp <= stage2_threshold_max) {
chip->thresh = THRESH_MAX -
- ((STAGE2_THRESHOLD_MAX - temp) /
+ ((stage2_threshold_max - temp) /
TEMP_THRESH_STEP);
disable_s2_shutdown = true;
} else {
@@ -326,9 +349,7 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
? chip->stage : alarm_state_map[chip->stage];
if (stage)
- chip->temp = chip->thresh * TEMP_THRESH_STEP +
- (stage - 1) * TEMP_STAGE_STEP +
- TEMP_THRESH_MIN;
+ chip->temp = qpnp_tm_decode_temp(chip, stage);
crit_temp = qpnp_tm_get_critical_trip_temp(chip);
ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
@@ -350,7 +371,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
{
struct qpnp_tm_chip *chip;
struct device_node *node;
- u8 type, subtype;
+ u8 type, subtype, dig_major;
u32 res;
int ret, irq;
@@ -400,6 +421,12 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return ret;
}
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MAJOR, &dig_major);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not read dig_major\n");
+ return ret;
+ }
+
if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
&& subtype != QPNP_TM_SUBTYPE_GEN2)) {
dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
@@ -408,6 +435,10 @@ static int qpnp_tm_probe(struct platform_device *pdev)
}
chip->subtype = subtype;
+ if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 1)
+ chip->temp_map = &temp_map_gen2_v1;
+ else
+ chip->temp_map = &temp_map_gen1;
/*
* Register the sensor before initializing the hardware to be able to
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 2a28a5af209e..67c1748cdf73 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -10,8 +10,6 @@
#include <linux/thermal.h>
#include "tsens.h"
-#define CAL_MDEGC 30000
-
#define CONFIG_ADDR 0x3640
#define CONFIG_ADDR_8660 0x3620
/* CONFIG_ADDR bitmasks */
@@ -21,40 +19,38 @@
#define CONFIG_SHIFT_8660 28
#define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660)
-#define STATUS_CNTL_ADDR_8064 0x3660
#define CNTL_ADDR 0x3620
/* CNTL_ADDR bitmasks */
#define EN BIT(0)
#define SW_RST BIT(1)
-#define SENSOR0_EN BIT(3)
+
+#define MEASURE_PERIOD BIT(18)
#define SLP_CLK_ENA BIT(26)
#define SLP_CLK_ENA_8660 BIT(24)
-#define MEASURE_PERIOD 1
#define SENSOR0_SHIFT 3
-/* INT_STATUS_ADDR bitmasks */
-#define MIN_STATUS_MASK BIT(0)
-#define LOWER_STATUS_CLR BIT(1)
-#define UPPER_STATUS_CLR BIT(2)
-#define MAX_STATUS_MASK BIT(3)
-
#define THRESHOLD_ADDR 0x3624
-/* THRESHOLD_ADDR bitmasks */
-#define THRESHOLD_MAX_LIMIT_SHIFT 24
-#define THRESHOLD_MIN_LIMIT_SHIFT 16
-#define THRESHOLD_UPPER_LIMIT_SHIFT 8
-#define THRESHOLD_LOWER_LIMIT_SHIFT 0
-
-/* Initial temperature threshold values */
-#define LOWER_LIMIT_TH 0x50
-#define UPPER_LIMIT_TH 0xdf
-#define MIN_LIMIT_TH 0x0
-#define MAX_LIMIT_TH 0xff
-
-#define S0_STATUS_ADDR 0x3628
+
#define INT_STATUS_ADDR 0x363c
-#define TRDY_MASK BIT(7)
-#define TIMEOUT_US 100
+
+#define S0_STATUS_OFF 0x3628
+#define S1_STATUS_OFF 0x362c
+#define S2_STATUS_OFF 0x3630
+#define S3_STATUS_OFF 0x3634
+#define S4_STATUS_OFF 0x3638
+#define S5_STATUS_OFF 0x3664 /* Sensors 5-10 found on apq8064/msm8960 */
+#define S6_STATUS_OFF 0x3668
+#define S7_STATUS_OFF 0x366c
+#define S8_STATUS_OFF 0x3670
+#define S9_STATUS_OFF 0x3674
+#define S10_STATUS_OFF 0x3678
+
+/* Original slope minus 350, to compensate for mC-to-C inaccuracy */
+static u32 tsens_msm8960_slope[] = {
+ 826, 826, 804, 826,
+ 761, 782, 782, 849,
+ 782, 849, 782
+};
static int suspend_8960(struct tsens_priv *priv)
{
@@ -115,17 +111,34 @@ static int resume_8960(struct tsens_priv *priv)
static int enable_8960(struct tsens_priv *priv, int id)
{
int ret;
- u32 reg, mask;
+ u32 reg, mask = BIT(id);
ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
- mask = BIT(id + SENSOR0_SHIFT);
+ /*
+ * HARDWARE BUG: on platforms with more than 6 sensors, all the
+ * remaining sensors must be enabled together, otherwise the
+ * results are undefined (e.g. sensors 6-7 disabled, sensor 3
+ * disabled...). The original driver enables all sensors in a
+ * single step, so this bug is never triggered there.
+ */
+ if (id > 5)
+ mask = GENMASK(10, 6);
+
+ mask <<= SENSOR0_SHIFT;
+
+ /* Sensors already enabled. Skip. */
+ if ((reg & mask) == mask)
+ return 0;
+
ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
+ reg |= MEASURE_PERIOD;
+
if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else
@@ -162,63 +175,11 @@ static void disable_8960(struct tsens_priv *priv)
regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
}
-static int init_8960(struct tsens_priv *priv)
-{
- int ret, i;
- u32 reg_cntl;
-
- priv->tm_map = dev_get_regmap(priv->dev, NULL);
- if (!priv->tm_map)
- return -ENODEV;
-
- /*
- * The status registers for each sensor are discontiguous
- * because some SoCs have 5 sensors while others have more
- * but the control registers stay in the same place, i.e
- * directly after the first 5 status registers.
- */
- for (i = 0; i < priv->num_sensors; i++) {
- if (i >= 5)
- priv->sensor[i].status = S0_STATUS_ADDR + 40;
- priv->sensor[i].status += i * 4;
- }
-
- reg_cntl = SW_RST;
- ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
- if (ret)
- return ret;
-
- if (priv->num_sensors > 1) {
- reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
- reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR,
- CONFIG_MASK, CONFIG);
- } else {
- reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
- reg_cntl &= ~CONFIG_MASK_8660;
- reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
- }
-
- reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
- if (ret)
- return ret;
-
- reg_cntl |= EN;
- ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int calibrate_8960(struct tsens_priv *priv)
{
int i;
char *data;
-
- ssize_t num_read = priv->num_sensors;
- struct tsens_sensor *s = priv->sensor;
+ u32 p1[11];
data = qfprom_read(priv->dev, "calib");
if (IS_ERR(data))
@@ -226,60 +187,96 @@ static int calibrate_8960(struct tsens_priv *priv)
if (IS_ERR(data))
return PTR_ERR(data);
- for (i = 0; i < num_read; i++, s++)
- s->offset = data[i];
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = data[i];
+ priv->sensor[i].slope = tsens_msm8960_slope[i];
+ }
+
+ compute_intercept_slope(priv, p1, NULL, ONE_PT_CALIB);
kfree(data);
return 0;
}
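/*
 * Numeric sanity check of the linear model being replaced (from the
 * removed code_to_mdegC() below, illustration only): with slope 826
 * and a fused calibration code of 100, offset = 30000 - 826 * 100 =
 * -52600, so ADC code 120 decodes to 826 * 120 - 52600 = 46520 mC,
 * roughly 46.5 degrees C.
 */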
-/* Temperature on y axis and ADC-code on x-axis */
-static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
-{
- int slope, offset;
-
- slope = thermal_zone_get_slope(s->tzd);
- offset = CAL_MDEGC - slope * s->offset;
-
- return adc_code * slope + offset;
-}
-
-static int get_temp_8960(const struct tsens_sensor *s, int *temp)
-{
- int ret;
- u32 code, trdy;
- struct tsens_priv *priv = s->priv;
- unsigned long timeout;
-
- timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
- do {
- ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, &trdy);
- if (ret)
- return ret;
- if (!(trdy & TRDY_MASK))
- continue;
- ret = regmap_read(priv->tm_map, s->status, &code);
- if (ret)
- return ret;
- *temp = code_to_mdegC(code, s);
- return 0;
- } while (time_before(jiffies, timeout));
-
- return -ETIMEDOUT;
-}
+static const struct reg_field tsens_8960_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* No VERSION information */
+
+ /* CNTL */
+ [TSENS_EN] = REG_FIELD(CNTL_ADDR, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(CNTL_ADDR, 1, 1),
+ /* 8960 has 5 sensors, 8660 has 11, we only handle 5 */
+ [SENSOR_EN] = REG_FIELD(CNTL_ADDR, 3, 7),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ /* NO INTERRUPT ENABLE */
+
+ /* Single UPPER/LOWER TEMPERATURE THRESHOLD for all sensors */
+ [LOW_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 0, 7),
+ [UP_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 8, 15),
+ /*
+ * There are no MIN_THRESH_0 and MAX_THRESH_0 entries in the generic
+ * regfield list, so recycle CRIT_THRESH_0 and CRIT_THRESH_1 to set
+ * the corresponding registers to hardcoded temperatures:
+ * MIN_THRESH_0 -> CRIT_THRESH_1
+ * MAX_THRESH_0 -> CRIT_THRESH_0
+ */
+ [CRIT_THRESH_1] = REG_FIELD(THRESHOLD_ADDR, 16, 23),
+ [CRIT_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 24, 31),
+
+ /* UPPER/LOWER INTERRUPT [CLEAR/STATUS] */
+ /* 1 == clear, 0 == normal operation */
+ [LOW_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 9, 9),
+ [UP_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 10, 10),
+
+ /* NO CRITICAL INTERRUPT SUPPORT on 8960 */
+
+ /* Sn_STATUS */
+ [LAST_TEMP_0] = REG_FIELD(S0_STATUS_OFF, 0, 7),
+ [LAST_TEMP_1] = REG_FIELD(S1_STATUS_OFF, 0, 7),
+ [LAST_TEMP_2] = REG_FIELD(S2_STATUS_OFF, 0, 7),
+ [LAST_TEMP_3] = REG_FIELD(S3_STATUS_OFF, 0, 7),
+ [LAST_TEMP_4] = REG_FIELD(S4_STATUS_OFF, 0, 7),
+ [LAST_TEMP_5] = REG_FIELD(S5_STATUS_OFF, 0, 7),
+ [LAST_TEMP_6] = REG_FIELD(S6_STATUS_OFF, 0, 7),
+ [LAST_TEMP_7] = REG_FIELD(S7_STATUS_OFF, 0, 7),
+ [LAST_TEMP_8] = REG_FIELD(S8_STATUS_OFF, 0, 7),
+ [LAST_TEMP_9] = REG_FIELD(S9_STATUS_OFF, 0, 7),
+ [LAST_TEMP_10] = REG_FIELD(S10_STATUS_OFF, 0, 7),
+
+ /* No VALID field on 8960 */
+ /* TSENS_INT_STATUS bits: 1 == threshold violated */
+ [MIN_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 0, 0),
+ [LOWER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 1, 1),
+ [UPPER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 2, 2),
+ /* No CRITICAL field on 8960 */
+ [MAX_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 3, 3),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(INT_STATUS_ADDR, 7, 7),
+};
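/*
 * Usage sketch (illustration, not part of the patch): once
 * init_common() has allocated these fields,
 * regmap_field_read(priv->rf[LAST_TEMP_3], &code) reads bits 7:0 of
 * the register at 0x3634, sensor 3's last raw temperature code,
 * with no open-coded shifting or masking.
 */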
static const struct tsens_ops ops_8960 = {
- .init = init_8960,
+ .init = init_common,
.calibrate = calibrate_8960,
- .get_temp = get_temp_8960,
+ .get_temp = get_temp_common,
.enable = enable_8960,
.disable = disable_8960,
.suspend = suspend_8960,
.resume = resume_8960,
};
+static struct tsens_features tsens_8960_feat = {
+ .ver_major = VER_0,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 0,
+ .max_sensors = 11,
+};
+
struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
+ .feat = &tsens_8960_feat,
+ .fields = tsens_8960_regfields,
};
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 4ffa2e2c0145..f136cb350238 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -190,6 +190,39 @@
#define BIT_APPEND 0x3
+/* eeprom layout data for mdm9607 */
+#define MDM9607_BASE0_MASK 0x000000ff
+#define MDM9607_BASE1_MASK 0x000ff000
+#define MDM9607_BASE0_SHIFT 0
+#define MDM9607_BASE1_SHIFT 12
+
+#define MDM9607_S0_P1_MASK 0x00003f00
+#define MDM9607_S1_P1_MASK 0x03f00000
+#define MDM9607_S2_P1_MASK 0x0000003f
+#define MDM9607_S3_P1_MASK 0x0003f000
+#define MDM9607_S4_P1_MASK 0x0000003f
+
+#define MDM9607_S0_P2_MASK 0x000fc000
+#define MDM9607_S1_P2_MASK 0xfc000000
+#define MDM9607_S2_P2_MASK 0x00000fc0
+#define MDM9607_S3_P2_MASK 0x00fc0000
+#define MDM9607_S4_P2_MASK 0x00000fc0
+
+#define MDM9607_S0_P1_SHIFT 8
+#define MDM9607_S1_P1_SHIFT 20
+#define MDM9607_S2_P1_SHIFT 0
+#define MDM9607_S3_P1_SHIFT 12
+#define MDM9607_S4_P1_SHIFT 0
+
+#define MDM9607_S0_P2_SHIFT 14
+#define MDM9607_S1_P2_SHIFT 26
+#define MDM9607_S2_P2_SHIFT 6
+#define MDM9607_S3_P2_SHIFT 18
+#define MDM9607_S4_P2_SHIFT 6
+
+#define MDM9607_CAL_SEL_MASK 0x00700000
+#define MDM9607_CAL_SEL_SHIFT 20
+
static int calibrate_8916(struct tsens_priv *priv)
{
int base0 = 0, base1 = 0, i;
@@ -452,7 +485,56 @@ static int calibrate_8974(struct tsens_priv *priv)
return 0;
}
-/* v0.1: 8916, 8939, 8974 */
+static int calibrate_9607(struct tsens_priv *priv)
+{
+ int base, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ mode = (qfprom_cdata[2] & MDM9607_CAL_SEL_MASK) >> MDM9607_CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base = (qfprom_cdata[2] & MDM9607_BASE1_MASK) >> MDM9607_BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & MDM9607_S0_P2_MASK) >> MDM9607_S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & MDM9607_S1_P2_MASK) >> MDM9607_S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & MDM9607_S2_P2_MASK) >> MDM9607_S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & MDM9607_S3_P2_MASK) >> MDM9607_S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[2] & MDM9607_S4_P2_MASK) >> MDM9607_S4_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base + p2[i]) << 2);
+ fallthrough;
+ case ONE_PT_CALIB2:
+ base = (qfprom_cdata[0] & MDM9607_BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & MDM9607_S0_P1_MASK) >> MDM9607_S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & MDM9607_S1_P1_MASK) >> MDM9607_S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[1] & MDM9607_S2_P1_MASK) >> MDM9607_S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & MDM9607_S3_P1_MASK) >> MDM9607_S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[2] & MDM9607_S4_P1_MASK) >> MDM9607_S4_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = ((base + p1[i]) << 2);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+ kfree(qfprom_cdata);
+
+ return 0;
+}
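/*
 * Decode example (illustration only): in ONE_PT_CALIB2 mode a fused
 * base of 0x40 and a 6-bit sensor value of 0x10 give
 * p1 = (0x40 + 0x10) << 2 = 320; TWO_PT_CALIB falls through, so p2
 * and p1 are decoded the same way before compute_intercept_slope()
 * derives each sensor's line.
 */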
+
+/* v0.1: 8916, 8939, 8974, 9607 */
static struct tsens_features tsens_v0_1_feat = {
.ver_major = VER_0_1,
@@ -540,3 +622,17 @@ struct tsens_plat_data data_8974 = {
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
};
+
+static const struct tsens_ops ops_9607 = {
+ .init = init_common,
+ .calibrate = calibrate_9607,
+ .get_temp = get_temp_common,
+};
+
+struct tsens_plat_data data_9607 = {
+ .num_sensors = 5,
+ .ops = &ops_9607,
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 4 },
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 3c19a3800c6d..573e261ccca7 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -380,11 +380,11 @@ static const struct tsens_ops ops_8976 = {
.get_temp = get_temp_tsens_valid,
};
-/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. */
+/* Valid for both MSM8956 and MSM8976. */
struct tsens_plat_data data_8976 = {
.num_sensors = 11,
.ops = &ops_8976,
- .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10},
+ .hw_ids = (unsigned int[]){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index d8ce3a687b80..4c7ebd1d3f9c 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
@@ -85,7 +86,8 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
__func__, i, p1[i], p2[i]);
- priv->sensor[i].slope = SLOPE_DEFAULT;
+ if (!priv->sensor[i].slope)
+ priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
/*
* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
@@ -515,6 +517,15 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
dev_dbg(priv->dev, "[%u] %s: no violation: %d\n",
hw_id, __func__, temp);
}
+
+ if (tsens_version(priv) < VER_0_1) {
+ /* Constraint: there is only one interrupt control register for
+ * all 11 temperature sensors, so monitoring more than one sensor
+ * through interrupts yields inconsistent results. To overcome
+ * this, monitor only sensor 0, the master sensor.
+ */
+ break;
+ }
}
return IRQ_HANDLED;
@@ -530,6 +541,13 @@ static int tsens_set_trips(void *_sensor, int low, int high)
int high_val, low_val, cl_high, cl_low;
u32 hw_id = s->hw_id;
+ if (tsens_version(priv) < VER_0_1) {
+ /* Pre-v0.1 IP had a single register for each interrupt type
+ * and threshold
+ */
+ hw_id = 0;
+ }
+
dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n",
hw_id, __func__, low, high);
@@ -584,18 +602,21 @@ int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)
u32 valid;
int ret;
- ret = regmap_field_read(priv->rf[valid_idx], &valid);
- if (ret)
- return ret;
- while (!valid) {
- /* Valid bit is 0 for 6 AHB clock cycles.
- * At 19.2MHz, 1 AHB clock is ~60ns.
- * We should enter this loop very, very rarely.
- */
- ndelay(400);
+ /* VER_0 doesn't have VALID bit */
+ if (tsens_version(priv) >= VER_0_1) {
ret = regmap_field_read(priv->rf[valid_idx], &valid);
if (ret)
return ret;
+ while (!valid) {
+ /* Valid bit is 0 for 6 AHB clock cycles.
+ * At 19.2MHz, 1 AHB clock is ~60ns.
+ * We should enter this loop very, very rarely.
+ */
+ ndelay(400);
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ }
}
/* Valid bit is set, OK to read the temperature */
@@ -608,15 +629,29 @@ int get_temp_common(const struct tsens_sensor *s, int *temp)
{
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
- int last_temp = 0, ret;
+ int last_temp = 0, ret, trdy;
+ unsigned long timeout;
- ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp);
- if (ret)
- return ret;
+ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+ do {
+ if (tsens_version(priv) == VER_0) {
+ ret = regmap_field_read(priv->rf[TRDY], &trdy);
+ if (ret)
+ return ret;
+ if (!trdy)
+ continue;
+ }
- *temp = code_to_degc(last_temp, s) * 1000;
+ ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp);
+ if (ret)
+ return ret;
- return 0;
+ *temp = code_to_degc(last_temp, s) * 1000;
+
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
}
#ifdef CONFIG_DEBUG_FS
@@ -738,25 +773,42 @@ int __init init_common(struct tsens_priv *priv)
priv->tm_offset = 0x1000;
}
- res = platform_get_resource(op, IORESOURCE_MEM, 0);
- tm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(tm_base)) {
- ret = PTR_ERR(tm_base);
- goto err_put_device;
+ if (tsens_version(priv) >= VER_0_1) {
+ res = platform_get_resource(op, IORESOURCE_MEM, 0);
+ tm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(tm_base)) {
+ ret = PTR_ERR(tm_base);
+ goto err_put_device;
+ }
+
+ priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
+ } else { /* VER_0 shares the GCC registers via a syscon */
+ struct device *parent = priv->dev->parent;
+
+ if (parent)
+ priv->tm_map = syscon_node_to_regmap(parent->of_node);
}
- priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
- if (IS_ERR(priv->tm_map)) {
- ret = PTR_ERR(priv->tm_map);
+ if (IS_ERR_OR_NULL(priv->tm_map)) {
+ if (!priv->tm_map)
+ ret = -ENODEV;
+ else
+ ret = PTR_ERR(priv->tm_map);
goto err_put_device;
}
+ /* VER_0 has only tm_map */
+ if (!priv->srot_map)
+ priv->srot_map = priv->tm_map;
+
if (tsens_version(priv) > VER_0_1) {
for (i = VER_MAJOR; i <= VER_STEP; i++) {
priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
priv->fields[i]);
- if (IS_ERR(priv->rf[i]))
- return PTR_ERR(priv->rf[i]);
+ if (IS_ERR(priv->rf[i])) {
+ ret = PTR_ERR(priv->rf[i]);
+ goto err_put_device;
+ }
}
ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
if (ret)
@@ -769,6 +821,10 @@ int __init init_common(struct tsens_priv *priv)
ret = PTR_ERR(priv->rf[TSENS_EN]);
goto err_put_device;
}
+ /* In VER_0 the TSENS block needs to be enabled explicitly */
+ if (tsens_version(priv) == VER_0)
+ regmap_field_write(priv->rf[TSENS_EN], 1);
+
ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
if (ret)
goto err_put_device;
@@ -791,6 +847,19 @@ int __init init_common(struct tsens_priv *priv)
goto err_put_device;
}
+ priv->rf[TSENS_SW_RST] =
+ devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[TSENS_SW_RST]);
+ if (IS_ERR(priv->rf[TSENS_SW_RST])) {
+ ret = PTR_ERR(priv->rf[TSENS_SW_RST]);
+ goto err_put_device;
+ }
+
+ priv->rf[TRDY] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[TRDY]);
+ if (IS_ERR(priv->rf[TRDY])) {
+ ret = PTR_ERR(priv->rf[TRDY]);
+ goto err_put_device;
+ }
+
/* This loop might need changes if enum regfield_ids is reordered */
for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) {
for (i = 0; i < priv->feat->max_sensors; i++) {
@@ -806,7 +875,7 @@ int __init init_common(struct tsens_priv *priv)
}
}
- if (priv->feat->crit_int) {
+ if (priv->feat->crit_int || tsens_version(priv) < VER_0_1) {
/* Loop might need changes if enum regfield_ids is reordered */
for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) {
for (i = 0; i < priv->feat->max_sensors; i++) {
@@ -844,7 +913,11 @@ int __init init_common(struct tsens_priv *priv)
}
spin_lock_init(&priv->ul_lock);
- tsens_enable_irq(priv);
+
+ /* VER_0 interrupts don't need to be enabled */
+ if (tsens_version(priv) >= VER_0_1)
+ tsens_enable_irq(priv);
+
tsens_debug_init(op);
err_put_device:
@@ -895,6 +968,12 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
static const struct of_device_id tsens_table[] = {
{
+ .compatible = "qcom,ipq8064-tsens",
+ .data = &data_8960,
+ }, {
+ .compatible = "qcom,mdm9607-tsens",
+ .data = &data_9607,
+ }, {
.compatible = "qcom,msm8916-tsens",
.data = &data_8916,
}, {
@@ -943,10 +1022,19 @@ static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
if (irq == -ENXIO)
ret = 0;
} else {
- ret = devm_request_threaded_irq(&pdev->dev, irq,
- NULL, thread_fn,
- IRQF_ONESHOT,
- dev_name(&pdev->dev), priv);
+ /* VER_0 interrupts are TRIGGER_RISING; VER_0_1 and up are ONESHOT */
+ if (tsens_version(priv) == VER_0)
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ thread_fn, NULL,
+ IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev),
+ priv);
+ else
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ thread_fn, IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ priv);
+
if (ret)
dev_err(&pdev->dev, "%s: failed to get irq\n",
__func__);
@@ -975,6 +1063,19 @@ static int tsens_register(struct tsens_priv *priv)
priv->ops->enable(priv, i);
}
+ /* VER_0 requires the MIN and MAX THRESH to be set.
+ * These two registers are programmed via:
+ * - CRIT_THRESH_0 for MAX THRESH, hardcoded to 120°C
+ * - CRIT_THRESH_1 for MIN THRESH, hardcoded to 0°C
+ */
+ if (tsens_version(priv) < VER_0_1) {
+ regmap_field_write(priv->rf[CRIT_THRESH_0],
+ tsens_mC_to_hw(priv->sensor, 120000));
+
+ regmap_field_write(priv->rf[CRIT_THRESH_1],
+ tsens_mC_to_hw(priv->sensor, 0));
+ }
+
ret = tsens_register_irq(priv, "uplow", tsens_irq_thread);
if (ret < 0)
return ret;
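
The get_temp_common() rework earlier in this file polls with a jiffies deadline: compute the deadline once from TIMEOUT_US, retry while the hardware is not ready, and give up with -ETIMEDOUT once time_before() fails. A userspace model of the same shape (a tick counter stands in for jiffies and the TRDY bit; not the driver's API):

    #include <stdio.h>

    /* Model of the poll-with-deadline loop: "ticks" stands in for
     * jiffies, ready_after for the moment the TRDY bit comes up.
     */
    static int poll(unsigned long deadline, unsigned long ready_after, int *out)
    {
            unsigned long ticks = 0;

            do {
                    ticks++;                /* time advances each pass */
                    if (ticks < ready_after)
                            continue;       /* not ready yet: retry */
                    *out = 42;              /* stand-in for the LAST_TEMP read */
                    return 0;
            } while (ticks < deadline);

            return -1;                      /* like -ETIMEDOUT */
    }

    int main(void)
    {
            int v;

            printf("%d\n", poll(100, 10, &v));      /* 0: ready before deadline */
            printf("%d\n", poll(100, 1000, &v));    /* -1: deadline wins */
            return 0;
    }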
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index f40b625f897e..1471a2c00f15 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -13,6 +13,7 @@
#define CAL_DEGC_PT2 120
#define SLOPE_FACTOR 1000
#define SLOPE_DEFAULT 3200
+#define TIMEOUT_US 100
#define THRESHOLD_MAX_ADC_CODE 0x3ff
#define THRESHOLD_MIN_ADC_CODE 0x0
@@ -25,7 +26,8 @@ struct tsens_priv;
/* IP version numbers in ascending order */
enum tsens_ver {
- VER_0_1 = 0,
+ VER_0 = 0,
+ VER_0_1,
VER_1_X,
VER_2_X,
};
@@ -585,7 +587,7 @@ int get_temp_common(const struct tsens_sensor *s, int *temp);
extern struct tsens_plat_data data_8960;
/* TSENS v0.1 targets */
-extern struct tsens_plat_data data_8916, data_8939, data_8974;
+extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
/* TSENS v1 targets */
extern struct tsens_plat_data data_tsens_v1, data_8976;
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 75c69fe6e955..e1e412348076 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -60,7 +60,7 @@
#define MCELSIUS(temp) ((temp) * 1000)
#define GEN3_FUSE_MASK 0xFFF
-#define TSC_MAX_NUM 4
+#define TSC_MAX_NUM 5
/* default THCODE values if FUSEs are missing */
static const int thcodes[TSC_MAX_NUM][3] = {
@@ -68,6 +68,7 @@ static const int thcodes[TSC_MAX_NUM][3] = {
{ 3393, 2795, 2216 },
{ 3389, 2805, 2237 },
{ 3415, 2694, 2195 },
+ { 3356, 2724, 2244 },
};
/* Structure for thermal temperature calculation */
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 8c80bd06dd9f..d9cd23cbb671 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -300,7 +300,7 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev)
* or 0x8xx, so they won't be away from the default value
* for a lot.
*
- * So here we do not return error if the calibartion data is
+ * So here we do not return error if the calibration data is
* not available, except the probe needs deferring.
*/
goto out;
@@ -418,7 +418,7 @@ static int sun8i_h3_thermal_init(struct ths_device *tmdev)
}
/*
- * Without this undocummented value, the returned temperatures would
+ * Without this undocumented value, the returned temperatures would
* be higher than real ones by about 20C.
*/
#define SUN50I_H6_CTRL0_UNK 0x0000002f
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 66e0639da4bf..8e303e9d1dc0 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -2118,7 +2118,6 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
struct tegra_soctherm *tegra;
struct thermal_zone_device *z;
struct tsensor_shared_calib shared_calib;
- struct resource *res;
struct tegra_soctherm_soc *soc;
unsigned int i;
int err;
@@ -2140,26 +2139,20 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
tegra->soc = soc;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "soctherm-reg");
- tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->regs = devm_platform_ioremap_resource_byname(pdev, "soctherm-reg");
if (IS_ERR(tegra->regs)) {
dev_err(&pdev->dev, "can't get soctherm registers");
return PTR_ERR(tegra->regs);
}
if (!tegra->soc->use_ccroc) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "car-reg");
- tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->clk_regs = devm_platform_ioremap_resource_byname(pdev, "car-reg");
if (IS_ERR(tegra->clk_regs)) {
dev_err(&pdev->dev, "can't get car clk registers");
return PTR_ERR(tegra->clk_regs);
}
} else {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "ccroc-reg");
- tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->ccroc_regs = devm_platform_ioremap_resource_byname(pdev, "ccroc-reg");
if (IS_ERR(tegra->ccroc_regs)) {
dev_err(&pdev->dev, "can't get ccroc registers");
return PTR_ERR(tegra->ccroc_regs);
@@ -2195,7 +2188,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (err)
return err;
- /* calculate tsensor calibaration data */
+ /* calculate tsensor calibration data */
for (i = 0; i < soc->num_tsensors; ++i) {
err = tegra_calc_tsensor_calib(&soc->tsensors[i],
&shared_calib,
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 996c038f83a4..d20b25f40d19 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -561,24 +561,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
-/**
- * thermal_notify_framework - Sensor drivers use this API to notify framework
- * @tz: thermal zone device
- * @trip: indicates which trip point has been crossed
- *
- * This function handles the trip events from sensor drivers. It starts
- * throttling the cooling devices according to the policy configured.
- * For CRITICAL and HOT trip points, this notifies the respective drivers,
- * and does actual throttling for other trip points i.e ACTIVE and PASSIVE.
- * The throttling policy is based on the configured platform data; if no
- * platform data is provided, this uses the step_wise throttling policy.
- */
-void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
-{
- handle_thermal_trip(tz, trip);
-}
-EXPORT_SYMBOL_GPL(thermal_notify_framework);
-
static void thermal_zone_device_check(struct work_struct *work)
{
struct thermal_zone_device *tz = container_of(work, struct
@@ -960,10 +942,7 @@ __thermal_cooling_device_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
- int result;
-
- if (type && strlen(type) >= THERMAL_NAME_LENGTH)
- return ERR_PTR(-EINVAL);
+ int ret;
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
!ops->set_cur_state)
@@ -973,14 +952,17 @@ __thermal_cooling_device_register(struct device_node *np,
if (!cdev)
return ERR_PTR(-ENOMEM);
- result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
- if (result < 0) {
- kfree(cdev);
- return ERR_PTR(result);
+ ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out_kfree_cdev;
+ cdev->id = ret;
+
+ cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+ if (!cdev->type) {
+ ret = -ENOMEM;
+ goto out_ida_remove;
}
- cdev->id = result;
- strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->np = np;
@@ -990,12 +972,9 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->devdata = devdata;
thermal_cooling_device_setup_sysfs(cdev);
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
- result = device_register(&cdev->device);
- if (result) {
- ida_simple_remove(&thermal_cdev_ida, cdev->id);
- put_device(&cdev->device);
- return ERR_PTR(result);
- }
+ ret = device_register(&cdev->device);
+ if (ret)
+ goto out_kfree_type;
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -1013,6 +992,15 @@ __thermal_cooling_device_register(struct device_node *np,
mutex_unlock(&thermal_list_lock);
return cdev;
+
+out_kfree_type:
+ kfree(cdev->type);
+ put_device(&cdev->device);
+out_ida_remove:
+ ida_simple_remove(&thermal_cdev_ida, cdev->id);
+out_kfree_cdev:
+ kfree(cdev);
+ return ERR_PTR(ret);
}
/**
@@ -1171,6 +1159,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
ida_simple_remove(&thermal_cdev_ida, cdev->id);
device_del(&cdev->device);
thermal_cooling_device_destroy_sysfs(cdev);
+ kfree(cdev->type);
put_device(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 86b8cef7310e..726e327b4205 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -66,6 +66,7 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
}
void thermal_cdev_update(struct thermal_cooling_device *);
+void __thermal_cdev_update(struct thermal_cooling_device *cdev);
/**
* struct thermal_trip - representation of a point in temperature domain
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 7f50f412e02a..3edd047e144f 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -192,18 +192,11 @@ static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
thermal_cooling_device_stats_update(cdev, target);
}
-void thermal_cdev_update(struct thermal_cooling_device *cdev)
+void __thermal_cdev_update(struct thermal_cooling_device *cdev)
{
struct thermal_instance *instance;
unsigned long target = 0;
- mutex_lock(&cdev->lock);
- /* cooling device is updated*/
- if (cdev->updated) {
- mutex_unlock(&cdev->lock);
- return;
- }
-
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
dev_dbg(&cdev->device, "zone%d->target=%lu\n",
@@ -216,11 +209,25 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
thermal_cdev_set_cur_state(cdev, target);
- cdev->updated = true;
- mutex_unlock(&cdev->lock);
trace_cdev_update(cdev, target);
dev_dbg(&cdev->device, "set to state %lu\n", target);
}
+
+/**
+ * thermal_cdev_update - update cooling device state if needed
+ * @cdev: pointer to struct thermal_cooling_device
+ *
+ * Update the cooling device state if there is a need.
+ */
+void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{
+ mutex_lock(&cdev->lock);
+ if (!cdev->updated) {
+ __thermal_cdev_update(cdev);
+ cdev->updated = true;
+ }
+ mutex_unlock(&cdev->lock);
+}
EXPORT_SYMBOL(thermal_cdev_update);
/**
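
The __thermal_cdev_update()/thermal_cdev_update() split above is the usual kernel idiom of pairing an unlocked double-underscore helper with a locking wrapper, so callers that already hold cdev->lock (e.g. governors) can reuse the helper. A minimal generic sketch of the idiom, with hypothetical names and fields:

    #include <pthread.h>
    #include <stdbool.h>

    /* Locking-wrapper idiom: __update() assumes the caller holds the
     * lock; update() is the public entry point that takes it.
     */
    struct cooling_dev {
            pthread_mutex_t lock;
            bool updated;
            unsigned long state;
    };

    /* Caller must hold dev->lock. */
    static void __update(struct cooling_dev *dev)
    {
            dev->state = 1;         /* stand-in for picking the deepest state */
    }

    static void update(struct cooling_dev *dev)
    {
            pthread_mutex_lock(&dev->lock);
            if (!dev->updated) {
                    __update(dev);
                    dev->updated = true;
            }
            pthread_mutex_unlock(&dev->lock);
    }

    int main(void)
    {
            struct cooling_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER };

            update(&dev);           /* takes the lock, does the work once */
            update(&dev);           /* debounced: updated is already true */
            return 0;
    }

Callers that already hold the lock use the double-underscore helper directly; everyone else goes through the wrapper, which also debounces via the updated flag.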
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index d0bdf1ea3331..ded1dd0d4ef7 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -54,11 +54,8 @@ static int thermal_mmio_probe(struct platform_device *pdev)
resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
- if (IS_ERR(sensor->mmio_base)) {
- dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
- PTR_ERR(sensor->mmio_base));
+ if (IS_ERR(sensor->mmio_base))
return PTR_ERR(sensor->mmio_base);
- }
sensor_init_func = device_get_match_data(&pdev->dev);
if (sensor_init_func) {
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 69ef12f852b7..5b76f9a1280d 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
count = of_count_phandle_with_args(np, "cooling-device",
"#cooling-cells");
- if (!count) {
+ if (count <= 0) {
pr_err("Add a cooling_device property with at least one device\n");
+ ret = -ENOENT;
goto end;
}
__tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
- if (!__tcbp)
+ if (!__tcbp) {
+ ret = -ENOMEM;
goto end;
+ }
for (i = 0; i < count; i++) {
ret = of_parse_phandle_with_args(np, "cooling-device",
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 8a3646e26ddd..ebe7cb70bfb6 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -9,30 +9,29 @@
* Eduardo Valentin <eduardo.valentin@ti.com>
*/
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu_pm.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sys_soc.h>
-#include <linux/reboot.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/cpu_pm.h>
-#include <linux/device.h>
-#include <linux/pm_runtime.h>
-#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
#include "ti-bandgap.h"
@@ -1143,14 +1142,10 @@ static int ti_bandgap_restore_ctxt(struct ti_bandgap *bgp)
for (i = 0; i < bgp->conf->sensor_count; i++) {
struct temp_sensor_registers *tsr;
struct temp_sensor_regval *rval;
- u32 val = 0;
rval = &bgp->regval[i];
tsr = bgp->conf->sensors[i].registers;
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- val = ti_bandgap_readl(bgp, tsr->bgap_counter);
-
if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG))
ti_bandgap_writel(bgp, rval->tshut_threshold,
tsr->tshut_threshold);
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index ffd1e098bfd2..a503c1b2bfd9 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -14,6 +14,7 @@ config VDPA_SIM
depends on RUNTIME_TESTING_MENU && HAS_DMA
select DMA_OPS
select VHOST_RING
+ select IOMMU_IOVA
help
Enable this module to support vDPA device simulators. These devices
are used for testing, prototyping and development of vDPA.
@@ -25,6 +26,13 @@ config VDPA_SIM_NET
help
vDPA networking device simulator which loops TX traffic back to RX.
+config VDPA_SIM_BLOCK
+ tristate "vDPA simulator for block device"
+ depends on VDPA_SIM
+ help
+ vDPA block device simulator which terminates I/O requests in a
+ memory buffer.
+
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
@@ -52,4 +60,11 @@ config MLX5_VDPA_NET
be executed by the hardware. It also supports a variety of stateless
offloads depending on the actual device used and firmware version.
+config VP_VDPA
+ tristate "Virtio PCI bridge vDPA driver"
+ select VIRTIO_PCI_LIB
+ depends on PCI_MSI
+ help
+ This kernel module bridges virtio PCI device to vDPA bus.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index d160e9b63a66..67fe7f3d6943 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
+obj-$(CONFIG_VP_VDPA) += virtio_pci/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index f2a128e56de5..1a661ab45af5 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -202,10 +202,11 @@ static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
ifcvf_get_status(hw);
}
-u64 ifcvf_get_features(struct ifcvf_hw *hw)
+u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
u32 features_lo, features_hi;
+ u64 features;
ifc_iowrite32(0, &cfg->device_feature_select);
features_lo = ifc_ioread32(&cfg->device_feature);
@@ -213,7 +214,26 @@ u64 ifcvf_get_features(struct ifcvf_hw *hw)
ifc_iowrite32(1, &cfg->device_feature_select);
features_hi = ifc_ioread32(&cfg->device_feature);
- return ((u64)features_hi << 32) | features_lo;
+ features = ((u64)features_hi << 32) | features_lo;
+
+ return features;
+}
+
+u64 ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ return hw->hw_features;
+}
+
+int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
+
+ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
+ IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 64696d63fe07..0111bfdeb342 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -15,15 +15,26 @@
#include <linux/pci_regs.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_net.h>
+#include <uapi/linux/virtio_blk.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_pci.h>
-#define IFCVF_VENDOR_ID 0x1AF4
-#define IFCVF_DEVICE_ID 0x1041
-#define IFCVF_SUBSYS_VENDOR_ID 0x8086
-#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+#define N3000_VENDOR_ID 0x1AF4
+#define N3000_DEVICE_ID 0x1041
+#define N3000_SUBSYS_VENDOR_ID 0x8086
+#define N3000_SUBSYS_DEVICE_ID 0x001A
-#define IFCVF_SUPPORTED_FEATURES \
+#define C5000X_PL_VENDOR_ID 0x1AF4
+#define C5000X_PL_DEVICE_ID 0x1000
+#define C5000X_PL_SUBSYS_VENDOR_ID 0x8086
+#define C5000X_PL_SUBSYS_DEVICE_ID 0x0001
+
+#define C5000X_PL_BLK_VENDOR_ID 0x1AF4
+#define C5000X_PL_BLK_DEVICE_ID 0x1001
+#define C5000X_PL_BLK_SUBSYS_VENDOR_ID 0x8086
+#define C5000X_PL_BLK_SUBSYS_DEVICE_ID 0x0002
+
+#define IFCVF_NET_SUPPORTED_FEATURES \
((1ULL << VIRTIO_NET_F_MAC) | \
(1ULL << VIRTIO_F_ANY_LAYOUT) | \
(1ULL << VIRTIO_F_VERSION_1) | \
@@ -78,6 +89,8 @@ struct ifcvf_hw {
void __iomem *notify_base;
u32 notify_off_multiplier;
u64 req_features;
+ u64 hw_features;
+ u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *net_cfg;
struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
@@ -116,7 +129,10 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
+int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
+int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index d555a6a5d1ba..ab0ab5cf0f6e 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -14,7 +14,6 @@
#include <linux/sysfs.h>
#include "ifcvf_base.h"
-#define VERSION_STRING "0.1"
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"
@@ -169,10 +168,23 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+
u64 features;
- features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;
+ switch (vf->dev_type) {
+ case VIRTIO_ID_NET:
+ features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
+ break;
+ case VIRTIO_ID_BLOCK:
+ features = ifcvf_get_features(vf);
+ break;
+ default:
+ features = 0;
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
+ }
return features;
}
@@ -180,6 +192,11 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ int ret;
+
+ ret = ifcvf_verify_min_features(vf, features);
+ if (ret)
+ return ret;
vf->req_features = features;
@@ -319,12 +336,17 @@ static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
- return VIRTIO_ID_NET;
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->dev_type;
}
static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
- return IFCVF_SUBSYS_VENDOR_ID;
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ return pdev->subsystem_vendor;
}
static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
@@ -332,6 +354,28 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
return IFCVF_QUEUE_ALIGNMENT;
}
+static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+ size_t size;
+
+ switch (vf->dev_type) {
+ case VIRTIO_ID_NET:
+ size = sizeof(struct virtio_net_config);
+ break;
+ case VIRTIO_ID_BLOCK:
+ size = sizeof(struct virtio_blk_config);
+ break;
+ default:
+ size = 0;
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
+ }
+
+ return size;
+}
+
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
unsigned int offset,
void *buf, unsigned int len)
@@ -392,6 +436,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_device_id = ifcvf_vdpa_get_device_id,
.get_vendor_id = ifcvf_vdpa_get_vendor_id,
.get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_config_size = ifcvf_vdpa_get_config_size,
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
.set_config_cb = ifcvf_vdpa_set_config_cb,
@@ -441,6 +486,19 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, adapter);
vf = &adapter->vf;
+
+ /* This driver drives both modern virtio devices and transitional
+ * devices in modern mode.
+ * vDPA requires the VIRTIO_F_ACCESS_PLATFORM feature bit, so
+ * legacy devices and transitional devices in legacy mode will
+ * not work for vDPA; this driver does not drive devices with a
+ * legacy interface.
+ */
+ if (pdev->device < 0x1040)
+ vf->dev_type = pdev->subsystem_device;
+ else
+ vf->dev_type = pdev->device - 0x1040;
+
vf->base = pcim_iomap_table(pdev);
adapter->pdev = pdev;
@@ -455,6 +513,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
vf->vring[i].irq = -EINVAL;
+ vf->hw_features = ifcvf_get_hw_features(vf);
+
ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
if (ret) {
IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
@@ -476,10 +536,19 @@ static void ifcvf_remove(struct pci_dev *pdev)
}
static struct pci_device_id ifcvf_pci_ids[] = {
- { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
- IFCVF_DEVICE_ID,
- IFCVF_SUBSYS_VENDOR_ID,
- IFCVF_SUBSYS_DEVICE_ID) },
+ { PCI_DEVICE_SUB(N3000_VENDOR_ID,
+ N3000_DEVICE_ID,
+ N3000_SUBSYS_VENDOR_ID,
+ N3000_SUBSYS_DEVICE_ID) },
+ { PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID,
+ C5000X_PL_DEVICE_ID,
+ C5000X_PL_SUBSYS_VENDOR_ID,
+ C5000X_PL_SUBSYS_DEVICE_ID) },
+ { PCI_DEVICE_SUB(C5000X_PL_BLK_VENDOR_ID,
+ C5000X_PL_BLK_DEVICE_ID,
+ C5000X_PL_BLK_SUBSYS_VENDOR_ID,
+ C5000X_PL_BLK_SUBSYS_DEVICE_ID) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
@@ -494,4 +563,3 @@ static struct pci_driver ifcvf_driver = {
module_pci_driver(ifcvf_driver);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(VERSION_STRING);
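
The dev_type assignment in ifcvf_probe() above encodes the virtio-pci ID scheme: modern devices use PCI device ID 0x1040 + virtio device type, while transitional devices (IDs below 0x1040) expose the type in the PCI subsystem device ID. A standalone sketch of the same mapping, checked against the IDs added by this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the dev_type logic above (virtio spec: modern device
     * IDs are 0x1040 + type; transitional IDs < 0x1040 carry the type
     * in the PCI subsystem device ID).
     */
    static uint32_t virtio_dev_type(uint16_t device, uint16_t subsys_device)
    {
            return device < 0x1040 ? subsys_device : device - 0x1040;
    }

    int main(void)
    {
            printf("%u\n", virtio_dev_type(0x1041, 0x001A)); /* 1: net (modern N3000) */
            printf("%u\n", virtio_dev_type(0x1001, 0x0002)); /* 2: block (transitional C5000X) */
            return 0;
    }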
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 4d2809c7d4e3..189e4385df40 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1809,6 +1809,11 @@ err_setup:
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
+static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
+{
+ return sizeof(struct virtio_net_config);
+}
+
static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
unsigned int len)
{
@@ -1895,6 +1900,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vendor_id = mlx5_vdpa_get_vendor_id,
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
+ .get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
@@ -1974,23 +1980,32 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
}
}
-static int mlx5v_probe(struct auxiliary_device *adev,
- const struct auxiliary_device_id *id)
+struct mlx5_vdpa_mgmtdev {
+ struct vdpa_mgmt_dev mgtdev;
+ struct mlx5_adev *madev;
+ struct mlx5_vdpa_net *ndev;
+};
+
+static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
{
- struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
+ struct mlx5_core_dev *mdev;
u32 max_vqs;
int err;
+ if (mgtdev->ndev)
+ return -ENOSPC;
+
+ mdev = mgtdev->madev->mdev;
/* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- NULL);
+ name);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2017,11 +2032,12 @@ static int mlx5v_probe(struct auxiliary_device *adev,
if (err)
goto err_res;
- err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
+ mvdev->vdev.mdev = &mgtdev->mgtdev;
+ err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
if (err)
goto err_reg;
- dev_set_drvdata(&adev->dev, ndev);
+ mgtdev->ndev = ndev;
return 0;
err_reg:
@@ -2034,11 +2050,62 @@ err_mtu:
return err;
}
+static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
+{
+ struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
+
+ _vdpa_unregister_device(dev);
+ mgtdev->ndev = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops mdev_ops = {
+ .dev_add = mlx5_vdpa_dev_add,
+ .dev_del = mlx5_vdpa_dev_del,
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static int mlx5v_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5_vdpa_mgmtdev *mgtdev;
+ int err;
+
+ mgtdev = kzalloc(sizeof(*mgtdev), GFP_KERNEL);
+ if (!mgtdev)
+ return -ENOMEM;
+
+ mgtdev->mgtdev.ops = &mdev_ops;
+ mgtdev->mgtdev.device = mdev->device;
+ mgtdev->mgtdev.id_table = id_table;
+ mgtdev->madev = madev;
+
+ err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
+ if (err)
+ goto reg_err;
+
+ dev_set_drvdata(&adev->dev, mgtdev);
+
+ return 0;
+
+reg_err:
+ kfree(mgtdev);
+ return err;
+}
+
static void mlx5v_remove(struct auxiliary_device *adev)
{
- struct mlx5_vdpa_dev *mvdev = dev_get_drvdata(&adev->dev);
+ struct mlx5_vdpa_mgmtdev *mgtdev;
- vdpa_unregister_device(&mvdev->vdev);
+ mgtdev = dev_get_drvdata(&adev->dev);
+ vdpa_mgmtdev_unregister(&mgtdev->mgtdev);
+ kfree(mgtdev);
}
static const struct auxiliary_device_id mlx5v_id_table[] = {
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 5cffce67cab0..bb3f1d1f0422 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -75,8 +75,8 @@ static void vdpa_release_dev(struct device *d)
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
*
- * Returns an error when parent/config/dma_dev is not set or fail to get
- * ida.
+ * Return: Returns an error when parent/config/dma_dev is not set or when
+ * the ida allocation fails.
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
@@ -157,7 +157,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
* @vdev: the vdpa device to be registered to vDPA bus
* @nvqs: number of virtqueues supported by this device
*
- * Returns an error when fail to add device to vDPA bus
+ * Return: Returns an error when the device cannot be added to the vDPA bus
*/
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
@@ -174,7 +174,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
* @vdev: the vdpa device to be registered to vDPA bus
* @nvqs: number of virtqueues supported by this device
*
- * Returns an error when fail to add to vDPA bus
+ * Return: Returns an error when the device cannot be added to the vDPA bus
*/
int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(vdpa_unregister_device);
* @drv: the vdpa device driver to be registered
* @owner: module owner of the driver
*
- * Returns an err when fail to do the registration
+ * Return: Returns an error when the registration fails
*/
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
@@ -245,6 +245,8 @@ EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
* @mdev: Pointer to vdpa management device
* vdpa_mgmtdev_register() register a vdpa management device which supports
* vdpa device management.
+ * Return: Returns 0 on success, or an error when the required callback ops
+ * are not initialized.
*/
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile
index 79d4536d347e..d458103302f2 100644
--- a/drivers/vdpa/vdpa_sim/Makefile
+++ b/drivers/vdpa/vdpa_sim/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o
obj-$(CONFIG_VDPA_SIM_NET) += vdpa_sim_net.o
+obj-$(CONFIG_VDPA_SIM_BLOCK) += vdpa_sim_blk.o
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 5b6b2f87d40c..98f793bc9376 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -17,6 +17,7 @@
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
+#include <linux/iova.h>
#include "vdpa_sim.h"
@@ -128,30 +129,57 @@ static int dir_to_perm(enum dma_data_direction dir)
return perm;
}
+static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
+ size_t size, unsigned int perm)
+{
+ struct iova *iova;
+ dma_addr_t dma_addr;
+ int ret;
+
+ /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
+ iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ dma_addr = iova_dma_addr(&vdpasim->iova, iova);
+
+ spin_lock(&vdpasim->iommu_lock);
+ ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1, (u64)paddr, perm);
+ spin_unlock(&vdpasim->iommu_lock);
+
+ if (ret) {
+ __free_iova(&vdpasim->iova, iova);
+ return DMA_MAPPING_ERROR;
+ }
+
+ return dma_addr;
+}
+
+static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
+ size_t size)
+{
+ spin_lock(&vdpasim->iommu_lock);
+ vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
+
+ free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
+}
+
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
- int ret, perm = dir_to_perm(dir);
+ phys_addr_t paddr = page_to_phys(page) + offset;
+ int perm = dir_to_perm(dir);
if (perm < 0)
return DMA_MAPPING_ERROR;
- /* For simplicity, use identical mapping to avoid e.g iova
- * allocator.
- */
- spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
- pa, dir_to_perm(dir));
- spin_unlock(&vdpasim->iommu_lock);
- if (ret)
- return DMA_MAPPING_ERROR;
-
- return (dma_addr_t)(pa);
+ return vdpasim_map_range(vdpasim, paddr, size, perm);
}
static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
@@ -159,12 +187,8 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(iommu, (u64)dma_addr,
- (u64)dma_addr + size - 1);
- spin_unlock(&vdpasim->iommu_lock);
+ vdpasim_unmap_range(vdpasim, dma_addr, size);
}
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -172,27 +196,22 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- void *addr = kmalloc(size, flag);
- int ret;
+ phys_addr_t paddr;
+ void *addr;
- spin_lock(&vdpasim->iommu_lock);
+ addr = kmalloc(size, flag);
if (!addr) {
*dma_addr = DMA_MAPPING_ERROR;
- } else {
- u64 pa = virt_to_phys(addr);
-
- ret = vhost_iotlb_add_range(iommu, (u64)pa,
- (u64)pa + size - 1,
- pa, VHOST_MAP_RW);
- if (ret) {
- *dma_addr = DMA_MAPPING_ERROR;
- kfree(addr);
- addr = NULL;
- } else
- *dma_addr = (dma_addr_t)pa;
+ return NULL;
+ }
+
+ paddr = virt_to_phys(addr);
+
+ *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
+ if (*dma_addr == DMA_MAPPING_ERROR) {
+ kfree(addr);
+ return NULL;
}
- spin_unlock(&vdpasim->iommu_lock);
return addr;
}
@@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(iommu, (u64)dma_addr,
- (u64)dma_addr + size - 1);
- spin_unlock(&vdpasim->iommu_lock);
+ vdpasim_unmap_range(vdpasim, dma_addr, size);
- kfree(phys_to_virt((uintptr_t)dma_addr));
+ kfree(vaddr);
}
static const struct dma_map_ops vdpasim_dma_ops = {
@@ -269,7 +284,15 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
goto err_iommu;
for (i = 0; i < dev_attr->nvqs; i++)
- vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
+ &vdpasim->iommu_lock);
+
+ ret = iova_cache_get();
+ if (ret)
+ goto err_iommu;
+
+ /* For simplicity we use an IOVA allocator with byte granularity */
+ init_iova_domain(&vdpasim->iova, 1, 0);
vdpasim->vdpa.dma_dev = dev;
@@ -439,6 +462,13 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
spin_unlock(&vdpasim->lock);
}
+static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->dev_attr.config_size;
+}
+
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
void *buf, unsigned int len)
{
@@ -539,8 +569,17 @@ static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
static void vdpasim_free(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ int i;
cancel_work_sync(&vdpasim->work);
+
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
+ vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
+ vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
+ }
+
+ put_iova_domain(&vdpasim->iova);
+ iova_cache_put();
kvfree(vdpasim->buffer);
if (vdpasim->iommu)
vhost_iotlb_free(vdpasim->iommu);
@@ -566,6 +605,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
@@ -593,6 +633,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
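
The vdpasim_map_range()/vdpasim_unmap_range() pair added above replaces the old identity (dma_addr == paddr) shortcut with a real IOVA allocator plus an IOTLB entry, so device addresses are decoupled from physical addresses. A toy sketch of that decoupling (a bump allocator standing in for alloc_iova(); no real translation table):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for alloc_iova(): device addresses are handed out
     * independently of the backing physical address, so consumers must
     * go through the IOTLB instead of assuming identity mapping.
     */
    static uint64_t next_iova = 0x1000;

    static uint64_t map_range(uint64_t paddr, size_t size)
    {
            uint64_t iova = next_iova;

            next_iova += size;      /* a real IOTLB would record iova -> paddr here */
            (void)paddr;            /* unused in this toy: no translation table */
            return iova;
    }

    int main(void)
    {
            printf("iova 0x%llx\n", (unsigned long long)map_range(0xdead000, 0x200)); /* 0x1000 */
            printf("iova 0x%llx\n", (unsigned long long)map_range(0xbeef000, 0x200)); /* 0x1200 */
            return 0;
    }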
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 6d75444f9948..cd58e888bcf3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -6,6 +6,7 @@
#ifndef _VDPA_SIM_H
#define _VDPA_SIM_H
+#include <linux/iova.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
@@ -57,6 +58,7 @@ struct vdpasim {
/* virtio config according to device type */
void *config;
struct vhost_iotlb *iommu;
+ struct iova_domain iova;
void *buffer;
u32 status;
u32 generation;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
new file mode 100644
index 000000000000..5bfe1c281645
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDPA simulator for block device.
+ *
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021, Red Hat Inc. All rights reserved.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <linux/blkdev.h>
+#include <uapi/linux/virtio_blk.h>
+
+#include "vdpa_sim.h"
+
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Max Gurtovoy <mgurtovoy@nvidia.com>"
+#define DRV_DESC "vDPA Device Simulator for block device"
+#define DRV_LICENSE "GPL v2"
+
+#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
+ (1ULL << VIRTIO_BLK_F_SEG_MAX) | \
+ (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
+ (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
+ (1ULL << VIRTIO_BLK_F_MQ))
+
+#define VDPASIM_BLK_CAPACITY 0x40000
+#define VDPASIM_BLK_SIZE_MAX 0x1000
+#define VDPASIM_BLK_SEG_MAX 32
+#define VDPASIM_BLK_VQ_NUM 1
+
+static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
+
+static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
+{
+ u64 range_sectors = range_size >> SECTOR_SHIFT;
+
+ if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
+ return false;
+
+ if (start_sector > VDPASIM_BLK_CAPACITY)
+ return false;
+
+ if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
+ return false;
+
+ return true;
+}
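
vdpasim_blk_check_range() above is written to be overflow-safe: it compares the sector count against the remaining capacity instead of adding it to start_sector, which could wrap. A standalone sketch with the same constants (the size-max/seg-max cap is omitted here for brevity):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CAPACITY     0x40000ULL  /* sectors, as in VDPASIM_BLK_CAPACITY */
    #define SECTOR_SHIFT 9

    /* Same shape as vdpasim_blk_check_range(): comparing against the
     * remaining capacity (CAPACITY - start) cannot overflow, whereas
     * start + range_sectors could wrap for a hostile start value.
     */
    static bool check_range(uint64_t start_sector, size_t range_size)
    {
            uint64_t range_sectors = range_size >> SECTOR_SHIFT;

            if (start_sector > CAPACITY)
                    return false;
            return range_sectors <= CAPACITY - start_sector;
    }

    int main(void)
    {
            printf("%d\n", check_range(0, 4096));           /* 1: in range */
            printf("%d\n", check_range(UINT64_MAX, 512));   /* 0: rejected */
            return 0;
    }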
+
+/* Returns 'true' if the request is handled (with or without an I/O error)
+ * and the status is correctly written in the last byte of the 'in iov',
+ * 'false' otherwise.
+ */
+static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
+ struct vdpasim_virtqueue *vq)
+{
+ size_t pushed = 0, to_pull, to_push;
+ struct virtio_blk_outhdr hdr;
+ ssize_t bytes;
+ loff_t offset;
+ u64 sector;
+ u8 status;
+ u32 type;
+ int ret;
+
+ ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,
+ &vq->head, GFP_ATOMIC);
+ if (ret != 1)
+ return false;
+
+ if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
+ dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
+ vq->out_iov.used, vq->in_iov.used);
+ return false;
+ }
+
+ if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
+ dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
+ return false;
+ }
+
+ /* The last byte is the status and we checked if the last iov has
+ * enough room for it.
+ */
+ to_push = vringh_kiov_length(&vq->in_iov) - 1;
+
+ to_pull = vringh_kiov_length(&vq->out_iov);
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
+ sizeof(hdr));
+ if (bytes != sizeof(hdr)) {
+ dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
+ return false;
+ }
+
+ to_pull -= bytes;
+
+ type = vdpasim32_to_cpu(vdpasim, hdr.type);
+ sector = vdpasim64_to_cpu(vdpasim, hdr.sector);
+ offset = sector << SECTOR_SHIFT;
+ status = VIRTIO_BLK_S_OK;
+
+ switch (type) {
+ case VIRTIO_BLK_T_IN:
+ if (!vdpasim_blk_check_range(sector, to_push)) {
+ dev_err(&vdpasim->vdpa.dev,
+ "reading over the capacity - offset: 0x%llx len: 0x%zx\n",
+ offset, to_push);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
+ vdpasim->buffer + offset,
+ to_push);
+ if (bytes < 0) {
+ dev_err(&vdpasim->vdpa.dev,
+ "vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_push);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ pushed += bytes;
+ break;
+
+ case VIRTIO_BLK_T_OUT:
+ if (!vdpasim_blk_check_range(sector, to_pull)) {
+ dev_err(&vdpasim->vdpa.dev,
+ "writing over the capacity - offset: 0x%llx len: 0x%zx\n",
+ offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,
+ vdpasim->buffer + offset,
+ to_pull);
+ if (bytes < 0) {
+ dev_err(&vdpasim->vdpa.dev,
+ "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+ break;
+
+ case VIRTIO_BLK_T_GET_ID:
+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
+ vdpasim_blk_id,
+ VIRTIO_BLK_ID_BYTES);
+ if (bytes < 0) {
+ dev_err(&vdpasim->vdpa.dev,
+ "vringh_iov_push_iotlb() error: %zd\n", bytes);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ pushed += bytes;
+ break;
+
+ default:
+ dev_warn(&vdpasim->vdpa.dev,
+ "Unsupported request type %d\n", type);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ /* If an operation pushed fewer bytes than expected, skip the
+ * remaining bytes so the status lands in the last byte
+ */
+ if (to_push - pushed > 0)
+ vringh_kiov_advance(&vq->in_iov, to_push - pushed);
+
+ /* Last byte is the status */
+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
+ if (bytes != 1)
+ return false;
+
+ pushed += bytes;
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&vq->vring, vq->head, pushed);
+
+ return true;
+}
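
vdpasim_blk_handle_req() above parses the standard virtio-blk framing: a 16-byte out-header (type, ioprio, sector), the data payload, and a single status byte at the very end of the in-buffers (the driver converts endianness via vdpasim32_to_cpu()/vdpasim64_to_cpu()). A small sketch of the header fields and the offset math:

    #include <stdint.h>
    #include <stdio.h>

    /* Host-endian view of the request header parsed above (mirrors
     * struct virtio_blk_outhdr: type, ioprio, sector).
     */
    struct blk_outhdr {
            uint32_t type;          /* VIRTIO_BLK_T_IN = 0, T_OUT = 1, T_GET_ID = 8 */
            uint32_t ioprio;
            uint64_t sector;        /* 512-byte sectors; offset = sector << 9 */
    };

    int main(void)
    {
            struct blk_outhdr hdr = { .type = 1, .sector = 8 };

            printf("type %u, write at byte offset %llu\n", hdr.type,
                   (unsigned long long)(hdr.sector << 9));  /* 4096 */
            return 0;
    }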
+
+static void vdpasim_blk_work(struct work_struct *work)
+{
+ struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ int i;
+
+ spin_lock(&vdpasim->lock);
+
+ if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
+ for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+
+ if (!vq->ready)
+ continue;
+
+ while (vdpasim_blk_handle_req(vdpasim, vq)) {
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (vringh_need_notify_iotlb(&vq->vring) > 0)
+ vringh_notify(&vq->vring);
+ local_bh_enable();
+ }
+ }
+out:
+ spin_unlock(&vdpasim->lock);
+}
+
+static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
+{
+ struct virtio_blk_config *blk_config = config;
+
+ memset(config, 0, sizeof(struct virtio_blk_config));
+
+ blk_config->capacity = cpu_to_vdpasim64(vdpasim, VDPASIM_BLK_CAPACITY);
+ blk_config->size_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SIZE_MAX);
+ blk_config->seg_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SEG_MAX);
+ blk_config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM);
+ blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
+ blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
+ blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+}
+
+static void vdpasim_blk_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vdpasim_blk_mgmtdev = {
+ .init_name = "vdpasim_blk",
+ .release = vdpasim_blk_mgmtdev_release,
+};
+
+static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct vdpasim_dev_attr dev_attr = {};
+ struct vdpasim *simdev;
+ int ret;
+
+ dev_attr.mgmt_dev = mdev;
+ dev_attr.name = name;
+ dev_attr.id = VIRTIO_ID_BLOCK;
+ dev_attr.supported_features = VDPASIM_BLK_FEATURES;
+ dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
+ dev_attr.config_size = sizeof(struct virtio_blk_config);
+ dev_attr.get_config = vdpasim_blk_get_config;
+ dev_attr.work_fn = vdpasim_blk_work;
+ dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
+
+ simdev = vdpasim_create(&dev_attr);
+ if (IS_ERR(simdev))
+ return PTR_ERR(simdev);
+
+ ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ put_device(&simdev->vdpa.dev);
+ return ret;
+}
+
+static void vdpasim_blk_dev_del(struct vdpa_mgmt_dev *mdev,
+ struct vdpa_device *dev)
+{
+ struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);
+
+ _vdpa_unregister_device(&simdev->vdpa);
+}
+
+static const struct vdpa_mgmtdev_ops vdpasim_blk_mgmtdev_ops = {
+ .dev_add = vdpasim_blk_dev_add,
+ .dev_del = vdpasim_blk_dev_del
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+ .device = &vdpasim_blk_mgmtdev,
+ .id_table = id_table,
+ .ops = &vdpasim_blk_mgmtdev_ops,
+};
+
+static int __init vdpasim_blk_init(void)
+{
+ int ret;
+
+ ret = device_register(&vdpasim_blk_mgmtdev);
+ if (ret)
+ return ret;
+
+ ret = vdpa_mgmtdev_register(&mgmt_dev);
+ if (ret)
+ goto parent_err;
+
+ return 0;
+
+parent_err:
+ device_unregister(&vdpasim_blk_mgmtdev);
+ return ret;
+}
+
+static void __exit vdpasim_blk_exit(void)
+{
+ vdpa_mgmtdev_unregister(&mgmt_dev);
+ device_unregister(&vdpasim_blk_mgmtdev);
+}
+
+module_init(vdpasim_blk_init)
+module_exit(vdpasim_blk_exit)
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/vdpa/virtio_pci/Makefile b/drivers/vdpa/virtio_pci/Makefile
new file mode 100644
index 000000000000..231088d3af7d
--- /dev/null
+++ b/drivers/vdpa/virtio_pci/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VP_VDPA) += vp_vdpa.o
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
new file mode 100644
index 000000000000..c76ebb531212
--- /dev/null
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vDPA bridge driver for modern virtio-pci device
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ * Based on virtio_pci_modern.c.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_modern.h>
+
+#define VP_VDPA_QUEUE_MAX 256
+#define VP_VDPA_DRIVER_NAME "vp_vdpa"
+#define VP_VDPA_NAME_SIZE 256
+
+struct vp_vring {
+ void __iomem *notify;
+ char msix_name[VP_VDPA_NAME_SIZE];
+ struct vdpa_callback cb;
+ resource_size_t notify_pa;
+ int irq;
+};
+
+struct vp_vdpa {
+ struct vdpa_device vdpa;
+ struct virtio_pci_modern_device mdev;
+ struct vp_vring *vring;
+ struct vdpa_callback config_cb;
+ char msix_name[VP_VDPA_NAME_SIZE];
+ int config_irq;
+ int queues;
+ int vectors;
+};
+
+static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
+{
+ return container_of(vdpa, struct vp_vdpa, vdpa);
+}
+
+static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ return &vp_vdpa->mdev;
+}
+
+static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_features(mdev);
+}
+
+static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_features(mdev, features);
+
+ return 0;
+}
+
+static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_status(mdev);
+}
+
+static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
+{
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
+ int i;
+
+ for (i = 0; i < vp_vdpa->queues; i++) {
+ if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
+ &vp_vdpa->vring[i]);
+ vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ }
+ }
+
+ if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
+ vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+ }
+
+ if (vp_vdpa->vectors) {
+ pci_free_irq_vectors(pdev);
+ vp_vdpa->vectors = 0;
+ }
+}
+
+static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
+{
+ struct vp_vring *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
+{
+ struct vp_vdpa *vp_vdpa = arg;
+
+ if (vp_vdpa->config_cb.callback)
+ return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
+{
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
+ int i, ret, irq;
+ int queues = vp_vdpa->queues;
+ int vectors = queues + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
+ if (ret != vectors) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to allocate irq vectors want %d but %d\n",
+ vectors, ret);
+ return ret;
+ }
+
+ vp_vdpa->vectors = vectors;
+
+ for (i = 0; i < queues; i++) {
+ snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
+ "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
+ irq = pci_irq_vector(pdev, i);
+ ret = devm_request_irq(&pdev->dev, irq,
+ vp_vdpa_vq_handler,
+ 0, vp_vdpa->vring[i].msix_name,
+ &vp_vdpa->vring[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to request irq for vq %d\n", i);
+ goto err;
+ }
+ vp_modern_queue_vector(mdev, i, i);
+ vp_vdpa->vring[i].irq = irq;
+ }
+
+ snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config",
+ pci_name(pdev));
+ irq = pci_irq_vector(pdev, queues);
+ ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
+ vp_vdpa->msix_name, vp_vdpa);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to request irq for vq %d\n", i);
+ goto err;
+ }
+ vp_modern_config_vector(mdev, queues);
+ vp_vdpa->config_irq = irq;
+
+ return 0;
+err:
+ vp_vdpa_free_irq(vp_vdpa);
+ return ret;
+}
+
+static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 s = vp_vdpa_get_status(vdpa);
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
+ !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ vp_vdpa_request_irq(vp_vdpa);
+ }
+
+ vp_modern_set_status(mdev, status);
+
+ if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ (s & VIRTIO_CONFIG_S_DRIVER_OK))
+ vp_vdpa_free_irq(vp_vdpa);
+}
+
+static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ return VP_VDPA_QUEUE_MAX;
+}
+
+static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_vq_state *state)
+{
+ /* Note that this is not supported by the virtio specification,
+ * so we return -EOPNOTSUPP here. This means we can't support
+ * live migration and vhost device start/stop.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
+ const struct vdpa_vq_state *state)
+{
+ /* Note that this is not supported by the virtio specification,
+ * so we return -EOPNOTSUPP here. This means we can't support
+ * live migration or vhost device start/stop.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_vdpa->vring[qid].cb = *cb;
+}
+
+static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
+ u16 qid, bool ready)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_queue_enable(mdev, qid, ready);
+}
+
+static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_queue_enable(mdev, qid);
+}
+
+static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
+ u32 num)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_queue_size(mdev, qid, num);
+}
+
+static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_queue_address(mdev, qid, desc_area,
+ driver_area, device_area);
+
+ return 0;
+}
+
+static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
+}
+
+static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_generation(mdev);
+}
+
+static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->id.device;
+}
+
+static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->id.vendor;
+}
+
+static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ return PAGE_SIZE;
+}
+
+static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->device_len;
+}
+
+static void vp_vdpa_get_config(struct vdpa_device *vdpa,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 old, new;
+ u8 *p;
+ int i;
+
+ do {
+ old = vp_ioread8(&mdev->common->config_generation);
+ p = buf;
+ for (i = 0; i < len; i++)
+ *p++ = vp_ioread8(mdev->device + offset + i);
+
+ new = vp_ioread8(&mdev->common->config_generation);
+ } while (old != new);
+}
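The do/while loop implements the virtio-spec rule for config reads wider than one byte: if the device bumps config_generation while the bytes are being read, the whole window is re-read, so a caller never observes a torn value (e.g. half of a concurrently updated 64-bit field). The same pattern in isolation, with hypothetical accessors:

	u8 gen_before, gen_after;

	do {
		gen_before = read_generation(dev);	/* hypothetical */
		read_config(dev, buf, offset, len);	/* hypothetical */
		gen_after = read_generation(dev);
	} while (gen_before != gen_after);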
+
+static void vp_vdpa_set_config(struct vdpa_device *vdpa,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ const u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ vp_iowrite8(*p++, mdev->device + offset + i);
+}
+
+static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_vdpa->config_cb = *cb;
+}
+
+static struct vdpa_notification_area
+vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct vdpa_notification_area notify;
+
+ notify.addr = vp_vdpa->vring[qid].notify_pa;
+ notify.size = mdev->notify_offset_multiplier;
+
+ return notify;
+}
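The returned area is what lets a vhost-vdpa client map the doorbell directly into userspace; the vhost_vdpa_mmap() change further down adds the VM_IO/VM_PFNMAP flags for exactly this mapping. A hedged userspace sketch, assuming page-sized notify areas and using the queue index as the page offset, as vhost_vdpa_mmap() expects:

	void *db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
			vhost_vdpa_fd, (off_t)qid * page_size);
	if (db != MAP_FAILED)
		*(volatile uint16_t *)db = qid;	/* kick the queue */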
+
+static const struct vdpa_config_ops vp_vdpa_ops = {
+ .get_features = vp_vdpa_get_features,
+ .set_features = vp_vdpa_set_features,
+ .get_status = vp_vdpa_get_status,
+ .set_status = vp_vdpa_set_status,
+ .get_vq_num_max = vp_vdpa_get_vq_num_max,
+ .get_vq_state = vp_vdpa_get_vq_state,
+ .get_vq_notification = vp_vdpa_get_vq_notification,
+ .set_vq_state = vp_vdpa_set_vq_state,
+ .set_vq_cb = vp_vdpa_set_vq_cb,
+ .set_vq_ready = vp_vdpa_set_vq_ready,
+ .get_vq_ready = vp_vdpa_get_vq_ready,
+ .set_vq_num = vp_vdpa_set_vq_num,
+ .set_vq_address = vp_vdpa_set_vq_address,
+ .kick_vq = vp_vdpa_kick_vq,
+ .get_generation = vp_vdpa_get_generation,
+ .get_device_id = vp_vdpa_get_device_id,
+ .get_vendor_id = vp_vdpa_get_vendor_id,
+ .get_vq_align = vp_vdpa_get_vq_align,
+ .get_config_size = vp_vdpa_get_config_size,
+ .get_config = vp_vdpa_get_config,
+ .set_config = vp_vdpa_set_config,
+ .set_config_cb = vp_vdpa_set_config_cb,
+};
+
+static void vp_vdpa_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct virtio_pci_modern_device *mdev;
+ struct device *dev = &pdev->dev;
+ struct vp_vdpa *vp_vdpa;
+ int ret, i;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
+ dev, &vp_vdpa_ops, NULL);
+ if (vp_vdpa == NULL) {
+ dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
+ return -ENOMEM;
+ }
+
+ mdev = &vp_vdpa->mdev;
+ mdev->pci_dev = pdev;
+
+ ret = vp_modern_probe(mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vp_vdpa);
+
+ vp_vdpa->vdpa.dma_dev = &pdev->dev;
+ vp_vdpa->queues = vp_modern_get_num_queues(mdev);
+
+ ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed for adding devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
+ sizeof(*vp_vdpa->vring),
+ GFP_KERNEL);
+ if (!vp_vdpa->vring) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
+ goto err;
+ }
+
+ for (i = 0; i < vp_vdpa->queues; i++) {
+ vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ vp_vdpa->vring[i].notify =
+ vp_modern_map_vq_notify(mdev, i,
+ &vp_vdpa->vring[i].notify_pa);
+ if (!vp_vdpa->vring[i].notify) {
+ dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
+ goto err;
+ }
+ }
+ vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+
+ ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ put_device(&vp_vdpa->vdpa.dev);
+ return ret;
+}
+
+static void vp_vdpa_remove(struct pci_dev *pdev)
+{
+ struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
+
+ vdpa_unregister_device(&vp_vdpa->vdpa);
+ vp_modern_remove(&vp_vdpa->mdev);
+}
+
+static struct pci_driver vp_vdpa_driver = {
+ .name = "vp-vdpa",
+ .id_table = NULL, /* only dynamic ids */
+ .probe = vp_vdpa_probe,
+ .remove = vp_vdpa_remove,
+};
+
+module_pci_driver(vp_vdpa_driver);
+
+MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
+MODULE_DESCRIPTION("vp-vdpa");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 07296326d24d..a0747c35a778 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2248,7 +2248,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
- struct iommu_domain_geometry geo;
+ struct iommu_domain_geometry *geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
@@ -2316,10 +2316,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
if (iommu->nesting) {
- int attr = 1;
-
- ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
- &attr);
+ ret = iommu_enable_nesting(domain->domain);
if (ret)
goto out_domain;
}
@@ -2329,10 +2326,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_domain;
/* Get aperture info */
- iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo);
-
- if (vfio_iommu_aper_conflict(iommu, geo.aperture_start,
- geo.aperture_end)) {
+ geo = &domain->domain->geometry;
+ if (vfio_iommu_aper_conflict(iommu, geo->aperture_start,
+ geo->aperture_end)) {
ret = -EINVAL;
goto out_detach;
}
@@ -2355,8 +2351,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_detach;
- ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start,
- geo.aperture_end);
+ ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start,
+ geo->aperture_end);
if (ret)
goto out_detach;
@@ -2489,7 +2485,6 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
struct list_head *iova_copy)
{
struct vfio_domain *domain;
- struct iommu_domain_geometry geo;
struct vfio_iova *node;
dma_addr_t start = 0;
dma_addr_t end = (dma_addr_t)~0;
@@ -2498,12 +2493,12 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
return;
list_for_each_entry(domain, &iommu->domain_list, next) {
- iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,
- &geo);
- if (geo.aperture_start > start)
- start = geo.aperture_start;
- if (geo.aperture_end < end)
- end = geo.aperture_end;
+ struct iommu_domain_geometry *geo = &domain->domain->geometry;
+
+ if (geo->aperture_start > start)
+ start = geo->aperture_start;
+ if (geo->aperture_end < end)
+ end = geo->aperture_end;
}
/* Modify aperture limits. The new aper is either same or bigger */
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index bfa4c6ef554e..fb41db3da611 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -16,12 +16,12 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
-#include <linux/virtio_net.h>
#include "vhost.h"
@@ -188,13 +188,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vhost_vdpa_config *c)
{
- long size = 0;
-
- switch (v->virtio_id) {
- case VIRTIO_ID_NET:
- size = sizeof(struct virtio_net_config);
- break;
- }
+ struct vdpa_device *vdpa = v->vdpa;
+ long size = vdpa->config->get_config_size(vdpa);
if (c->len == 0)
return -EINVAL;
@@ -836,18 +831,14 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
struct vdpa_iova_range *range = &v->range;
- struct iommu_domain_geometry geo;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (ops->get_iova_range) {
*range = ops->get_iova_range(vdpa);
- } else if (v->domain &&
- !iommu_domain_get_attr(v->domain,
- DOMAIN_ATTR_GEOMETRY, &geo) &&
- geo.force_aperture) {
- range->first = geo.aperture_start;
- range->last = geo.aperture_end;
+ } else if (v->domain && v->domain->geometry.force_aperture) {
+ range->first = v->domain->geometry.aperture_start;
+ range->last = v->domain->geometry.aperture_end;
} else {
range->first = 0;
range->last = ULLONG_MAX;
@@ -993,6 +984,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
@@ -1027,10 +1019,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
int minor;
int r;
- /* Currently, we only accept the network devices. */
- if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
- return -ENOTSUPP;
-
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!v)
return -ENOMEM;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 85d85faba058..4af8fa259d65 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -75,6 +75,34 @@ static inline int __vringh_get_head(const struct vringh *vrh,
return head;
}
+/**
+ * vringh_kiov_advance - skip bytes from vring_kiov
+ * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
+ * @len: the maximum length to advance
+ */
+void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
+{
+ while (len && iov->i < iov->used) {
+ size_t partlen = min(iov->iov[iov->i].iov_len, len);
+
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+
+ len -= partlen;
+ }
+}
+EXPORT_SYMBOL(vringh_kiov_advance);
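A plausible caller-side use of the new helper (my_hdr, payload and vrh are hypothetical names, not from this patch): fetch a descriptor chain, skip a fixed-size header in the readable chain, then pull the payload.

	struct kvec kvec[8];		/* hypothetical backing array */
	struct vringh_kiov riov;
	u16 head;
	int err;

	vringh_kiov_init(&riov, kvec, ARRAY_SIZE(kvec));
	err = vringh_getdesc_kern(&vrh, &riov, NULL, &head, GFP_KERNEL);
	if (err <= 0)			/* 0: ring empty, <0: error */
		return err;
	vringh_kiov_advance(&riov, sizeof(struct my_hdr));
	vringh_iov_pull_kern(&riov, payload, sizeof(payload));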
+
/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
struct vringh_kiov *iov,
@@ -95,19 +123,8 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
done += partlen;
len -= partlen;
ptr += partlen;
- iov->consumed += partlen;
- iov->iov[iov->i].iov_len -= partlen;
- iov->iov[iov->i].iov_base += partlen;
- if (!iov->iov[iov->i].iov_len) {
- /* Fix up old iov element then increment. */
- iov->iov[iov->i].iov_len = iov->consumed;
- iov->iov[iov->i].iov_base -= iov->consumed;
-
-
- iov->consumed = 0;
- iov->i++;
- }
+ vringh_kiov_advance(iov, partlen);
}
return done;
}
@@ -290,9 +307,9 @@ __vringh_iov(struct vringh *vrh, u16 i,
return -EINVAL;
if (riov)
- riov->i = riov->used = 0;
+ riov->i = riov->used = riov->consumed = 0;
if (wiov)
- wiov->i = wiov->used = 0;
+ wiov->i = wiov->used = wiov->consumed = 0;
for (;;) {
void *addr;
@@ -662,7 +679,10 @@ EXPORT_SYMBOL(vringh_init_user);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_iov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_user(struct vringh *vrh,
struct vringh_iov *riov,
@@ -932,7 +952,10 @@ EXPORT_SYMBOL(vringh_init_kern);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
@@ -1074,6 +1097,8 @@ static int iotlb_translate(const struct vringh *vrh,
int ret = 0;
u64 s = 0;
+ spin_lock(vrh->iotlb_lock);
+
while (len > s) {
u64 size, pa, pfn;
@@ -1103,6 +1128,8 @@ static int iotlb_translate(const struct vringh *vrh,
++ret;
}
+ spin_unlock(vrh->iotlb_lock);
+
return ret;
}
@@ -1262,10 +1289,13 @@ EXPORT_SYMBOL(vringh_init_iotlb);
* vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
* @vrh: the vring
* @iotlb: iotlb associated with this vring
+ * @iotlb_lock: spinlock to synchronize the iotlb accesses
*/
-void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+ spinlock_t *iotlb_lock)
{
vrh->iotlb = iotlb;
+ vrh->iotlb_lock = iotlb_lock;
}
EXPORT_SYMBOL(vringh_set_iotlb);
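With this change the caller owns the lock and shares it between whatever updates the IOTLB and the vringh accessors (iotlb_translate() above now takes it around each walk). A minimal sketch of that sharing, under those assumptions:

	static DEFINE_SPINLOCK(iotlb_lock);	/* caller-owned */

	vringh_set_iotlb(&vrh, iotlb, &iotlb_lock);

	/* elsewhere, the updater takes the same lock around map changes */
	spin_lock(&iotlb_lock);
	vhost_iotlb_add_range(iotlb, start, last, addr, VHOST_MAP_RW);
	spin_unlock(&iotlb_lock);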
@@ -1285,7 +1315,10 @@ EXPORT_SYMBOL(vringh_set_iotlb);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_iotlb(struct vringh *vrh,
struct vringh_kiov *riov,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index f58a545b3bf3..8ea8f079cde2 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -575,7 +575,8 @@ static int efifb_probe(struct platform_device *dev)
goto err_fb_dealoc;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
- pm_runtime_get_sync(&efifb_pci_dev->dev);
+ if (efifb_pci_dev)
+ pm_runtime_get_sync(&efifb_pci_dev->dev);
return 0;
err_fb_dealoc:
@@ -602,7 +603,8 @@ static int efifb_remove(struct platform_device *pdev)
unregister_framebuffer(info);
sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
framebuffer_release(info);
- pm_runtime_put(&efifb_pci_dev->dev);
+ if (efifb_pci_dev)
+ pm_runtime_put(&efifb_pci_dev->dev);
return 0;
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index a3853421b263..4325bf7f388c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2608,12 +2608,3 @@ EXPORT_SYMBOL(matroxfb_register_driver);
EXPORT_SYMBOL(matroxfb_unregister_driver);
EXPORT_SYMBOL(matroxfb_wait_for_sync);
EXPORT_SYMBOL(matroxfb_enable_irq);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
-
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 1e8a38a7967d..e2757ff1c23d 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1451,13 +1451,3 @@ MODULE_DESCRIPTION("Legacy VGA framebuffer device driver");
MODULE_LICENSE("GPL");
module_init(vga16fb_init);
module_exit(vga16fb_exit);
-
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
-
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index f1964ea4b826..e21e1e86ad15 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
* enclave file descriptor to be further used for enclave
* resources handling e.g. memory regions and CPUs.
* @ne_pci_dev : Private data associated with the PCI device.
- * @slot_uid: Generated unique slot id associated with an enclave.
+ * @slot_uid: User pointer where the generated unique slot id
+ * associated with an enclave is stored.
*
* Context: Process context. This function is called with the ne_pci_dev enclave
* mutex held.
@@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
* * Enclave fd on success.
* * Negative return value on failure.
*/
-static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
+static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
int enclave_fd = -1;
@@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
- *slot_uid = ne_enclave->slot_uid;
+ if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
+ /*
+ * As we're holding the only reference to 'enclave_file', fput()
+ * will call ne_enclave_release() which will do a proper cleanup
+ * of all so far allocated resources, leaving only the unused fd
+ * for us to free.
+ */
+ fput(enclave_file);
+ put_unused_fd(enclave_fd);
+
+ return -EFAULT;
+ }
fd_install(enclave_fd, enclave_file);
@@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case NE_CREATE_VM: {
int enclave_fd = -1;
- struct file *enclave_file = NULL;
struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
- int rc = -EINVAL;
- u64 slot_uid = 0;
+ u64 __user *slot_uid = (void __user *)arg;
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
-
- enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
- if (enclave_fd < 0) {
- rc = enclave_fd;
-
- mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
-
- return rc;
- }
-
+ enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
- if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
- enclave_file = fget(enclave_fd);
- /* Decrement file refs to have release() called. */
- fput(enclave_file);
- fput(enclave_file);
- put_unused_fd(enclave_fd);
-
- return -EFAULT;
- }
-
return enclave_fd;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8985fc2cea86..510e9318854d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -734,7 +734,7 @@ static void report_free_page_func(struct work_struct *work)
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
- * a compation thread. (called under page lock)
+ * a compaction thread. (called under page lock)
* @vb_dev_info: the balloon device
* @newpage: page that will replace the isolated page after migration finishes.
* @page : the isolated (old) page that is about to be migrated to newpage.
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index fbd4ebc00eb6..30654d3a0b41 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -192,7 +192,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct virtqueue *vq;
- u16 num, off;
+ u16 num;
int err;
if (index >= vp_modern_get_num_queues(mdev))
@@ -208,9 +208,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return ERR_PTR(-EINVAL);
}
- /* get offset of notification word for this vq */
- off = vp_modern_get_queue_notify_off(mdev, index);
-
info->msix_vector = msix_vec;
/* create the vring */
@@ -227,27 +224,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
virtqueue_get_avail_addr(vq),
virtqueue_get_used_addr(vq));
- if (mdev->notify_base) {
- /* offset should not wrap */
- if ((u64)off * mdev->notify_offset_multiplier + 2
- > mdev->notify_len) {
- dev_warn(&mdev->pci_dev->dev,
- "bad notification offset %u (x %u) "
- "for queue %u > %zd",
- off, mdev->notify_offset_multiplier,
- index, mdev->notify_len);
- err = -EINVAL;
- goto err_map_notify;
- }
- vq->priv = (void __force *)mdev->notify_base +
- off * mdev->notify_offset_multiplier;
- } else {
- vq->priv = (void __force *)vp_modern_map_capability(mdev,
- mdev->notify_map_cap, 2, 2,
- off * mdev->notify_offset_multiplier, 2,
- NULL);
- }
-
+ vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
goto err_map_notify;
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index cbd667496bb1..54f297028586 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -13,14 +13,14 @@
* @start: start from the capability
* @size: map size
* @len: the length that is actually mapped
+ * @pa: physical address of the capability
*
* Returns the io address for the part of the capability
*/
-void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
- size_t minlen,
- u32 align,
- u32 start, u32 size,
- size_t *len)
+static void __iomem *
+vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen, u32 align, u32 start, u32 size,
+ size_t *len, resource_size_t *pa)
{
struct pci_dev *dev = mdev->pci_dev;
u8 bar;
@@ -88,9 +88,11 @@ void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, in
dev_err(&dev->dev,
"virtio_pci: unable to map virtio %u@%u on bar %i\n",
length, offset, bar);
+ else if (pa)
+ *pa = pci_resource_start(dev, bar) + offset;
+
return p;
}
-EXPORT_SYMBOL_GPL(vp_modern_map_capability);
/**
* virtio_pci_find_capability - walk capabilities to find device info.
@@ -275,12 +277,12 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
0, sizeof(struct virtio_pci_common_cfg),
- NULL);
+ NULL, NULL);
if (!mdev->common)
goto err_map_common;
mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
0, 1,
- NULL);
+ NULL, NULL);
if (!mdev->isr)
goto err_map_isr;
@@ -308,7 +310,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->notify_base = vp_modern_map_capability(mdev, notify,
2, 2,
0, notify_length,
- &mdev->notify_len);
+ &mdev->notify_len,
+ &mdev->notify_pa);
if (!mdev->notify_base)
goto err_map_notify;
} else {
@@ -321,7 +324,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
if (device) {
mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
0, PAGE_SIZE,
- &mdev->device_len);
+ &mdev->device_len,
+ NULL);
if (!mdev->device)
goto err_map_device;
}
@@ -584,14 +588,51 @@ EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
*
* Returns the notification offset for a virtqueue
*/
-u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
- u16 index)
+static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 index)
{
vp_iowrite16(index, &mdev->common->queue_select);
return vp_ioread16(&mdev->common->queue_notify_off);
}
-EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
+
+/*
+ * vp_modern_map_vq_notify - map notification area for a
+ * specific virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @pa: the pointer to the physical address of the notify area
+ *
+ * Returns the address of the notification area
+ */
+void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
+ u16 index, resource_size_t *pa)
+{
+ u16 off = vp_modern_get_queue_notify_off(mdev, index);
+
+ if (mdev->notify_base) {
+ /* offset should not wrap */
+ if ((u64)off * mdev->notify_offset_multiplier + 2
+ > mdev->notify_len) {
+ dev_warn(&mdev->pci_dev->dev,
+ "bad notification offset %u (x %u) "
+ "for queue %u > %zd",
+ off, mdev->notify_offset_multiplier,
+ index, mdev->notify_len);
+ return NULL;
+ }
+ if (pa)
+ *pa = mdev->notify_pa +
+ off * mdev->notify_offset_multiplier;
+ return mdev->notify_base + off * mdev->notify_offset_multiplier;
+ } else {
+ return vp_modern_map_capability(mdev,
+ mdev->notify_map_cap, 2, 2,
+ off * mdev->notify_offset_multiplier, 2,
+ NULL, pa);
+ }
+}
+EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
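A minimal caller sketch (assuming a successfully probed mdev with at least one queue): map the doorbell, optionally keeping the physical address for a later userspace mapping, then kick.

	resource_size_t notify_pa;
	void __iomem *db;

	db = vp_modern_map_vq_notify(mdev, 0, &notify_pa);
	if (!db)
		return -EINVAL;
	vp_iowrite16(0, db);	/* notify virtqueue 0 */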
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99..4c89afc0df62 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -40,14 +40,7 @@
#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static char *xen_io_tlb_start, *xen_io_tlb_end;
-static unsigned long xen_io_tlb_nslabs;
/*
* Quick lookup value of the bus address of the IOTLB.
*/
@@ -82,11 +75,6 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
-static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
-{
- return xen_phys_to_dma(dev, virt_to_phys(address));
-}
-
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
@@ -111,15 +99,12 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
* have the same virtual address as another address
* in our domain. Therefore _only_ check address within our domain.
*/
- if (pfn_valid(PFN_DOWN(paddr))) {
- return paddr >= virt_to_phys(xen_io_tlb_start) &&
- paddr < virt_to_phys(xen_io_tlb_end);
- }
+ if (pfn_valid(PFN_DOWN(paddr)))
+ return is_swiotlb_buffer(paddr);
return 0;
}
-static int
-xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
+static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int i, rc;
int dma_bits;
@@ -145,16 +130,6 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
} while (i < nslabs);
return 0;
}
-static unsigned long xen_set_nslabs(unsigned long nr_tbl)
-{
- if (!nr_tbl) {
- xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
- xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
- } else
- xen_io_tlb_nslabs = nr_tbl;
-
- return xen_io_tlb_nslabs << IO_TLB_SHIFT;
-}
enum xen_swiotlb_err {
XEN_SWIOTLB_UNKNOWN = 0,
@@ -177,102 +152,109 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
}
return "";
}
-int __ref xen_swiotlb_init(int verbose, bool early)
+
+#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
+
+int __ref xen_swiotlb_init(void)
{
- unsigned long bytes, order;
- int rc = -ENOMEM;
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
- unsigned int repeat = 3;
+ unsigned long bytes = swiotlb_size_or_default();
+ unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+ unsigned int order, repeat = 3;
+ int rc = -ENOMEM;
+ char *start;
- xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
- bytes = xen_set_nslabs(xen_io_tlb_nslabs);
- order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
-
- /*
- * IO TLB memory already allocated. Just use it.
- */
- if (io_tlb_start != 0) {
- xen_io_tlb_start = phys_to_virt(io_tlb_start);
- goto end;
- }
+ m_ret = XEN_SWIOTLB_ENOMEM;
+ order = get_order(bytes);
/*
* Get IO TLB memory from any location.
*/
- if (early) {
- xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
- PAGE_SIZE);
- if (!xen_io_tlb_start)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
- } else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
- while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
- if (xen_io_tlb_start)
- break;
- order--;
- }
- if (order != get_order(bytes)) {
- pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
- (PAGE_SIZE << order) >> 20);
- xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
- bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
- }
+ while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+ start = (void *)xen_get_swiotlb_free_pages(order);
+ if (start)
+ break;
+ order--;
}
- if (!xen_io_tlb_start) {
- m_ret = XEN_SWIOTLB_ENOMEM;
+ if (!start)
goto error;
+ if (order != get_order(bytes)) {
+ pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
+ (PAGE_SIZE << order) >> 20);
+ nslabs = SLABS_PER_PAGE << order;
+ bytes = nslabs << IO_TLB_SHIFT;
}
+
/*
* And replace that memory with pages under 4GB.
*/
- rc = xen_swiotlb_fixup(xen_io_tlb_start,
- bytes,
- xen_io_tlb_nslabs);
+ rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
- if (early)
- memblock_free(__pa(xen_io_tlb_start),
- PAGE_ALIGN(bytes));
- else {
- free_pages((unsigned long)xen_io_tlb_start, order);
- xen_io_tlb_start = NULL;
- }
+ free_pages((unsigned long)start, order);
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
- if (early) {
- if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
- verbose))
- panic("Cannot allocate SWIOTLB buffer");
- rc = 0;
- } else
- rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
-
-end:
- xen_io_tlb_end = xen_io_tlb_start + bytes;
- if (!rc)
- swiotlb_set_max_segment(PAGE_SIZE);
-
- return rc;
+ rc = swiotlb_late_init_with_tbl(start, nslabs);
+ if (rc)
+ return rc;
+ swiotlb_set_max_segment(PAGE_SIZE);
+ return 0;
error:
if (repeat--) {
- xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
- (xen_io_tlb_nslabs >> 1));
+ /* Min is 2MB */
+ nslabs = max(1024UL, (nslabs >> 1));
pr_info("Lowering to %luMB\n",
- (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+ (nslabs << IO_TLB_SHIFT) >> 20);
goto retry;
}
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- if (early)
- panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
- else
- free_pages((unsigned long)xen_io_tlb_start, order);
+ free_pages((unsigned long)start, order);
return rc;
}
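For reference, the default sizing above is straightforward arithmetic (in this kernel IO_TLB_SHIFT is 11, i.e. 2 KiB slabs, and IO_TLB_SEGSIZE is 128):

	/* DEFAULT_NSLABS = ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
	 *                = ALIGN(67108864 >> 11, 128)
	 *                = ALIGN(32768, 128) = 32768 slabs
	 * 32768 slabs << 11 = 64 MiB of IO TLB by default.
	 */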
+#ifdef CONFIG_X86
+void __init xen_swiotlb_init_early(void)
+{
+ unsigned long bytes = swiotlb_size_or_default();
+ unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+ unsigned int repeat = 3;
+ char *start;
+ int rc;
+
+retry:
+ /*
+ * Get IO TLB memory from any location.
+ */
+ start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+ if (!start)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+
+ /*
+ * And replace that memory with pages under 4GB.
+ */
+ rc = xen_swiotlb_fixup(start, nslabs);
+ if (rc) {
+ memblock_free(__pa(start), PAGE_ALIGN(bytes));
+ if (repeat--) {
+ /* Min is 2MB */
+ nslabs = max(1024UL, (nslabs >> 1));
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
+ goto retry;
+ }
+ panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
+ }
+
+ if (swiotlb_init_with_tbl(start, nslabs, false))
+ panic("Cannot allocate SWIOTLB buffer");
+ swiotlb_set_max_segment(PAGE_SIZE);
+}
+#endif /* CONFIG_X86 */
+
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
@@ -406,7 +388,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* Ensure that the address returned is DMA'ble
*/
if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
- swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+ swiotlb_tbl_unmap_single(dev, map, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
@@ -445,7 +427,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(hwdev, dev_addr))
- swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}
static void
@@ -462,7 +444,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
}
if (is_xen_swiotlb_buffer(dev, dma_addr))
- swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+ swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}
static void
@@ -472,7 +454,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
if (is_xen_swiotlb_buffer(dev, dma_addr))
- swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+ swiotlb_sync_single_for_device(dev, paddr, size, dir);
if (!dev_is_dma_coherent(dev)) {
if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
@@ -560,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
+ return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {